diff --git a/.github/workflows/prerelease.yml b/.github/workflows/prerelease.yml index 60ce608b21..258458e2d4 100644 --- a/.github/workflows/prerelease.yml +++ b/.github/workflows/prerelease.yml @@ -33,7 +33,7 @@ jobs: id: version if: steps.version_type.outputs.type != 'skip' run: | - RESULT=$(python ./tools/ci_tools.py --nightly) + RESULT=$(python ./tools/ci_tools.py --nightly --github_token ${{ secrets.GITHUB_TOKEN }}) echo ::set-output name=next_tag::$RESULT diff --git a/CHANGELOG.md b/CHANGELOG.md index add7f53ae9..e9405ff759 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,18 +1,114 @@ # Changelog -## [3.6.0-nightly.5](https://github.com/pypeclub/OpenPype/tree/HEAD) +## [3.7.0-nightly.3](https://github.com/pypeclub/OpenPype/tree/HEAD) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.5.0...HEAD) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.4...HEAD) + +### 📖 Documentation + +- docs\[website\]: Add Ellipse Studio \(logo\) as an OpenPype contributor [\#2324](https://github.com/pypeclub/OpenPype/pull/2324) **🆕 New features** -- Maya : Colorspace configuration [\#2170](https://github.com/pypeclub/OpenPype/pull/2170) -- Blender: Added support for audio [\#2168](https://github.com/pypeclub/OpenPype/pull/2168) -- Flame: a host basic integration [\#2165](https://github.com/pypeclub/OpenPype/pull/2165) -- Houdini: simple HDA workflow [\#2072](https://github.com/pypeclub/OpenPype/pull/2072) +- Store typed version dependencies for workfiles [\#2192](https://github.com/pypeclub/OpenPype/pull/2192) **🚀 Enhancements** +- Hiero: Add experimental tools action [\#2323](https://github.com/pypeclub/OpenPype/pull/2323) +- Input links: Cleanup and unification of differences [\#2322](https://github.com/pypeclub/OpenPype/pull/2322) +- General: Run process log stderr as info log level [\#2309](https://github.com/pypeclub/OpenPype/pull/2309) +- Tools: Cleanup of unused classes [\#2304](https://github.com/pypeclub/OpenPype/pull/2304) +- Project 
Manager: Added ability to delete project [\#2298](https://github.com/pypeclub/OpenPype/pull/2298) +- Ftrack: Synchronize input links [\#2287](https://github.com/pypeclub/OpenPype/pull/2287) +- StandalonePublisher: Remove unused plugin ExtractHarmonyZip [\#2277](https://github.com/pypeclub/OpenPype/pull/2277) +- Ftrack: Support multiple reviews [\#2271](https://github.com/pypeclub/OpenPype/pull/2271) +- Ftrack: Remove unused clean component plugin [\#2269](https://github.com/pypeclub/OpenPype/pull/2269) +- Houdini: Add experimental tools action [\#2267](https://github.com/pypeclub/OpenPype/pull/2267) +- Tools: Assets widget [\#2265](https://github.com/pypeclub/OpenPype/pull/2265) +- Nuke: extract baked review videos presets [\#2248](https://github.com/pypeclub/OpenPype/pull/2248) +- TVPaint: Workers rendering [\#2209](https://github.com/pypeclub/OpenPype/pull/2209) + +**🐛 Bug fixes** + +- Fix - provider icons are pulled from a folder [\#2326](https://github.com/pypeclub/OpenPype/pull/2326) +- InputLinks: Typo in "inputLinks" key [\#2314](https://github.com/pypeclub/OpenPype/pull/2314) +- Deadline timeout and logging [\#2312](https://github.com/pypeclub/OpenPype/pull/2312) +- nuke: do not multiply representation on class method [\#2311](https://github.com/pypeclub/OpenPype/pull/2311) +- Workfiles tool: Fix task formatting [\#2306](https://github.com/pypeclub/OpenPype/pull/2306) +- Delivery: Fix delivery paths created on windows [\#2302](https://github.com/pypeclub/OpenPype/pull/2302) +- Maya: Deadline - fix limit groups [\#2295](https://github.com/pypeclub/OpenPype/pull/2295) +- New Publisher: Fix mapping of indexes [\#2285](https://github.com/pypeclub/OpenPype/pull/2285) +- Alternate site for site sync doesnt work for sequences [\#2284](https://github.com/pypeclub/OpenPype/pull/2284) +- FFmpeg: Execute ffprobe using list of arguments instead of string command [\#2281](https://github.com/pypeclub/OpenPype/pull/2281) +- Nuke: Anatomy fill data use task as dictionary 
[\#2278](https://github.com/pypeclub/OpenPype/pull/2278) +- Bug: fix variable name \_asset\_id in workfiles application [\#2274](https://github.com/pypeclub/OpenPype/pull/2274) +- Version handling fixes [\#2272](https://github.com/pypeclub/OpenPype/pull/2272) + +## [3.6.4](https://github.com/pypeclub/OpenPype/tree/3.6.4) (2021-11-23) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.7.0-nightly.1...3.6.4) + +**🐛 Bug fixes** + +- Nuke: inventory update removes all loaded read nodes [\#2294](https://github.com/pypeclub/OpenPype/pull/2294) + +## [3.6.3](https://github.com/pypeclub/OpenPype/tree/3.6.3) (2021-11-19) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.6.3-nightly.1...3.6.3) + +**🐛 Bug fixes** + +- Deadline: Fix publish targets [\#2280](https://github.com/pypeclub/OpenPype/pull/2280) + +## [3.6.2](https://github.com/pypeclub/OpenPype/tree/3.6.2) (2021-11-18) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.6.2-nightly.2...3.6.2) + +**🚀 Enhancements** + +- Royal Render: Support for rr channels in separate dirs [\#2268](https://github.com/pypeclub/OpenPype/pull/2268) +- SceneInventory: Choose loader in asset switcher [\#2262](https://github.com/pypeclub/OpenPype/pull/2262) +- Style: New fonts in OpenPype style [\#2256](https://github.com/pypeclub/OpenPype/pull/2256) +- Tools: SceneInventory in OpenPype [\#2255](https://github.com/pypeclub/OpenPype/pull/2255) +- Tools: Tasks widget [\#2251](https://github.com/pypeclub/OpenPype/pull/2251) +- Added endpoint for configured extensions [\#2221](https://github.com/pypeclub/OpenPype/pull/2221) + +**🐛 Bug fixes** + +- Tools: Parenting of tools in Nuke and Hiero [\#2266](https://github.com/pypeclub/OpenPype/pull/2266) +- limiting validator to specific editorial hosts [\#2264](https://github.com/pypeclub/OpenPype/pull/2264) +- Tools: Select Context dialog attribute fix [\#2261](https://github.com/pypeclub/OpenPype/pull/2261) +- Maya: Render publishing fails on 
linux [\#2260](https://github.com/pypeclub/OpenPype/pull/2260) +- LookAssigner: Fix tool reopen [\#2259](https://github.com/pypeclub/OpenPype/pull/2259) +- Standalone: editorial not publishing thumbnails on all subsets [\#2258](https://github.com/pypeclub/OpenPype/pull/2258) +- Loader doesn't allow changing of version before loading [\#2254](https://github.com/pypeclub/OpenPype/pull/2254) +- Burnins: Support mxf metadata [\#2247](https://github.com/pypeclub/OpenPype/pull/2247) +- Maya: Support for configurable AOV separator characters [\#2197](https://github.com/pypeclub/OpenPype/pull/2197) +- Maya: texture colorspace modes in looks [\#2195](https://github.com/pypeclub/OpenPype/pull/2195) + +## [3.6.1](https://github.com/pypeclub/OpenPype/tree/3.6.1) (2021-11-16) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.6.1-nightly.1...3.6.1) + +## [3.6.0](https://github.com/pypeclub/OpenPype/tree/3.6.0) (2021-11-15) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.6.0-nightly.6...3.6.0) + +### 📖 Documentation + +- Add alternative sites for Site Sync [\#2206](https://github.com/pypeclub/OpenPype/pull/2206) +- Add command line way of running site sync server [\#2188](https://github.com/pypeclub/OpenPype/pull/2188) + +**🚀 Enhancements** + +- Tools: Creator in OpenPype [\#2244](https://github.com/pypeclub/OpenPype/pull/2244) +- Tools: Subset manager in OpenPype [\#2243](https://github.com/pypeclub/OpenPype/pull/2243) +- General: Skip module directories without init file [\#2239](https://github.com/pypeclub/OpenPype/pull/2239) +- General: Static interfaces [\#2238](https://github.com/pypeclub/OpenPype/pull/2238) +- Style: Fix transparent image in style [\#2235](https://github.com/pypeclub/OpenPype/pull/2235) +- Add a "following workfile versioning" option on publish [\#2225](https://github.com/pypeclub/OpenPype/pull/2225) +- Modules: Module can add cli commands [\#2224](https://github.com/pypeclub/OpenPype/pull/2224) +- Webpublisher: 
Separate webpublisher logic [\#2222](https://github.com/pypeclub/OpenPype/pull/2222) - Add both side availability on Site Sync sites to Loader [\#2220](https://github.com/pypeclub/OpenPype/pull/2220) - Tools: Center loader and library loader on show [\#2219](https://github.com/pypeclub/OpenPype/pull/2219) - Maya : Validate shape zero [\#2212](https://github.com/pypeclub/OpenPype/pull/2212) @@ -21,91 +117,30 @@ - Ftrack: Replace Queue with deque in event handlers logic [\#2204](https://github.com/pypeclub/OpenPype/pull/2204) - Tools: New select context dialog [\#2200](https://github.com/pypeclub/OpenPype/pull/2200) - Maya : Validate mesh ngons [\#2199](https://github.com/pypeclub/OpenPype/pull/2199) +- Dirmap in Nuke [\#2198](https://github.com/pypeclub/OpenPype/pull/2198) - Delivery: Check 'frame' key in template for sequence delivery [\#2196](https://github.com/pypeclub/OpenPype/pull/2196) +- Settings: Site sync project settings improvement [\#2193](https://github.com/pypeclub/OpenPype/pull/2193) - Usage of tools code [\#2185](https://github.com/pypeclub/OpenPype/pull/2185) -- Settings: Dictionary based on project roots [\#2184](https://github.com/pypeclub/OpenPype/pull/2184) -- Subset name: Be able to pass asset document to get subset name [\#2179](https://github.com/pypeclub/OpenPype/pull/2179) -- Tools: Experimental tools [\#2167](https://github.com/pypeclub/OpenPype/pull/2167) -- Loader: Refactor and use OpenPype stylesheets [\#2166](https://github.com/pypeclub/OpenPype/pull/2166) -- Add loader for linked smart objects in photoshop [\#2149](https://github.com/pypeclub/OpenPype/pull/2149) **🐛 Bug fixes** +- Ftrack: Sync project ftrack id cache issue [\#2250](https://github.com/pypeclub/OpenPype/pull/2250) +- Ftrack: Session creation and Prepare project [\#2245](https://github.com/pypeclub/OpenPype/pull/2245) +- Added queue for studio processing in PS [\#2237](https://github.com/pypeclub/OpenPype/pull/2237) +- Python 2: Unicode to string conversion 
[\#2236](https://github.com/pypeclub/OpenPype/pull/2236) +- Fix - enum for color coding in PS [\#2234](https://github.com/pypeclub/OpenPype/pull/2234) +- Pyblish Tool: Fix targets handling [\#2232](https://github.com/pypeclub/OpenPype/pull/2232) +- Ftrack: Base event fix of 'get\_project\_from\_entity' method [\#2214](https://github.com/pypeclub/OpenPype/pull/2214) - Maya : multiple subsets review broken [\#2210](https://github.com/pypeclub/OpenPype/pull/2210) - Fix - different command used for Linux and Mac OS [\#2207](https://github.com/pypeclub/OpenPype/pull/2207) - Tools: Workfiles tool don't use avalon widgets [\#2205](https://github.com/pypeclub/OpenPype/pull/2205) - Ftrack: Fill missing ftrack id on mongo project [\#2203](https://github.com/pypeclub/OpenPype/pull/2203) - Project Manager: Fix copying of tasks [\#2191](https://github.com/pypeclub/OpenPype/pull/2191) -- StandalonePublisher: Source validator don't expect representations [\#2190](https://github.com/pypeclub/OpenPype/pull/2190) -- Blender: Fix trying to pack an image when the shader node has no texture [\#2183](https://github.com/pypeclub/OpenPype/pull/2183) -- MacOS: Launching of applications may cause Permissions error [\#2175](https://github.com/pypeclub/OpenPype/pull/2175) -- Maya: Aspect ratio [\#2174](https://github.com/pypeclub/OpenPype/pull/2174) -- Blender: Fix 'Deselect All' with object not in 'Object Mode' [\#2163](https://github.com/pypeclub/OpenPype/pull/2163) -- Maya: Fix hotbox broken by scriptsmenu [\#2151](https://github.com/pypeclub/OpenPype/pull/2151) -- Added validator for source files for Standalone Publisher [\#2138](https://github.com/pypeclub/OpenPype/pull/2138) - -**Merged pull requests:** - -- Settings: Site sync project settings improvement [\#2193](https://github.com/pypeclub/OpenPype/pull/2193) -- Add validate active site button to sync queue on a project [\#2176](https://github.com/pypeclub/OpenPype/pull/2176) -- Bump pillow from 8.2.0 to 8.3.2 
[\#2162](https://github.com/pypeclub/OpenPype/pull/2162) ## [3.5.0](https://github.com/pypeclub/OpenPype/tree/3.5.0) (2021-10-17) [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.5.0-nightly.8...3.5.0) -**Deprecated:** - -- Maya: Change mayaAscii family to mayaScene [\#2106](https://github.com/pypeclub/OpenPype/pull/2106) - -**🆕 New features** - -- Added project and task into context change message in Maya [\#2131](https://github.com/pypeclub/OpenPype/pull/2131) -- Add ExtractBurnin to photoshop review [\#2124](https://github.com/pypeclub/OpenPype/pull/2124) -- PYPE-1218 - changed namespace to contain subset name in Maya [\#2114](https://github.com/pypeclub/OpenPype/pull/2114) -- Added running configurable disk mapping command before start of OP [\#2091](https://github.com/pypeclub/OpenPype/pull/2091) -- SFTP provider [\#2073](https://github.com/pypeclub/OpenPype/pull/2073) - -**🚀 Enhancements** - -- Maya: make rig validators configurable in settings [\#2137](https://github.com/pypeclub/OpenPype/pull/2137) -- Settings: Updated readme for entity types in settings [\#2132](https://github.com/pypeclub/OpenPype/pull/2132) -- Nuke: unified clip loader [\#2128](https://github.com/pypeclub/OpenPype/pull/2128) -- Settings UI: Project model refreshing and sorting [\#2104](https://github.com/pypeclub/OpenPype/pull/2104) -- Create Read From Rendered - Disable Relative paths by default [\#2093](https://github.com/pypeclub/OpenPype/pull/2093) -- Added choosing different dirmap mapping if workfile synched locally [\#2088](https://github.com/pypeclub/OpenPype/pull/2088) -- General: Remove IdleManager module [\#2084](https://github.com/pypeclub/OpenPype/pull/2084) -- Tray UI: Message box about missing settings defaults [\#2080](https://github.com/pypeclub/OpenPype/pull/2080) -- Tray UI: Show menu where first click happened [\#2079](https://github.com/pypeclub/OpenPype/pull/2079) -- Global: add global validators to settings 
[\#2078](https://github.com/pypeclub/OpenPype/pull/2078) -- Use CRF for burnin when available [\#2070](https://github.com/pypeclub/OpenPype/pull/2070) -- Project manager: Filter first item after selection of project [\#2069](https://github.com/pypeclub/OpenPype/pull/2069) - -**🐛 Bug fixes** - -- Maya: fix model publishing [\#2130](https://github.com/pypeclub/OpenPype/pull/2130) -- Fix - oiiotool wasn't recognized even if present [\#2129](https://github.com/pypeclub/OpenPype/pull/2129) -- General: Disk mapping group [\#2120](https://github.com/pypeclub/OpenPype/pull/2120) -- Hiero: publishing effect first time makes wrong resources path [\#2115](https://github.com/pypeclub/OpenPype/pull/2115) -- Add startup script for Houdini Core. [\#2110](https://github.com/pypeclub/OpenPype/pull/2110) -- TVPaint: Behavior name of loop also accept repeat [\#2109](https://github.com/pypeclub/OpenPype/pull/2109) -- Ftrack: Project settings save custom attributes skip unknown attributes [\#2103](https://github.com/pypeclub/OpenPype/pull/2103) -- Blender: Fix NoneType error when animation\_data is missing for a rig [\#2101](https://github.com/pypeclub/OpenPype/pull/2101) -- Fix broken import in sftp provider [\#2100](https://github.com/pypeclub/OpenPype/pull/2100) -- Global: Fix docstring on publish plugin extract review [\#2097](https://github.com/pypeclub/OpenPype/pull/2097) -- Delivery Action Files Sequence fix [\#2096](https://github.com/pypeclub/OpenPype/pull/2096) -- General: Cloud mongo ca certificate issue [\#2095](https://github.com/pypeclub/OpenPype/pull/2095) -- TVPaint: Creator use context from workfile [\#2087](https://github.com/pypeclub/OpenPype/pull/2087) -- Blender: fix texture missing when publishing blend files [\#2085](https://github.com/pypeclub/OpenPype/pull/2085) -- General: Startup validations oiio tool path fix on linux [\#2083](https://github.com/pypeclub/OpenPype/pull/2083) -- Deadline: Collect deadline server does not check existence of deadline key 
[\#2082](https://github.com/pypeclub/OpenPype/pull/2082) -- Blender: fixed Curves with modifiers in Rigs [\#2081](https://github.com/pypeclub/OpenPype/pull/2081) -- Nuke UI scaling [\#2077](https://github.com/pypeclub/OpenPype/pull/2077) - -**Merged pull requests:** - -- Bump pywin32 from 300 to 301 [\#2086](https://github.com/pypeclub/OpenPype/pull/2086) - ## [3.4.1](https://github.com/pypeclub/OpenPype/tree/3.4.1) (2021-09-23) [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.4.1-nightly.1...3.4.1) diff --git a/igniter/bootstrap_repos.py b/igniter/bootstrap_repos.py index f7f35824c8..151597e505 100644 --- a/igniter/bootstrap_repos.py +++ b/igniter/bootstrap_repos.py @@ -10,6 +10,7 @@ import tempfile from pathlib import Path from typing import Union, Callable, List, Tuple import hashlib +import platform from zipfile import ZipFile, BadZipFile @@ -196,21 +197,23 @@ class OpenPypeVersion(semver.VersionInfo): return str(self.finalize_version()) @staticmethod - def version_in_str(string: str) -> Tuple: + def version_in_str(string: str) -> Union[None, OpenPypeVersion]: """Find OpenPype version in given string. Args: string (str): string to search. Returns: - tuple: True/False and OpenPypeVersion if found. + OpenPypeVersion: of detected or None. 
""" m = re.search(OpenPypeVersion._VERSION_REGEX, string) if not m: - return False, None + return None version = OpenPypeVersion.parse(string[m.start():m.end()]) - return True, version + if "staging" in string[m.start():m.end()]: + version.staging = True + return version @classmethod def parse(cls, version): @@ -531,6 +534,7 @@ class BootstrapRepos: processed_path = file self._print(f"- processing {processed_path}") + checksums.append( ( sha256sum(file.as_posix()), @@ -542,7 +546,10 @@ class BootstrapRepos: checksums_str = "" for c in checksums: - checksums_str += "{}:{}\n".format(c[0], c[1]) + file_str = c[1] + if platform.system().lower() == "windows": + file_str = c[1].as_posix().replace("\\", "/") + checksums_str += "{}:{}\n".format(c[0], file_str) zip_file.writestr("checksums", checksums_str) # test if zip is ok zip_file.testzip() @@ -563,6 +570,8 @@ class BootstrapRepos: and string with reason as second. """ + if os.getenv("OPENPYPE_DONT_VALIDATE_VERSION"): + return True, "Disabled validation" if not path.exists(): return False, "Path doesn't exist" @@ -589,13 +598,16 @@ class BootstrapRepos: # calculate and compare checksums in the zip file for file in checksums: + file_name = file[1] + if platform.system().lower() == "windows": + file_name = file_name.replace("/", "\\") h = hashlib.sha256() try: - h.update(zip_file.read(file[1])) + h.update(zip_file.read(file_name)) except FileNotFoundError: - return False, f"Missing file [ {file[1]} ]" + return False, f"Missing file [ {file_name} ]" if h.hexdigest() != file[0]: - return False, f"Invalid checksum on {file[1]}" + return False, f"Invalid checksum on {file_name}" # get list of files in zip minus `checksums` file itself # and turn in to set to compare against list of files @@ -604,7 +616,7 @@ class BootstrapRepos: files_in_zip = zip_file.namelist() files_in_zip.remove("checksums") files_in_zip = set(files_in_zip) - files_in_checksum = set([file[1] for file in checksums]) + files_in_checksum = {file[1] for file 
in checksums} diff = files_in_zip.difference(files_in_checksum) if diff: return False, f"Missing files {diff}" @@ -628,16 +640,19 @@ class BootstrapRepos: ] files_in_dir.remove("checksums") files_in_dir = set(files_in_dir) - files_in_checksum = set([file[1] for file in checksums]) + files_in_checksum = {file[1] for file in checksums} for file in checksums: + file_name = file[1] + if platform.system().lower() == "windows": + file_name = file_name.replace("/", "\\") try: - current = sha256sum((path / file[1]).as_posix()) + current = sha256sum((path / file_name).as_posix()) except FileNotFoundError: - return False, f"Missing file [ {file[1]} ]" + return False, f"Missing file [ {file_name} ]" if file[0] != current: - return False, f"Invalid checksum on {file[1]}" + return False, f"Invalid checksum on {file_name}" diff = files_in_dir.difference(files_in_checksum) if diff: return False, f"Missing files {diff}" @@ -1161,9 +1176,9 @@ class BootstrapRepos: name = item.name if item.is_dir() else item.stem result = OpenPypeVersion.version_in_str(name) - if result[0]: + if result: detected_version: OpenPypeVersion - detected_version = result[1] + detected_version = result if item.is_dir() and not self._is_openpype_in_dir( item, detected_version diff --git a/igniter/tools.py b/igniter/tools.py index 04d7451335..3e862f5803 100644 --- a/igniter/tools.py +++ b/igniter/tools.py @@ -59,7 +59,7 @@ def validate_mongo_connection(cnx: str) -> (bool, str): return False, "Not mongodb schema" kwargs = { - "serverSelectionTimeoutMS": 2000 + "serverSelectionTimeoutMS": os.environ.get("AVALON_TIMEOUT", 2000) } # Add certificate path if should be required if should_add_certificate_path_to_mongo_url(cnx): diff --git a/igniter/version.py b/igniter/version.py index 56d58f7f60..8e7731f6d6 100644 --- a/igniter/version.py +++ b/igniter/version.py @@ -1,4 +1,4 @@ # -*- coding: utf-8 -*- """Definition of Igniter version.""" -__version__ = "1.0.1" +__version__ = "1.0.2" diff --git a/openpype/api.py 
b/openpype/api.py index e4bbb104a3..a6529202ff 100644 --- a/openpype/api.py +++ b/openpype/api.py @@ -17,6 +17,7 @@ from .lib import ( version_up, get_asset, get_hierarchy, + get_workdir_data, get_version_from_path, get_last_version_from_path, get_app_environments_for_context, diff --git a/openpype/cli.py b/openpype/cli.py index bc23fdf2b1..4c4dc1a3c6 100644 --- a/openpype/cli.py +++ b/openpype/cli.py @@ -158,7 +158,9 @@ def extractenvironments(output_json_path, project, asset, task, app): @click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("-t", "--targets", help="Targets module", default=None, multiple=True) -def publish(debug, paths, targets): +@click.option("-g", "--gui", is_flag=True, + help="Show Publish UI", default=False) +def publish(debug, paths, targets, gui): """Start CLI publishing. Publish collects json from paths provided as an argument. @@ -166,7 +168,7 @@ def publish(debug, paths, targets): """ if debug: os.environ['OPENPYPE_DEBUG'] = '3' - PypeCommands.publish(list(paths), targets) + PypeCommands.publish(list(paths), targets, gui) @main.command() @@ -357,3 +359,40 @@ def run(script): def runtests(folder, mark, pyargs): """Run all automatic tests after proper initialization via start.py""" PypeCommands().run_tests(folder, mark, pyargs) + + +@main.command() +@click.option("-d", "--debug", + is_flag=True, help=("Run process in debug mode")) +@click.option("-a", "--active_site", required=True, + help="Name of active stie") +def syncserver(debug, active_site): + """Run sync site server in background. + + Some Site Sync use cases need to expose site to another one. + For example if majority of artists work in studio, they are not using + SS at all, but if you want to expose published assets to 'studio' site + to SFTP for only a couple of artists, some background process must + mark published assets to live on multiple sites (they might be + physically in same location - mounted shared disk). 
+ + Process mimics OP Tray with specific 'active_site' name, all + configuration for this "dummy" user comes from Setting or Local + Settings (configured by starting OP Tray with env + var OPENPYPE_LOCAL_ID set to 'active_site'. + """ + if debug: + os.environ['OPENPYPE_DEBUG'] = '3' + PypeCommands().syncserver(active_site) + + +@main.command() +@click.argument("directory") +def repack_version(directory): + """Repack OpenPype version from directory. + + This command will re-create zip file from specified directory, + recalculating file checksums. It will try to use version detected in + directory name. + """ + PypeCommands().repack_version(directory) diff --git a/openpype/hosts/aftereffects/api/__init__.py b/openpype/hosts/aftereffects/api/__init__.py index 5f6a64a6d0..b1edb91a5c 100644 --- a/openpype/hosts/aftereffects/api/__init__.py +++ b/openpype/hosts/aftereffects/api/__init__.py @@ -4,7 +4,7 @@ import logging from avalon import io from avalon import api as avalon -from avalon.vendor import Qt +from Qt import QtWidgets from openpype import lib, api import pyblish.api as pyblish import openpype.hosts.aftereffects @@ -41,10 +41,10 @@ def check_inventory(): # Warn about outdated containers. print("Starting new QApplication..") - app = Qt.QtWidgets.QApplication(sys.argv) + app = QtWidgets.QApplication(sys.argv) - message_box = Qt.QtWidgets.QMessageBox() - message_box.setIcon(Qt.QtWidgets.QMessageBox.Warning) + message_box = QtWidgets.QMessageBox() + message_box.setIcon(QtWidgets.QMessageBox.Warning) msg = "There are outdated containers in the scene." 
message_box.setText(msg) message_box.exec_() diff --git a/openpype/hosts/aftereffects/plugins/create/create_render.py b/openpype/hosts/aftereffects/plugins/create/create_render.py index 4234ee0f0c..b796e9eaac 100644 --- a/openpype/hosts/aftereffects/plugins/create/create_render.py +++ b/openpype/hosts/aftereffects/plugins/create/create_render.py @@ -1,5 +1,5 @@ import openpype.api -from avalon.vendor import Qt +from Qt import QtWidgets from avalon import aftereffects import logging @@ -56,7 +56,7 @@ class CreateRender(openpype.api.Creator): stub.rename_item(item.id, stub.PUBLISH_ICON + self.data["subset"]) def _show_msg(self, txt): - msg = Qt.QtWidgets.QMessageBox() - msg.setIcon(Qt.QtWidgets.QMessageBox.Warning) + msg = QtWidgets.QMessageBox() + msg.setIcon(QtWidgets.QMessageBox.Warning) msg.setText(txt) msg.exec_() diff --git a/openpype/hosts/fusion/api/lib.py b/openpype/hosts/fusion/api/lib.py index 77866fde9d..7afcdd82ea 100644 --- a/openpype/hosts/fusion/api/lib.py +++ b/openpype/hosts/fusion/api/lib.py @@ -1,6 +1,6 @@ import sys -from avalon.vendor.Qt import QtGui +from Qt import QtGui import avalon.fusion from avalon import io diff --git a/openpype/hosts/fusion/plugins/inventory/set_tool_color.py b/openpype/hosts/fusion/plugins/inventory/set_tool_color.py index 940a0e9941..9fc7012db7 100644 --- a/openpype/hosts/fusion/plugins/inventory/set_tool_color.py +++ b/openpype/hosts/fusion/plugins/inventory/set_tool_color.py @@ -1,5 +1,5 @@ from avalon import api, style -from avalon.vendor.Qt import QtGui, QtWidgets +from Qt import QtGui, QtWidgets import avalon.fusion diff --git a/openpype/hosts/fusion/scripts/set_rendermode.py b/openpype/hosts/fusion/scripts/set_rendermode.py index cb104445a8..73eec528a2 100644 --- a/openpype/hosts/fusion/scripts/set_rendermode.py +++ b/openpype/hosts/fusion/scripts/set_rendermode.py @@ -1,4 +1,4 @@ -from avalon.vendor.Qt import QtWidgets +from Qt import QtWidgets from avalon.vendor import qtawesome import avalon.fusion as avalon 
diff --git a/openpype/hosts/fusion/utility_scripts/switch_ui.py b/openpype/hosts/fusion/utility_scripts/switch_ui.py index e0b6b3f882..2be91af32a 100644 --- a/openpype/hosts/fusion/utility_scripts/switch_ui.py +++ b/openpype/hosts/fusion/utility_scripts/switch_ui.py @@ -2,12 +2,13 @@ import os import glob import logging +from Qt import QtWidgets, QtCore + import avalon.io as io import avalon.api as api import avalon.pipeline as pipeline import avalon.fusion import avalon.style as style -from avalon.vendor.Qt import QtWidgets, QtCore from avalon.vendor import qtawesome as qta diff --git a/openpype/hosts/harmony/plugins/publish/collect_farm_render.py b/openpype/hosts/harmony/plugins/publish/collect_farm_render.py index fc80e7c029..31a249591e 100644 --- a/openpype/hosts/harmony/plugins/publish/collect_farm_render.py +++ b/openpype/hosts/harmony/plugins/publish/collect_farm_render.py @@ -126,7 +126,8 @@ class CollectFarmRender(openpype.lib.abstract_collect_render. # because of using 'renderFarm' as a family, replace 'Farm' with # capitalized task name - issue of avalon-core Creator app subset_name = node.split("/")[1] - task_name = context.data["anatomyData"]["task"].capitalize() + task_name = context.data["anatomyData"]["task"][ + "name"].capitalize() replace_str = "" if task_name.lower() not in subset_name.lower(): replace_str = task_name diff --git a/openpype/hosts/harmony/plugins/publish/collect_palettes.py b/openpype/hosts/harmony/plugins/publish/collect_palettes.py index b8671badb3..e47cbaf17e 100644 --- a/openpype/hosts/harmony/plugins/publish/collect_palettes.py +++ b/openpype/hosts/harmony/plugins/publish/collect_palettes.py @@ -28,7 +28,7 @@ class CollectPalettes(pyblish.api.ContextPlugin): # skip collecting if not in allowed task if self.allowed_tasks: - task_name = context.data["anatomyData"]["task"].lower() + task_name = context.data["anatomyData"]["task"]["name"].lower() if (not any([re.search(pattern, task_name) for pattern in self.allowed_tasks])): 
return diff --git a/openpype/hosts/hiero/api/lib.py b/openpype/hosts/hiero/api/lib.py index 876fae5da9..21b65e5c96 100644 --- a/openpype/hosts/hiero/api/lib.py +++ b/openpype/hosts/hiero/api/lib.py @@ -5,13 +5,13 @@ import os import re import sys import ast +import shutil import hiero +from Qt import QtWidgets import avalon.api as avalon import avalon.io -from avalon.vendor.Qt import QtWidgets from openpype.api import (Logger, Anatomy, get_anatomy_settings) from . import tags -import shutil from compiler.ast import flatten try: @@ -30,6 +30,7 @@ self = sys.modules[__name__] self._has_been_setup = False self._has_menu = False self._registered_gui = None +self._parent = None self.pype_tag_name = "openpypeData" self.default_sequence_name = "openpypeSequence" self.default_bin_name = "openpypeBin" @@ -1029,3 +1030,15 @@ def before_project_save(event): # also mark old versions of loaded containers check_inventory_versions() + + +def get_main_window(): + """Acquire Nuke's main window""" + if self._parent is None: + top_widgets = QtWidgets.QApplication.topLevelWidgets() + name = "Foundry::UI::DockMainWindow" + main_window = next(widget for widget in top_widgets if + widget.inherits("QMainWindow") and + widget.metaObject().className() == name) + self._parent = main_window + return self._parent diff --git a/openpype/hosts/hiero/api/menu.py b/openpype/hosts/hiero/api/menu.py index e3de220777..5aaab7a2e5 100644 --- a/openpype/hosts/hiero/api/menu.py +++ b/openpype/hosts/hiero/api/menu.py @@ -37,12 +37,16 @@ def menu_install(): Installing menu into Hiero """ + from Qt import QtGui from . 
import ( publish, launch_workfiles_app, reload_config, apply_colorspace_project, apply_colorspace_clips ) + from .lib import get_main_window + + main_window = get_main_window() + # here is the best place to add menu - from avalon.vendor.Qt import QtGui menu_name = os.environ['AVALON_LABEL'] @@ -86,18 +90,24 @@ def menu_install(): creator_action = menu.addAction("Create ...") creator_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) - creator_action.triggered.connect(host_tools.show_creator) + creator_action.triggered.connect( + lambda: host_tools.show_creator(parent=main_window) + ) loader_action = menu.addAction("Load ...") loader_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) - loader_action.triggered.connect(host_tools.show_loader) + loader_action.triggered.connect( + lambda: host_tools.show_loader(parent=main_window) + ) sceneinventory_action = menu.addAction("Manage ...") sceneinventory_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) - sceneinventory_action.triggered.connect(host_tools.show_scene_inventory) - menu.addSeparator() + sceneinventory_action.triggered.connect( + lambda: host_tools.show_scene_inventory(parent=main_window) + ) if os.getenv("OPENPYPE_DEVELOP"): + menu.addSeparator() reload_action = menu.addAction("Reload pipeline") reload_action.setIcon(QtGui.QIcon("icons:ColorAdd.png")) reload_action.triggered.connect(reload_config) @@ -110,3 +120,10 @@ def menu_install(): apply_colorspace_c_action = menu.addAction("Apply Colorspace Clips") apply_colorspace_c_action.setIcon(QtGui.QIcon("icons:ColorAdd.png")) apply_colorspace_c_action.triggered.connect(apply_colorspace_clips) + + menu.addSeparator() + + exeprimental_action = menu.addAction("Experimental tools...") + exeprimental_action.triggered.connect( + lambda: host_tools.show_experimental_tools_dialog(parent=main_window) + ) diff --git a/openpype/hosts/hiero/api/pipeline.py b/openpype/hosts/hiero/api/pipeline.py index 6f6588e1be..d52cb68ba7 100644 --- 
a/openpype/hosts/hiero/api/pipeline.py +++ b/openpype/hosts/hiero/api/pipeline.py @@ -209,9 +209,11 @@ def update_container(track_item, data=None): def launch_workfiles_app(*args): ''' Wrapping function for workfiles launcher ''' + from .lib import get_main_window + main_window = get_main_window() # show workfile gui - host_tools.show_workfiles() + host_tools.show_workfiles(parent=main_window) def publish(parent): diff --git a/openpype/hosts/hiero/api/plugin.py b/openpype/hosts/hiero/api/plugin.py index c46ef9abfa..75d1c1b18f 100644 --- a/openpype/hosts/hiero/api/plugin.py +++ b/openpype/hosts/hiero/api/plugin.py @@ -6,6 +6,7 @@ from avalon.vendor import qargparse import avalon.api as avalon import openpype.api as openpype from . import lib +from copy import deepcopy log = openpype.Logger().get_logger(__name__) @@ -799,7 +800,8 @@ class PublishClip: # increasing steps by index of rename iteration self.count_steps *= self.rename_index - hierarchy_formating_data = dict() + hierarchy_formating_data = {} + hierarchy_data = deepcopy(self.hierarchy_data) _data = self.track_item_default_data.copy() if self.ui_inputs: # adding tag metadata from ui @@ -824,19 +826,19 @@ class PublishClip: _data.update({"shot": self.shot_num}) # solve # in test to pythonic expression - for _k, _v in self.hierarchy_data.items(): + for _k, _v in hierarchy_data.items(): if "#" not in _v["value"]: continue - self.hierarchy_data[ + hierarchy_data[ _k]["value"] = self._replace_hash_to_expression( _k, _v["value"]) # fill up pythonic expresisons in hierarchy data - for k, _v in self.hierarchy_data.items(): + for k, _v in hierarchy_data.items(): hierarchy_formating_data[k] = _v["value"].format(**_data) else: # if no gui mode then just pass default data - hierarchy_formating_data = self.hierarchy_data + hierarchy_formating_data = hierarchy_data tag_hierarchy_data = self._solve_tag_hierarchy_data( hierarchy_formating_data @@ -886,30 +888,38 @@ class PublishClip: "families": [self.data["family"]] } - 
def _convert_to_entity(self, key): + def _convert_to_entity(self, type, template): """ Converting input key to key with type. """ # convert to entity type - entity_type = self.types.get(key, None) + entity_type = self.types.get(type, None) assert entity_type, "Missing entity type for `{}`".format( - key + type ) + # first collect formating data to use for formating template + formating_data = {} + for _k, _v in self.hierarchy_data.items(): + value = _v["value"].format( + **self.track_item_default_data) + formating_data[_k] = value + return { "entity_type": entity_type, - "entity_name": self.hierarchy_data[key]["value"].format( - **self.track_item_default_data + "entity_name": template.format( + **formating_data ) } def _create_parents(self): """ Create parents and return it in list. """ - self.parents = list() + self.parents = [] patern = re.compile(self.parents_search_patern) - par_split = [patern.findall(t).pop() + + par_split = [(patern.findall(t).pop(), t) for t in self.hierarchy.split("/")] - for key in par_split: - parent = self._convert_to_entity(key) + for type, template in par_split: + parent = self._convert_to_entity(type, template) self.parents.append(parent) diff --git a/openpype/hosts/houdini/api/usd.py b/openpype/hosts/houdini/api/usd.py index 6f808779ea..a992f1d082 100644 --- a/openpype/hosts/houdini/api/usd.py +++ b/openpype/hosts/houdini/api/usd.py @@ -3,9 +3,10 @@ import contextlib import logging -from Qt import QtCore, QtGui -from openpype.tools.utils.widgets import AssetWidget -from avalon import style, io +from Qt import QtWidgets, QtCore, QtGui +from avalon import io +from openpype import style +from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget from pxr import Sdf @@ -13,6 +14,60 @@ from pxr import Sdf log = logging.getLogger(__name__) +class SelectAssetDialog(QtWidgets.QWidget): + """Frameless assets dialog to select asset with double click. + + Args: + parm: Parameter where selected asset name is set. 
+ """ + def __init__(self, parm): + self.setWindowTitle("Pick Asset") + self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.Popup) + + assets_widget = SingleSelectAssetsWidget(io, parent=self) + + layout = QtWidgets.QHBoxLayout(self) + layout.addWidget(assets_widget) + + assets_widget.double_clicked.connect(self._set_parameter) + self._assets_widget = assets_widget + self._parm = parm + + def _set_parameter(self): + name = self._assets_widget.get_selected_asset_name() + self._parm.set(name) + self.close() + + def _on_show(self): + pos = QtGui.QCursor.pos() + # Select the current asset if there is any + select_id = None + name = self._parm.eval() + if name: + db_asset = io.find_one( + {"name": name, "type": "asset"}, + {"_id": True} + ) + if db_asset: + select_id = db_asset["_id"] + + # Set stylesheet + self.setStyleSheet(style.load_stylesheet()) + # Refresh assets (is threaded) + self._assets_widget.refresh() + # Select asset - must be done after refresh + if select_id is not None: + self._assets_widget.select_asset(select_id) + + # Show cursor (top right of window) near cursor + self.resize(250, 400) + self.move(self.mapFromGlobal(pos) - QtCore.QPoint(self.width(), 0)) + + def showEvent(self, event): + super(SelectAssetDialog, self).showEvent(event) + self._on_show() + + def pick_asset(node): """Show a user interface to select an Asset in the project @@ -21,43 +76,15 @@ def pick_asset(node): """ - pos = QtGui.QCursor.pos() - parm = node.parm("asset_name") if not parm: log.error("Node has no 'asset' parameter: %s", node) return - # Construct the AssetWidget as a frameless popup so it automatically + # Construct a frameless popup so it automatically # closes when clicked outside of it. 
global tool - tool = AssetWidget(io) - tool.setContentsMargins(5, 5, 5, 5) - tool.setWindowTitle("Pick Asset") - tool.setStyleSheet(style.load_stylesheet()) - tool.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.Popup) - tool.refresh() - - # Select the current asset if there is any - name = parm.eval() - if name: - db_asset = io.find_one({"name": name, "type": "asset"}) - if db_asset: - silo = db_asset.get("silo") - if silo: - tool.set_silo(silo) - tool.select_assets([name], expand=True) - - # Show cursor (top right of window) near cursor - tool.resize(250, 400) - tool.move(tool.mapFromGlobal(pos) - QtCore.QPoint(tool.width(), 0)) - - def set_parameter_callback(index): - name = index.data(tool.model.DocumentRole)["name"] - parm.set(name) - tool.close() - - tool.view.doubleClicked.connect(set_parameter_callback) + tool = SelectAssetDialog(parm) tool.show() diff --git a/openpype/hosts/houdini/startup/MainMenuCommon.xml b/openpype/hosts/houdini/startup/MainMenuCommon.xml index 2b556a2e75..c34310cf72 100644 --- a/openpype/hosts/houdini/startup/MainMenuCommon.xml +++ b/openpype/hosts/houdini/startup/MainMenuCommon.xml @@ -67,6 +67,16 @@ from avalon.houdini import pipeline pipeline.reload_pipeline()]]> + + + + + + diff --git a/openpype/hosts/maya/api/__init__.py b/openpype/hosts/maya/api/__init__.py index e330904abf..b25fd44217 100644 --- a/openpype/hosts/maya/api/__init__.py +++ b/openpype/hosts/maya/api/__init__.py @@ -138,7 +138,7 @@ def on_save(_): def on_open(_): """On scene open let's assume the containers have changed.""" - from avalon.vendor.Qt import QtWidgets + from Qt import QtWidgets from openpype.widgets import popup cmds.evalDeferred( diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py index 4074aa7fa8..52ebcaff64 100644 --- a/openpype/hosts/maya/api/lib.py +++ b/openpype/hosts/maya/api/lib.py @@ -6,19 +6,19 @@ import platform import uuid import math -import bson import json import logging import itertools import contextlib 
from collections import OrderedDict, defaultdict from math import ceil +from six import string_types +import bson from maya import cmds, mel import maya.api.OpenMaya as om from avalon import api, maya, io, pipeline -from avalon.vendor.six import string_types import avalon.maya.lib import avalon.maya.interactive @@ -1936,7 +1936,7 @@ def validate_fps(): if current_fps != fps: - from avalon.vendor.Qt import QtWidgets + from Qt import QtWidgets from ...widgets import popup # Find maya main window @@ -2694,7 +2694,7 @@ def update_content_on_context_change(): def show_message(title, msg): - from avalon.vendor.Qt import QtWidgets + from Qt import QtWidgets from openpype.widgets import message_window # Find maya main window diff --git a/openpype/hosts/maya/api/lib_renderproducts.py b/openpype/hosts/maya/api/lib_renderproducts.py index 4983109d58..e8e4b9aaef 100644 --- a/openpype/hosts/maya/api/lib_renderproducts.py +++ b/openpype/hosts/maya/api/lib_renderproducts.py @@ -180,6 +180,7 @@ class ARenderProducts: self.layer = layer self.render_instance = render_instance self.multipart = False + self.aov_separator = render_instance.data.get("aovSeparator", "_") # Initialize self.layer_data = self._get_layer_data() @@ -676,7 +677,7 @@ class RenderProductsVray(ARenderProducts): """ prefix = super(RenderProductsVray, self).get_renderer_prefix() - prefix = "{}.".format(prefix) + prefix = "{}{}".format(prefix, self.aov_separator) return prefix def _get_layer_data(self): diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 4fd4b9d986..85919d1166 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -21,6 +21,7 @@ from openpype.api import ( from openpype.modules import ModulesManager from avalon.api import Session +from avalon.api import CreatorError class CreateRender(plugin.Creator): @@ -81,13 +82,21 @@ class CreateRender(plugin.Creator): } 
_image_prefixes = { - 'mentalray': 'maya///_', + 'mentalray': 'maya///{aov_separator}', # noqa 'vray': 'maya///', - 'arnold': 'maya///_', - 'renderman': 'maya///_', - 'redshift': 'maya///_' + 'arnold': 'maya///{aov_separator}', # noqa + 'renderman': 'maya///{aov_separator}', + 'redshift': 'maya///{aov_separator}' # noqa } + _aov_chars = { + "dot": ".", + "dash": "-", + "underscore": "_" + } + + _project_settings = None + def __init__(self, *args, **kwargs): """Constructor.""" super(CreateRender, self).__init__(*args, **kwargs) @@ -95,12 +104,24 @@ class CreateRender(plugin.Creator): if not deadline_settings["enabled"]: self.deadline_servers = {} return - project_settings = get_project_settings(Session["AVALON_PROJECT"]) + self._project_settings = get_project_settings( + Session["AVALON_PROJECT"]) + + # project_settings/maya/create/CreateRender/aov_separator + try: + self.aov_separator = self._aov_chars[( + self._project_settings["maya"] + ["create"] + ["CreateRender"] + ["aov_separator"] + )] + except KeyError: + self.aov_separator = "_" + try: default_servers = deadline_settings["deadline_urls"] project_servers = ( - project_settings["deadline"] - ["deadline_servers"] + self._project_settings["deadline"]["deadline_servers"] ) self.deadline_servers = { k: default_servers[k] @@ -409,8 +430,10 @@ class CreateRender(plugin.Creator): renderer (str): Renderer name. """ + prefix = self._image_prefixes[renderer] + prefix = prefix.replace("{aov_separator}", self.aov_separator) cmds.setAttr(self._image_prefix_nodes[renderer], - self._image_prefixes[renderer], + prefix, type="string") asset = get_asset() @@ -446,37 +469,37 @@ class CreateRender(plugin.Creator): self._set_global_output_settings() - @staticmethod - def _set_renderer_option(renderer_node, arg=None, value=None): - # type: (str, str, str) -> str - """Set option on renderer node. - - If renderer settings node doesn't exists, it is created first. - - Args: - renderer_node (str): Renderer name. 
- arg (str, optional): Argument name. - value (str, optional): Argument value. - - Returns: - str: Renderer settings node. - - """ - settings = cmds.ls(type=renderer_node) - result = settings[0] if settings else cmds.createNode(renderer_node) - cmds.setAttr(arg.format(result), value) - return result - def _set_vray_settings(self, asset): # type: (dict) -> None """Sets important settings for Vray.""" - node = self._set_renderer_option( - "VRaySettingsNode", "{}.fileNameRenderElementSeparator", "_" - ) + settings = cmds.ls(type="VRaySettingsNode") + node = settings[0] if settings else cmds.createNode("VRaySettingsNode") + # set separator + # set it in vray menu + if cmds.optionMenuGrp("vrayRenderElementSeparator", exists=True, + q=True): + items = cmds.optionMenuGrp( + "vrayRenderElementSeparator", ill=True, query=True) + + separators = [cmds.menuItem(i, label=True, query=True) for i in items] # noqa: E501 + try: + sep_idx = separators.index(self.aov_separator) + except ValueError: + raise CreatorError( + "AOV character {} not in {}".format( + self.aov_separator, separators)) + + cmds.optionMenuGrp( + "vrayRenderElementSeparator", sl=sep_idx + 1, edit=True) + cmds.setAttr( + "{}.fileNameRenderElementSeparator".format(node), + self.aov_separator, + type="string" + ) # set format to exr cmds.setAttr( - "{}.imageFormatStr".format(node), 5) + "{}.imageFormatStr".format(node), "exr", type="string") # animType cmds.setAttr( diff --git a/openpype/hosts/maya/plugins/load/actions.py b/openpype/hosts/maya/plugins/load/actions.py index d4525511f4..1a9adf6142 100644 --- a/openpype/hosts/maya/plugins/load/actions.py +++ b/openpype/hosts/maya/plugins/load/actions.py @@ -133,7 +133,7 @@ class ImportMayaLoader(api.Loader): """ - from avalon.vendor.Qt import QtWidgets + from Qt import QtWidgets accept = QtWidgets.QMessageBox.Ok buttons = accept | QtWidgets.QMessageBox.Cancel diff --git a/openpype/hosts/maya/plugins/publish/collect_look.py 
b/openpype/hosts/maya/plugins/publish/collect_look.py index 9c047b252f..20a9d4ca12 100644 --- a/openpype/hosts/maya/plugins/publish/collect_look.py +++ b/openpype/hosts/maya/plugins/publish/collect_look.py @@ -532,7 +532,7 @@ class CollectLook(pyblish.api.InstancePlugin): color_space = cmds.getAttr(color_space_attr) except ValueError: # node doesn't have colorspace attribute - color_space = "raw" + color_space = "Raw" # Compare with the computed file path, e.g. the one with the # pattern in it, to generate some logging information about this # difference diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index d2f277329a..580d459a90 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -41,6 +41,7 @@ Provides: import re import os +import platform import json from maya import cmds @@ -61,6 +62,12 @@ class CollectMayaRender(pyblish.api.ContextPlugin): label = "Collect Render Layers" sync_workfile_version = False + _aov_chars = { + "dot": ".", + "dash": "-", + "underscore": "_" + } + def process(self, context): """Entry point to collector.""" render_instance = None @@ -166,6 +173,18 @@ class CollectMayaRender(pyblish.api.ContextPlugin): if renderer.startswith("renderman"): renderer = "renderman" + try: + aov_separator = self._aov_chars[( + context.data["project_settings"] + ["create"] + ["CreateRender"] + ["aov_separator"] + )] + except KeyError: + aov_separator = "_" + + render_instance.data["aovSeparator"] = aov_separator + # return all expected files for all cameras and aovs in given # frame range layer_render_products = get_layer_render_products( @@ -255,12 +274,28 @@ class CollectMayaRender(pyblish.api.ContextPlugin): common_publish_meta_path, part) if part == expected_layer_name: break + + # TODO: replace this terrible linux hotfix with real solution :) + if platform.system().lower() in ["linux", "darwin"]: + 
common_publish_meta_path = "/" + common_publish_meta_path + self.log.info( "Publish meta path: {}".format(common_publish_meta_path)) self.log.info(full_exp_files) self.log.info("collecting layer: {}".format(layer_name)) # Get layer specific settings, might be overrides + + try: + aov_separator = self._aov_chars[( + context.data["project_settings"] + ["create"] + ["CreateRender"] + ["aov_separator"] + )] + except KeyError: + aov_separator = "_" + data = { "subset": expected_layer_name, "attachTo": attach_to, @@ -302,7 +337,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin): "convertToScanline") or False, "useReferencedAovs": render_instance.data.get( "useReferencedAovs") or render_instance.data.get( - "vrayUseReferencedAovs") or False + "vrayUseReferencedAovs") or False, + "aovSeparator": aov_separator } if deadline_url: diff --git a/openpype/hosts/maya/plugins/publish/collect_yeti_rig.py b/openpype/hosts/maya/plugins/publish/collect_yeti_rig.py index 0d240b1a32..029432223b 100644 --- a/openpype/hosts/maya/plugins/publish/collect_yeti_rig.py +++ b/openpype/hosts/maya/plugins/publish/collect_yeti_rig.py @@ -275,7 +275,7 @@ class CollectYetiRig(pyblish.api.InstancePlugin): list: file sequence. 
""" - from avalon.vendor import clique + import clique escaped = re.escape(filepath) re_pattern = escaped.replace(pattern, "-?[0-9]+") diff --git a/openpype/hosts/maya/plugins/publish/extract_look.py b/openpype/hosts/maya/plugins/publish/extract_look.py index e0b85907e9..2407617b6f 100644 --- a/openpype/hosts/maya/plugins/publish/extract_look.py +++ b/openpype/hosts/maya/plugins/publish/extract_look.py @@ -332,10 +332,10 @@ class ExtractLook(openpype.api.Extractor): if do_maketx and files_metadata[filepath]["color_space"].lower() == "srgb": # noqa: E501 linearize = True # set its file node to 'raw' as tx will be linearized - files_metadata[filepath]["color_space"] = "raw" + files_metadata[filepath]["color_space"] = "Raw" - if do_maketx: - color_space = "raw" + # if do_maketx: + # color_space = "Raw" source, mode, texture_hash = self._process_texture( filepath, @@ -383,11 +383,11 @@ class ExtractLook(openpype.api.Extractor): color_space = cmds.getAttr(color_space_attr) except ValueError: # node doesn't have color space attribute - color_space = "raw" + color_space = "Raw" else: - if files_metadata[source]["color_space"] == "raw": + if files_metadata[source]["color_space"] == "Raw": # set color space to raw if we linearized it - color_space = "raw" + color_space = "Raw" # Remap file node filename to destination remap[color_space_attr] = color_space attr = resource["attribute"] diff --git a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py index 207cf56cfe..ac3de4114c 100644 --- a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py +++ b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py @@ -1,13 +1,14 @@ import os import json import getpass -import appdirs import platform +import appdirs +import requests + from maya import cmds from avalon import api -from avalon.vendor import requests import pyblish.api from openpype.hosts.maya.api import lib diff --git 
a/openpype/hosts/maya/plugins/publish/validate_assembly_transforms.py b/openpype/hosts/maya/plugins/publish/validate_assembly_transforms.py index 00600a6f62..dca59b147b 100644 --- a/openpype/hosts/maya/plugins/publish/validate_assembly_transforms.py +++ b/openpype/hosts/maya/plugins/publish/validate_assembly_transforms.py @@ -89,8 +89,8 @@ class ValidateAssemblyModelTransforms(pyblish.api.InstancePlugin): """ + from Qt import QtWidgets from openpype.hosts.maya.api import lib - from avalon.vendor.Qt import QtWidgets # Store namespace in variable, cosmetics thingy messagebox = QtWidgets.QMessageBox diff --git a/openpype/hosts/maya/plugins/publish/validate_muster_connection.py b/openpype/hosts/maya/plugins/publish/validate_muster_connection.py index 1a7ee11230..af32c82f97 100644 --- a/openpype/hosts/maya/plugins/publish/validate_muster_connection.py +++ b/openpype/hosts/maya/plugins/publish/validate_muster_connection.py @@ -1,9 +1,10 @@ import os import json + import appdirs +import requests import pyblish.api -from avalon.vendor import requests from openpype.plugin import contextplugin_should_run import openpype.hosts.maya.api.action diff --git a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py index 65ddacfc57..6079d34fbe 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py +++ b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py @@ -55,13 +55,19 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): ImagePrefixTokens = { - 'arnold': 'maya///_', + 'arnold': 'maya///{aov_separator}', # noqa 'redshift': 'maya///', 'vray': 'maya///', - 'renderman': '_..' + 'renderman': '{aov_separator}..' # noqa } - redshift_AOV_prefix = "/_" + _aov_chars = { + "dot": ".", + "dash": "-", + "underscore": "_" + } + + redshift_AOV_prefix = "/{aov_separator}" # noqa: E501 # WARNING: There is bug? 
in renderman, translating token # to something left behind mayas default image prefix. So instead @@ -107,6 +113,9 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): anim_override = lib.get_attr_in_layer("defaultRenderGlobals.animation", layer=layer) + + prefix = prefix.replace( + "{aov_separator}", instance.data.get("aovSeparator", "_")) if not anim_override: invalid = True cls.log.error("Animation needs to be enabled. Use the same " @@ -138,12 +147,16 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): else: node = vray_settings[0] - if cmds.getAttr( - "{}.fileNameRenderElementSeparator".format(node)) != "_": - invalid = False + scene_sep = cmds.getAttr( + "{}.fileNameRenderElementSeparator".format(node)) + if scene_sep != instance.data.get("aovSeparator", "_"): cls.log.error("AOV separator is not set correctly.") + invalid = True if renderer == "redshift": + redshift_AOV_prefix = cls.redshift_AOV_prefix.replace( + "{aov_separator}", instance.data.get("aovSeparator", "_") + ) if re.search(cls.R_AOV_TOKEN, prefix): invalid = True cls.log.error(("Do not use AOV token [ {} ] - " @@ -155,7 +168,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): for aov in rs_aovs: aov_prefix = cmds.getAttr("{}.filePrefix".format(aov)) # check their image prefix - if aov_prefix != cls.redshift_AOV_prefix: + if aov_prefix != redshift_AOV_prefix: cls.log.error(("AOV ({}) image prefix is not set " "correctly {} != {}").format( cmds.getAttr("{}.name".format(aov)), @@ -181,7 +194,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): file_prefix = cmds.getAttr("rmanGlobals.imageFileFormat") dir_prefix = cmds.getAttr("rmanGlobals.imageOutputDir") - if file_prefix.lower() != cls.ImagePrefixTokens[renderer].lower(): + if file_prefix.lower() != prefix.lower(): invalid = True cls.log.error("Wrong image prefix [ {} ]".format(file_prefix)) @@ -198,18 +211,20 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): cls.log.error("Wrong image prefix [ {} ] 
- " "You can't use '' token " "with merge AOVs turned on".format(prefix)) - else: - if not re.search(cls.R_AOV_TOKEN, prefix): - invalid = True - cls.log.error("Wrong image prefix [ {} ] - " - "doesn't have: '' or " - "token".format(prefix)) + elif not re.search(cls.R_AOV_TOKEN, prefix): + invalid = True + cls.log.error("Wrong image prefix [ {} ] - " + "doesn't have: '' or " + "token".format(prefix)) # prefix check - if prefix.lower() != cls.ImagePrefixTokens[renderer].lower(): + default_prefix = cls.ImagePrefixTokens[renderer] + default_prefix = default_prefix.replace( + "{aov_separator}", instance.data.get("aovSeparator", "_")) + if prefix.lower() != default_prefix.lower(): cls.log.warning("warning: prefix differs from " "recommended {}".format( - cls.ImagePrefixTokens[renderer])) + default_prefix)) if padding != cls.DEFAULT_PADDING: invalid = True @@ -257,9 +272,14 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): @classmethod def repair(cls, instance): - renderer = instance.data['renderer'] layer_node = instance.data['setMembers'] + redshift_AOV_prefix = cls.redshift_AOV_prefix.replace( + "{aov_separator}", instance.data.get("aovSeparator", "_") + ) + default_prefix = cls.ImagePrefixTokens[renderer].replace( + "{aov_separator}", instance.data.get("aovSeparator", "_") + ) with lib.renderlayer(layer_node): default = lib.RENDER_ATTRS['default'] @@ -270,7 +290,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): node = render_attrs["node"] prefix_attr = render_attrs["prefix"] - fname_prefix = cls.ImagePrefixTokens[renderer] + fname_prefix = default_prefix cmds.setAttr("{}.{}".format(node, prefix_attr), fname_prefix, type="string") @@ -281,7 +301,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): else: # renderman handles stuff differently cmds.setAttr("rmanGlobals.imageFileFormat", - cls.ImagePrefixTokens[renderer], + default_prefix, type="string") cmds.setAttr("rmanGlobals.imageOutputDir", cls.RendermanDirPrefix, @@ -294,10 +314,13 
@@ class ValidateRenderSettings(pyblish.api.InstancePlugin): else: node = vray_settings[0] + cmds.optionMenuGrp("vrayRenderElementSeparator", + v=instance.data.get("aovSeparator", "_")) cmds.setAttr( "{}.fileNameRenderElementSeparator".format( node), - "_" + instance.data.get("aovSeparator", "_"), + type="string" ) if renderer == "redshift": @@ -306,7 +329,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): for aov in rs_aovs: # fix AOV prefixes cmds.setAttr( - "{}.filePrefix".format(aov), cls.redshift_AOV_prefix) + "{}.filePrefix".format(aov), redshift_AOV_prefix) # fix AOV file format default_ext = cmds.getAttr( "redshiftOptions.imageFormat", asString=True) diff --git a/openpype/hosts/nuke/api/__init__.py b/openpype/hosts/nuke/api/__init__.py index e6dab5cfc9..e684b48fa3 100644 --- a/openpype/hosts/nuke/api/__init__.py +++ b/openpype/hosts/nuke/api/__init__.py @@ -70,7 +70,8 @@ def install(): family_states = [ "write", "review", - "nukenodes" + "nukenodes", + "model", "gizmo" ] diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py index 6d593ca588..e36a5aa5ba 100644 --- a/openpype/hosts/nuke/api/lib.py +++ b/openpype/hosts/nuke/api/lib.py @@ -18,7 +18,7 @@ from openpype.api import ( BuildWorkfile, get_version_from_path, get_anatomy_settings, - get_hierarchy, + get_workdir_data, get_asset, get_current_project_settings, ApplicationManager @@ -41,6 +41,10 @@ opnl.workfiles_launched = False opnl._node_tab_name = "{}".format(os.getenv("AVALON_LABEL") or "Avalon") +def get_nuke_imageio_settings(): + return get_anatomy_settings(opnl.project_name)["imageio"]["nuke"] + + def get_created_node_imageio_setting(**kwarg): ''' Get preset data for dataflow (fileType, compression, bitDepth) ''' @@ -51,8 +55,7 @@ def get_created_node_imageio_setting(**kwarg): assert any([creator, nodeclass]), nuke.message( "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__)) - imageio = get_anatomy_settings(opnl.project_name)["imageio"] - imageio_nodes 
= imageio["nuke"]["nodes"]["requiredNodes"] + imageio_nodes = get_nuke_imageio_settings()["nodes"]["requiredNodes"] imageio_node = None for node in imageio_nodes: @@ -70,8 +73,7 @@ def get_imageio_input_colorspace(filename): ''' Get input file colorspace based on regex in settings. ''' imageio_regex_inputs = ( - get_anatomy_settings(opnl.project_name) - ["imageio"]["nuke"]["regexInputs"]["inputs"]) + get_nuke_imageio_settings()["regexInputs"]["inputs"]) preset_clrsp = None for regexInput in imageio_regex_inputs: @@ -268,15 +270,21 @@ def format_anatomy(data): if not version: file = script_name() data["version"] = get_version_from_path(file) - project_document = io.find_one({"type": "project"}) + + project_doc = io.find_one({"type": "project"}) + asset_doc = io.find_one({ + "type": "asset", + "name": data["avalon"]["asset"] + }) + task_name = os.environ["AVALON_TASK"] + host_name = os.environ["AVALON_APP"] + context_data = get_workdir_data( + project_doc, asset_doc, task_name, host_name + ) + data.update(context_data) data.update({ "subset": data["avalon"]["subset"], - "asset": data["avalon"]["asset"], - "task": os.environ["AVALON_TASK"], "family": data["avalon"]["family"], - "project": {"name": project_document["name"], - "code": project_document["data"].get("code", '')}, - "hierarchy": get_hierarchy(), "frame": "#" * padding, }) return anatomy.format(data) @@ -547,8 +555,7 @@ def add_rendering_knobs(node, farm=True): Return: node (obj): with added knobs ''' - knob_options = [ - "Use existing frames", "Local"] + knob_options = ["Use existing frames", "Local"] if farm: knob_options.append("On farm") @@ -906,8 +913,7 @@ class WorkfileSettings(object): ''' Setting colorpace following presets ''' # get imageio - imageio = get_anatomy_settings(opnl.project_name)["imageio"] - nuke_colorspace = imageio["nuke"] + nuke_colorspace = get_nuke_imageio_settings() try: self.set_root_colorspace(nuke_colorspace["workfile"]) @@ -1164,386 +1170,6 @@ def 
get_write_node_template_attr(node): return anlib.fix_data_for_node_create(correct_data) -class ExporterReview: - """ - Base class object for generating review data from Nuke - - Args: - klass (pyblish.plugin): pyblish plugin parent - instance (pyblish.instance): instance of pyblish context - - """ - _temp_nodes = [] - data = dict({ - "representations": list() - }) - - def __init__(self, - klass, - instance - ): - - self.log = klass.log - self.instance = instance - self.path_in = self.instance.data.get("path", None) - self.staging_dir = self.instance.data["stagingDir"] - self.collection = self.instance.data.get("collection", None) - - def get_file_info(self): - if self.collection: - self.log.debug("Collection: `{}`".format(self.collection)) - # get path - self.fname = os.path.basename(self.collection.format( - "{head}{padding}{tail}")) - self.fhead = self.collection.format("{head}") - - # get first and last frame - self.first_frame = min(self.collection.indexes) - self.last_frame = max(self.collection.indexes) - if "slate" in self.instance.data["families"]: - self.first_frame += 1 - else: - self.fname = os.path.basename(self.path_in) - self.fhead = os.path.splitext(self.fname)[0] + "." - self.first_frame = self.instance.data.get("frameStartHandle", None) - self.last_frame = self.instance.data.get("frameEndHandle", None) - - if "#" in self.fhead: - self.fhead = self.fhead.replace("#", "")[:-1] - - def get_representation_data(self, tags=None, range=False): - add_tags = [] - if tags: - add_tags = tags - - repre = { - 'name': self.name, - 'ext': self.ext, - 'files': self.file, - "stagingDir": self.staging_dir, - "tags": [self.name.replace("_", "-")] + add_tags - } - - if range: - repre.update({ - "frameStart": self.first_frame, - "frameEnd": self.last_frame, - }) - - self.data["representations"].append(repre) - - def get_view_process_node(self): - """ - Will get any active view process. 
- - Arguments: - self (class): in object definition - - Returns: - nuke.Node: copy node of Input Process node - """ - anlib.reset_selection() - ipn_orig = None - for v in nuke.allNodes(filter="Viewer"): - ip = v['input_process'].getValue() - ipn = v['input_process_node'].getValue() - if "VIEWER_INPUT" not in ipn and ip: - ipn_orig = nuke.toNode(ipn) - ipn_orig.setSelected(True) - - if ipn_orig: - # copy selected to clipboard - nuke.nodeCopy('%clipboard%') - # reset selection - anlib.reset_selection() - # paste node and selection is on it only - nuke.nodePaste('%clipboard%') - # assign to variable - ipn = nuke.selectedNode() - - return ipn - - def clean_nodes(self): - for node in self._temp_nodes: - nuke.delete(node) - self._temp_nodes = [] - self.log.info("Deleted nodes...") - - -class ExporterReviewLut(ExporterReview): - """ - Generator object for review lut from Nuke - - Args: - klass (pyblish.plugin): pyblish plugin parent - instance (pyblish.instance): instance of pyblish context - - - """ - - def __init__(self, - klass, - instance, - name=None, - ext=None, - cube_size=None, - lut_size=None, - lut_style=None): - # initialize parent class - ExporterReview.__init__(self, klass, instance) - self._temp_nodes = [] - - # deal with now lut defined in viewer lut - if hasattr(klass, "viewer_lut_raw"): - self.viewer_lut_raw = klass.viewer_lut_raw - else: - self.viewer_lut_raw = False - - self.name = name or "baked_lut" - self.ext = ext or "cube" - self.cube_size = cube_size or 32 - self.lut_size = lut_size or 1024 - self.lut_style = lut_style or "linear" - - # set frame start / end and file name to self - self.get_file_info() - - self.log.info("File info was set...") - - self.file = self.fhead + self.name + ".{}".format(self.ext) - self.path = os.path.join( - self.staging_dir, self.file).replace("\\", "/") - - def generate_lut(self): - # ---------- start nodes creation - - # CMSTestPattern - cms_node = nuke.createNode("CMSTestPattern") - 
cms_node["cube_size"].setValue(self.cube_size) - # connect - self._temp_nodes.append(cms_node) - self.previous_node = cms_node - self.log.debug("CMSTestPattern... `{}`".format(self._temp_nodes)) - - # Node View Process - ipn = self.get_view_process_node() - if ipn is not None: - # connect - ipn.setInput(0, self.previous_node) - self._temp_nodes.append(ipn) - self.previous_node = ipn - self.log.debug("ViewProcess... `{}`".format(self._temp_nodes)) - - if not self.viewer_lut_raw: - # OCIODisplay - dag_node = nuke.createNode("OCIODisplay") - # connect - dag_node.setInput(0, self.previous_node) - self._temp_nodes.append(dag_node) - self.previous_node = dag_node - self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes)) - - # GenerateLUT - gen_lut_node = nuke.createNode("GenerateLUT") - gen_lut_node["file"].setValue(self.path) - gen_lut_node["file_type"].setValue(".{}".format(self.ext)) - gen_lut_node["lut1d"].setValue(self.lut_size) - gen_lut_node["style1d"].setValue(self.lut_style) - # connect - gen_lut_node.setInput(0, self.previous_node) - self._temp_nodes.append(gen_lut_node) - self.log.debug("GenerateLUT... `{}`".format(self._temp_nodes)) - - # ---------- end nodes creation - - # Export lut file - nuke.execute( - gen_lut_node.name(), - int(self.first_frame), - int(self.first_frame)) - - self.log.info("Exported...") - - # ---------- generate representation data - self.get_representation_data() - - self.log.debug("Representation... 
`{}`".format(self.data)) - - # ---------- Clean up - self.clean_nodes() - - return self.data - - -class ExporterReviewMov(ExporterReview): - """ - Metaclass for generating review mov files - - Args: - klass (pyblish.plugin): pyblish plugin parent - instance (pyblish.instance): instance of pyblish context - - """ - - def __init__(self, - klass, - instance, - name=None, - ext=None, - ): - # initialize parent class - ExporterReview.__init__(self, klass, instance) - - # passing presets for nodes to self - if hasattr(klass, "nodes"): - self.nodes = klass.nodes - else: - self.nodes = {} - - # deal with now lut defined in viewer lut - self.viewer_lut_raw = klass.viewer_lut_raw - self.bake_colorspace_fallback = klass.bake_colorspace_fallback - self.bake_colorspace_main = klass.bake_colorspace_main - self.write_colorspace = instance.data["colorspace"] - - self.name = name or "baked" - self.ext = ext or "mov" - - # set frame start / end and file name to self - self.get_file_info() - - self.log.info("File info was set...") - - self.file = self.fhead + self.name + ".{}".format(self.ext) - self.path = os.path.join( - self.staging_dir, self.file).replace("\\", "/") - - def render(self, render_node_name): - self.log.info("Rendering... ") - # Render Write node - nuke.execute( - render_node_name, - int(self.first_frame), - int(self.last_frame)) - - self.log.info("Rendered...") - - def save_file(self): - import shutil - with anlib.maintained_selection(): - self.log.info("Saving nodes as file... 
") - # create nk path - path = os.path.splitext(self.path)[0] + ".nk" - # save file to the path - shutil.copyfile(self.instance.context.data["currentFile"], path) - - self.log.info("Nodes exported...") - return path - - def generate_mov(self, farm=False): - # ---------- start nodes creation - - # Read node - r_node = nuke.createNode("Read") - r_node["file"].setValue(self.path_in) - r_node["first"].setValue(self.first_frame) - r_node["origfirst"].setValue(self.first_frame) - r_node["last"].setValue(self.last_frame) - r_node["origlast"].setValue(self.last_frame) - r_node["colorspace"].setValue(self.write_colorspace) - - # connect - self._temp_nodes.append(r_node) - self.previous_node = r_node - self.log.debug("Read... `{}`".format(self._temp_nodes)) - - # View Process node - ipn = self.get_view_process_node() - if ipn is not None: - # connect - ipn.setInput(0, self.previous_node) - self._temp_nodes.append(ipn) - self.previous_node = ipn - self.log.debug("ViewProcess... `{}`".format(self._temp_nodes)) - - if not self.viewer_lut_raw: - colorspaces = [ - self.bake_colorspace_main, self.bake_colorspace_fallback - ] - - if any(colorspaces): - # OCIOColorSpace with controled output - dag_node = nuke.createNode("OCIOColorSpace") - self._temp_nodes.append(dag_node) - for c in colorspaces: - test = dag_node["out_colorspace"].setValue(str(c)) - if test: - self.log.info( - "Baking in colorspace... `{}`".format(c)) - break - - if not test: - dag_node = nuke.createNode("OCIODisplay") - else: - # OCIODisplay - dag_node = nuke.createNode("OCIODisplay") - - # connect - dag_node.setInput(0, self.previous_node) - self._temp_nodes.append(dag_node) - self.previous_node = dag_node - self.log.debug("OCIODisplay... 
`{}`".format(self._temp_nodes)) - - # Write node - write_node = nuke.createNode("Write") - self.log.debug("Path: {}".format(self.path)) - write_node["file"].setValue(self.path) - write_node["file_type"].setValue(self.ext) - - # Knobs `meta_codec` and `mov64_codec` are not available on centos. - # TODO change this to use conditions, if possible. - try: - write_node["meta_codec"].setValue("ap4h") - except Exception: - self.log.info("`meta_codec` knob was not found") - - try: - write_node["mov64_codec"].setValue("ap4h") - except Exception: - self.log.info("`mov64_codec` knob was not found") - write_node["mov64_write_timecode"].setValue(1) - write_node["raw"].setValue(1) - # connect - write_node.setInput(0, self.previous_node) - self._temp_nodes.append(write_node) - self.log.debug("Write... `{}`".format(self._temp_nodes)) - # ---------- end nodes creation - - # ---------- render or save to nk - if farm: - nuke.scriptSave() - path_nk = self.save_file() - self.data.update({ - "bakeScriptPath": path_nk, - "bakeWriteNodeName": write_node.name(), - "bakeRenderPath": self.path - }) - else: - self.render(write_node.name()) - # ---------- generate representation data - self.get_representation_data( - tags=["review", "delete"], - range=True - ) - - self.log.debug("Representation... `{}`".format(self.data)) - - # ---------- Clean up - self.clean_nodes() - nuke.scriptSave() - return self.data - - def get_dependent_nodes(nodes): """Get all dependent nodes connected to the list of nodes. 
@@ -1654,6 +1280,8 @@ def launch_workfiles_app(): from openpype.lib import ( env_value_to_bool ) + from avalon.nuke.pipeline import get_main_window + # get all imortant settings open_at_start = env_value_to_bool( env_key="OPENPYPE_WORKFILE_TOOL_ON_START", @@ -1665,7 +1293,8 @@ def launch_workfiles_app(): if not opnl.workfiles_launched: opnl.workfiles_launched = True - host_tools.show_workfiles() + main_window = get_main_window() + host_tools.show_workfiles(parent=main_window) def process_workfile_builder(): diff --git a/openpype/hosts/nuke/api/menu.py b/openpype/hosts/nuke/api/menu.py index 3e74893589..4636098604 100644 --- a/openpype/hosts/nuke/api/menu.py +++ b/openpype/hosts/nuke/api/menu.py @@ -1,16 +1,21 @@ import os import nuke from avalon.api import Session +from avalon.nuke.pipeline import get_main_window from .lib import WorkfileSettings from openpype.api import Logger, BuildWorkfile, get_current_project_settings from openpype.tools.utils import host_tools +from avalon.nuke.pipeline import get_main_window + log = Logger().get_logger(__name__) menu_label = os.environ["AVALON_LABEL"] + def install(): + main_window = get_main_window() menubar = nuke.menu("Nuke") menu = menubar.findItem(menu_label) @@ -25,7 +30,7 @@ def install(): menu.removeItem(rm_item[1].name()) menu.addCommand( name, - host_tools.show_workfiles, + lambda: host_tools.show_workfiles(parent=main_window), index=2 ) menu.addSeparator(index=3) @@ -88,7 +93,7 @@ def install(): menu.addSeparator() menu.addCommand( "Experimental tools...", - host_tools.show_experimental_tools_dialog + lambda: host_tools.show_experimental_tools_dialog(parent=main_window) ) # adding shortcuts diff --git a/openpype/hosts/nuke/api/plugin.py b/openpype/hosts/nuke/api/plugin.py index 62eadecaf4..82299dd354 100644 --- a/openpype/hosts/nuke/api/plugin.py +++ b/openpype/hosts/nuke/api/plugin.py @@ -1,3 +1,4 @@ +import os import random import string @@ -26,7 +27,7 @@ class PypeCreator(PypeCreatorMixin, 
avalon.nuke.pipeline.Creator): self.data["subset"]): msg = ("The subset name `{0}` is already used on a node in" "this workfile.".format(self.data["subset"])) - self.log.error(msg + '\n\nPlease use other subset name!') + self.log.error(msg + "\n\nPlease use other subset name!") raise NameError("`{0}: {1}".format(__name__, msg)) return @@ -49,15 +50,18 @@ def get_review_presets_config(): class NukeLoader(api.Loader): container_id_knob = "containerId" - container_id = ''.join(random.choice( - string.ascii_uppercase + string.digits) for _ in range(10)) + container_id = None + + def reset_container_id(self): + self.container_id = "".join(random.choice( + string.ascii_uppercase + string.digits) for _ in range(10)) def get_container_id(self, node): id_knob = node.knobs().get(self.container_id_knob) return id_knob.value() if id_knob else None def get_members(self, source): - """Return nodes that has same 'containerId' as `source`""" + """Return nodes that has same "containerId" as `source`""" source_id = self.get_container_id(source) return [node for node in nuke.allNodes(recurseGroups=True) if self.get_container_id(node) == source_id @@ -67,13 +71,16 @@ class NukeLoader(api.Loader): source_id = self.get_container_id(node) if source_id: - node[self.container_id_knob].setValue(self.container_id) + node[self.container_id_knob].setValue(source_id) else: HIDEN_FLAG = 0x00040000 _knob = anlib.Knobby( "String_Knob", self.container_id, - flags=[nuke.READ_ONLY, HIDEN_FLAG]) + flags=[ + nuke.READ_ONLY, + HIDEN_FLAG + ]) knob = _knob.create(self.container_id_knob) node.addKnob(knob) @@ -94,3 +101,422 @@ class NukeLoader(api.Loader): nuke.delete(member) return dependent_nodes + + +class ExporterReview(object): + """ + Base class object for generating review data from Nuke + + Args: + klass (pyblish.plugin): pyblish plugin parent + instance (pyblish.instance): instance of pyblish context + + """ + data = None + + def __init__(self, + klass, + instance, + multiple_presets=True + ): + 
+ self.log = klass.log + self.instance = instance + self.multiple_presets = multiple_presets + self.path_in = self.instance.data.get("path", None) + self.staging_dir = self.instance.data["stagingDir"] + self.collection = self.instance.data.get("collection", None) + self.data = dict({ + "representations": list() + }) + + def get_file_info(self): + if self.collection: + self.log.debug("Collection: `{}`".format(self.collection)) + # get path + self.fname = os.path.basename(self.collection.format( + "{head}{padding}{tail}")) + self.fhead = self.collection.format("{head}") + + # get first and last frame + self.first_frame = min(self.collection.indexes) + self.last_frame = max(self.collection.indexes) + if "slate" in self.instance.data["families"]: + self.first_frame += 1 + else: + self.fname = os.path.basename(self.path_in) + self.fhead = os.path.splitext(self.fname)[0] + "." + self.first_frame = self.instance.data.get("frameStartHandle", None) + self.last_frame = self.instance.data.get("frameEndHandle", None) + + if "#" in self.fhead: + self.fhead = self.fhead.replace("#", "")[:-1] + + def get_representation_data(self, tags=None, range=False): + add_tags = tags or [] + repre = { + "name": self.name, + "ext": self.ext, + "files": self.file, + "stagingDir": self.staging_dir, + "tags": [self.name.replace("_", "-")] + add_tags + } + + if range: + repre.update({ + "frameStart": self.first_frame, + "frameEnd": self.last_frame, + }) + + if self.multiple_presets: + repre["outputName"] = self.name + + self.data["representations"].append(repre) + + def get_view_input_process_node(self): + """ + Will get any active view process. 
+ + Arguments: + self (class): in object definition + + Returns: + nuke.Node: copy node of Input Process node + """ + anlib.reset_selection() + ipn_orig = None + for v in nuke.allNodes(filter="Viewer"): + ip = v["input_process"].getValue() + ipn = v["input_process_node"].getValue() + if "VIEWER_INPUT" not in ipn and ip: + ipn_orig = nuke.toNode(ipn) + ipn_orig.setSelected(True) + + if ipn_orig: + # copy selected to clipboard + nuke.nodeCopy("%clipboard%") + # reset selection + anlib.reset_selection() + # paste node and selection is on it only + nuke.nodePaste("%clipboard%") + # assign to variable + ipn = nuke.selectedNode() + + return ipn + + def get_imageio_baking_profile(self): + from . import lib as opnlib + nuke_imageio = opnlib.get_nuke_imageio_settings() + + # TODO: this is only securing backward compatibility lets remove + # this once all projects's anotomy are upated to newer config + if "baking" in nuke_imageio.keys(): + return nuke_imageio["baking"]["viewerProcess"] + else: + return nuke_imageio["viewer"]["viewerProcess"] + + + + +class ExporterReviewLut(ExporterReview): + """ + Generator object for review lut from Nuke + + Args: + klass (pyblish.plugin): pyblish plugin parent + instance (pyblish.instance): instance of pyblish context + + + """ + _temp_nodes = [] + + def __init__(self, + klass, + instance, + name=None, + ext=None, + cube_size=None, + lut_size=None, + lut_style=None, + multiple_presets=True): + # initialize parent class + super(ExporterReviewLut, self).__init__( + klass, instance, multiple_presets) + + # deal with now lut defined in viewer lut + if hasattr(klass, "viewer_lut_raw"): + self.viewer_lut_raw = klass.viewer_lut_raw + else: + self.viewer_lut_raw = False + + self.name = name or "baked_lut" + self.ext = ext or "cube" + self.cube_size = cube_size or 32 + self.lut_size = lut_size or 1024 + self.lut_style = lut_style or "linear" + + # set frame start / end and file name to self + self.get_file_info() + + self.log.info("File info was 
set...") + + self.file = self.fhead + self.name + ".{}".format(self.ext) + self.path = os.path.join( + self.staging_dir, self.file).replace("\\", "/") + + def clean_nodes(self): + for node in self._temp_nodes: + nuke.delete(node) + self._temp_nodes = [] + self.log.info("Deleted nodes...") + + def generate_lut(self): + bake_viewer_process = kwargs["bake_viewer_process"] + bake_viewer_input_process_node = kwargs[ + "bake_viewer_input_process"] + + # ---------- start nodes creation + + # CMSTestPattern + cms_node = nuke.createNode("CMSTestPattern") + cms_node["cube_size"].setValue(self.cube_size) + # connect + self._temp_nodes.append(cms_node) + self.previous_node = cms_node + self.log.debug("CMSTestPattern... `{}`".format(self._temp_nodes)) + + if bake_viewer_process: + # Node View Process + if bake_viewer_input_process_node: + ipn = self.get_view_input_process_node() + if ipn is not None: + # connect + ipn.setInput(0, self.previous_node) + self._temp_nodes.append(ipn) + self.previous_node = ipn + self.log.debug( + "ViewProcess... `{}`".format(self._temp_nodes)) + + if not self.viewer_lut_raw: + # OCIODisplay + dag_node = nuke.createNode("OCIODisplay") + # connect + dag_node.setInput(0, self.previous_node) + self._temp_nodes.append(dag_node) + self.previous_node = dag_node + self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes)) + + # GenerateLUT + gen_lut_node = nuke.createNode("GenerateLUT") + gen_lut_node["file"].setValue(self.path) + gen_lut_node["file_type"].setValue(".{}".format(self.ext)) + gen_lut_node["lut1d"].setValue(self.lut_size) + gen_lut_node["style1d"].setValue(self.lut_style) + # connect + gen_lut_node.setInput(0, self.previous_node) + self._temp_nodes.append(gen_lut_node) + self.log.debug("GenerateLUT... 
`{}`".format(self._temp_nodes)) + + # ---------- end nodes creation + + # Export lut file + nuke.execute( + gen_lut_node.name(), + int(self.first_frame), + int(self.first_frame)) + + self.log.info("Exported...") + + # ---------- generate representation data + self.get_representation_data() + + self.log.debug("Representation... `{}`".format(self.data)) + + # ---------- Clean up + self.clean_nodes() + + return self.data + + +class ExporterReviewMov(ExporterReview): + """ + Metaclass for generating review mov files + + Args: + klass (pyblish.plugin): pyblish plugin parent + instance (pyblish.instance): instance of pyblish context + + """ + _temp_nodes = {} + + def __init__(self, + klass, + instance, + name=None, + ext=None, + multiple_presets=True + ): + # initialize parent class + super(ExporterReviewMov, self).__init__( + klass, instance, multiple_presets) + # passing presets for nodes to self + self.nodes = klass.nodes if hasattr(klass, "nodes") else {} + + # deal with now lut defined in viewer lut + self.viewer_lut_raw = klass.viewer_lut_raw + self.write_colorspace = instance.data["colorspace"] + + self.name = name or "baked" + self.ext = ext or "mov" + + # set frame start / end and file name to self + self.get_file_info() + + self.log.info("File info was set...") + + self.file = self.fhead + self.name + ".{}".format(self.ext) + self.path = os.path.join( + self.staging_dir, self.file).replace("\\", "/") + + def clean_nodes(self, node_name): + for node in self._temp_nodes[node_name]: + nuke.delete(node) + self._temp_nodes[node_name] = [] + self.log.info("Deleted nodes...") + + def render(self, render_node_name): + self.log.info("Rendering... ") + # Render Write node + nuke.execute( + render_node_name, + int(self.first_frame), + int(self.last_frame)) + + self.log.info("Rendered...") + + def save_file(self): + import shutil + with anlib.maintained_selection(): + self.log.info("Saving nodes as file... 
") + # create nk path + path = os.path.splitext(self.path)[0] + ".nk" + # save file to the path + shutil.copyfile(self.instance.context.data["currentFile"], path) + + self.log.info("Nodes exported...") + return path + + def generate_mov(self, farm=False, **kwargs): + bake_viewer_process = kwargs["bake_viewer_process"] + bake_viewer_input_process_node = kwargs[ + "bake_viewer_input_process"] + viewer_process_override = kwargs[ + "viewer_process_override"] + + baking_view_profile = ( + viewer_process_override or self.get_imageio_baking_profile()) + + fps = self.instance.context.data["fps"] + + self.log.debug(">> baking_view_profile `{}`".format( + baking_view_profile)) + + add_tags = kwargs.get("add_tags", []) + + self.log.info( + "__ add_tags: `{0}`".format(add_tags)) + + subset = self.instance.data["subset"] + self._temp_nodes[subset] = [] + # ---------- start nodes creation + + # Read node + r_node = nuke.createNode("Read") + r_node["file"].setValue(self.path_in) + r_node["first"].setValue(self.first_frame) + r_node["origfirst"].setValue(self.first_frame) + r_node["last"].setValue(self.last_frame) + r_node["origlast"].setValue(self.last_frame) + r_node["colorspace"].setValue(self.write_colorspace) + + # connect + self._temp_nodes[subset].append(r_node) + self.previous_node = r_node + self.log.debug("Read... `{}`".format(self._temp_nodes[subset])) + + # only create colorspace baking if toggled on + if bake_viewer_process: + if bake_viewer_input_process_node: + # View Process node + ipn = self.get_view_input_process_node() + if ipn is not None: + # connect + ipn.setInput(0, self.previous_node) + self._temp_nodes[subset].append(ipn) + self.previous_node = ipn + self.log.debug( + "ViewProcess... 
`{}`".format( + self._temp_nodes[subset])) + + if not self.viewer_lut_raw: + # OCIODisplay + dag_node = nuke.createNode("OCIODisplay") + dag_node["view"].setValue(str(baking_view_profile)) + + # connect + dag_node.setInput(0, self.previous_node) + self._temp_nodes[subset].append(dag_node) + self.previous_node = dag_node + self.log.debug("OCIODisplay... `{}`".format( + self._temp_nodes[subset])) + + # Write node + write_node = nuke.createNode("Write") + self.log.debug("Path: {}".format(self.path)) + write_node["file"].setValue(str(self.path)) + write_node["file_type"].setValue(str(self.ext)) + + # Knobs `meta_codec` and `mov64_codec` are not available on centos. + # TODO should't this come from settings on outputs? + try: + write_node["meta_codec"].setValue("ap4h") + except Exception: + self.log.info("`meta_codec` knob was not found") + + try: + write_node["mov64_codec"].setValue("ap4h") + write_node["mov64_fps"].setValue(float(fps)) + except Exception: + self.log.info("`mov64_codec` knob was not found") + + write_node["mov64_write_timecode"].setValue(1) + write_node["raw"].setValue(1) + # connect + write_node.setInput(0, self.previous_node) + self._temp_nodes[subset].append(write_node) + self.log.debug("Write... `{}`".format(self._temp_nodes[subset])) + # ---------- end nodes creation + + # ---------- render or save to nk + if farm: + nuke.scriptSave() + path_nk = self.save_file() + self.data.update({ + "bakeScriptPath": path_nk, + "bakeWriteNodeName": write_node.name(), + "bakeRenderPath": self.path + }) + else: + self.render(write_node.name()) + # ---------- generate representation data + self.get_representation_data( + tags=["review", "delete"] + add_tags, + range=True + ) + + self.log.debug("Representation... 
`{}`".format(self.data)) + + self.clean_nodes(subset) + nuke.scriptSave() + + return self.data diff --git a/openpype/hosts/nuke/plugins/create/create_model.py b/openpype/hosts/nuke/plugins/create/create_model.py new file mode 100644 index 0000000000..4e30860e05 --- /dev/null +++ b/openpype/hosts/nuke/plugins/create/create_model.py @@ -0,0 +1,85 @@ +from avalon.nuke import lib as anlib +from openpype.hosts.nuke.api import plugin +import nuke + + +class CreateModel(plugin.PypeCreator): + """Add Publishable Model Geometry""" + + name = "model" + label = "Create 3d Model" + family = "model" + icon = "cube" + defaults = ["Main"] + + def __init__(self, *args, **kwargs): + super(CreateModel, self).__init__(*args, **kwargs) + self.nodes = nuke.selectedNodes() + self.node_color = "0xff3200ff" + return + + def process(self): + nodes = list() + if (self.options or {}).get("useSelection"): + nodes = self.nodes + for n in nodes: + n['selected'].setValue(0) + end_nodes = list() + + # get the latest nodes in tree for selecion + for n in nodes: + x = n + end = 0 + while end == 0: + try: + x = x.dependent()[0] + except: + end_node = x + end = 1 + end_nodes.append(end_node) + + # set end_nodes + end_nodes = list(set(end_nodes)) + + # check if nodes is 3d nodes + for n in end_nodes: + n['selected'].setValue(1) + sn = nuke.createNode("Scene") + if not sn.input(0): + end_nodes.remove(n) + nuke.delete(sn) + + # loop over end nodes + for n in end_nodes: + n['selected'].setValue(1) + + self.nodes = nuke.selectedNodes() + nodes = self.nodes + if len(nodes) >= 1: + # loop selected nodes + for n in nodes: + data = self.data.copy() + if len(nodes) > 1: + # rename subset name only if more + # then one node are selected + subset = self.family + n["name"].value().capitalize() + data["subset"] = subset + + # change node color + n["tile_color"].setValue(int(self.node_color, 16)) + # add avalon knobs + anlib.set_avalon_knob_data(n, data) + return True + else: + msg = str("Please select nodes you " 
+ "wish to add to a container") + self.log.error(msg) + nuke.message(msg) + return + else: + # if selected is off then create one node + model_node = nuke.createNode("WriteGeo") + model_node["tile_color"].setValue(int(self.node_color, 16)) + # add avalon knobs + instance = anlib.set_avalon_knob_data(model_node, self.data) + return instance diff --git a/openpype/hosts/nuke/plugins/load/load_backdrop.py b/openpype/hosts/nuke/plugins/load/load_backdrop.py index e615af51ff..9148260e9e 100644 --- a/openpype/hosts/nuke/plugins/load/load_backdrop.py +++ b/openpype/hosts/nuke/plugins/load/load_backdrop.py @@ -4,7 +4,6 @@ import nukescripts from openpype.hosts.nuke.api import lib as pnlib from avalon.nuke import lib as anlib from avalon.nuke import containerise, update_container -reload(pnlib) class LoadBackdropNodes(api.Loader): """Loading Published Backdrop nodes (workfile, nukenodes)""" diff --git a/openpype/hosts/nuke/plugins/load/load_clip.py b/openpype/hosts/nuke/plugins/load/load_clip.py index f8fc5e3928..4ad2246e21 100644 --- a/openpype/hosts/nuke/plugins/load/load_clip.py +++ b/openpype/hosts/nuke/plugins/load/load_clip.py @@ -67,6 +67,9 @@ class LoadClip(plugin.NukeLoader): def load(self, context, name, namespace, options): + # reste container id so it is always unique for each instance + self.reset_container_id() + is_sequence = len(context["representation"]["files"]) > 1 file = self.fname.replace("\\", "/") @@ -251,8 +254,7 @@ class LoadClip(plugin.NukeLoader): "handleStart": str(self.handle_start), "handleEnd": str(self.handle_end), "fps": str(version_data.get("fps")), - "author": version_data.get("author"), - "outputDir": version_data.get("outputDir"), + "author": version_data.get("author") } # change color of read_node diff --git a/openpype/hosts/nuke/plugins/load/load_image.py b/openpype/hosts/nuke/plugins/load/load_image.py index 2af44d6eba..02a5b55c18 100644 --- a/openpype/hosts/nuke/plugins/load/load_image.py +++ 
b/openpype/hosts/nuke/plugins/load/load_image.py @@ -217,8 +217,7 @@ class LoadImage(api.Loader): "colorspace": version_data.get("colorspace"), "source": version_data.get("source"), "fps": str(version_data.get("fps")), - "author": version_data.get("author"), - "outputDir": version_data.get("outputDir"), + "author": version_data.get("author") }) # change color of node diff --git a/openpype/hosts/nuke/plugins/load/load_model.py b/openpype/hosts/nuke/plugins/load/load_model.py new file mode 100644 index 0000000000..15fa4fa35c --- /dev/null +++ b/openpype/hosts/nuke/plugins/load/load_model.py @@ -0,0 +1,187 @@ +from avalon import api, io +from avalon.nuke import lib as anlib +from avalon.nuke import containerise, update_container +import nuke + + +class AlembicModelLoader(api.Loader): + """ + This will load alembic model into script. + """ + + families = ["model"] + representations = ["abc"] + + label = "Load Alembic Model" + icon = "cube" + color = "orange" + node_color = "0x4ecd91ff" + + def load(self, context, name, namespace, data): + # get main variables + version = context['version'] + version_data = version.get("data", {}) + vname = version.get("name", None) + first = version_data.get("frameStart", None) + last = version_data.get("frameEnd", None) + fps = version_data.get("fps") or nuke.root()["fps"].getValue() + namespace = namespace or context['asset']['name'] + object_name = "{}_{}".format(name, namespace) + + # prepare data for imprinting + # add additional metadata from the version to imprint to Avalon knob + add_keys = ["source", "author", "fps"] + + data_imprint = {"frameStart": first, + "frameEnd": last, + "version": vname, + "objectName": object_name} + + for k in add_keys: + data_imprint.update({k: version_data[k]}) + + # getting file path + file = self.fname.replace("\\", "/") + + with anlib.maintained_selection(): + model_node = nuke.createNode( + "ReadGeo2", + "name {} file {} ".format( + object_name, file), + inpanel=False + ) + 
model_node.forceValidate() + model_node["frame_rate"].setValue(float(fps)) + + # workaround because nuke's bug is not adding + # animation keys properly + xpos = model_node.xpos() + ypos = model_node.ypos() + nuke.nodeCopy("%clipboard%") + nuke.delete(model_node) + nuke.nodePaste("%clipboard%") + model_node = nuke.toNode(object_name) + model_node.setXYpos(xpos, ypos) + + # color node by correct color by actual version + self.node_version_color(version, model_node) + + return containerise( + node=model_node, + name=name, + namespace=namespace, + context=context, + loader=self.__class__.__name__, + data=data_imprint) + + def update(self, container, representation): + """ + Called by Scene Inventory when look should be updated to current + version. + If any reference edits cannot be applied, eg. shader renamed and + material not present, reference is unloaded and cleaned. + All failed edits are highlighted to the user via message box. + + Args: + container: object that has look to be updated + representation: (dict): relationship data to get proper + representation from DB and persisted + data in .json + Returns: + None + """ + # Get version from io + version = io.find_one({ + "type": "version", + "_id": representation["parent"] + }) + object_name = container['objectName'] + # get corresponding node + model_node = nuke.toNode(object_name) + + # get main variables + version_data = version.get("data", {}) + vname = version.get("name", None) + first = version_data.get("frameStart", None) + last = version_data.get("frameEnd", None) + fps = version_data.get("fps") or nuke.root()["fps"].getValue() + + # prepare data for imprinting + # add additional metadata from the version to imprint to Avalon knob + add_keys = ["source", "author", "fps"] + + data_imprint = {"representation": str(representation["_id"]), + "frameStart": first, + "frameEnd": last, + "version": vname, + "objectName": object_name} + + for k in add_keys: + data_imprint.update({k: version_data[k]}) + + # 
getting file path + file = api.get_representation_path(representation).replace("\\", "/") + + with anlib.maintained_selection(): + model_node = nuke.toNode(object_name) + model_node['selected'].setValue(True) + + # collect input output dependencies + dependencies = model_node.dependencies() + dependent = model_node.dependent() + + model_node["frame_rate"].setValue(float(fps)) + model_node["file"].setValue(file) + + # workaround because nuke's bug is + # not adding animation keys properly + xpos = model_node.xpos() + ypos = model_node.ypos() + nuke.nodeCopy("%clipboard%") + nuke.delete(model_node) + nuke.nodePaste("%clipboard%") + model_node = nuke.toNode(object_name) + model_node.setXYpos(xpos, ypos) + + # link to original input nodes + for i, input in enumerate(dependencies): + model_node.setInput(i, input) + # link to original output nodes + for d in dependent: + index = next((i for i, dpcy in enumerate( + d.dependencies()) + if model_node is dpcy), 0) + d.setInput(index, model_node) + + # color node by correct color by actual version + self.node_version_color(version, model_node) + + self.log.info("udated to version: {}".format(version.get("name"))) + + return update_container(model_node, data_imprint) + + def node_version_color(self, version, node): + """ Coloring a node by correct color by actual version + """ + # get all versions in list + versions = io.find({ + "type": "version", + "parent": version["parent"] + }).distinct('name') + + max_version = max(versions) + + # change color of node + if version.get("name") not in [max_version]: + node["tile_color"].setValue(int("0xd88467ff", 16)) + else: + node["tile_color"].setValue(int(self.node_color, 16)) + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + from avalon.nuke import viewer_update_and_undo_stop + node = nuke.toNode(container['objectName']) + with viewer_update_and_undo_stop(): + nuke.delete(node) diff --git 
a/openpype/hosts/nuke/plugins/load/load_script_precomp.py b/openpype/hosts/nuke/plugins/load/load_script_precomp.py index 310157f099..7444dd6e96 100644 --- a/openpype/hosts/nuke/plugins/load/load_script_precomp.py +++ b/openpype/hosts/nuke/plugins/load/load_script_precomp.py @@ -135,8 +135,7 @@ class LinkAsGroup(api.Loader): "source": version["data"].get("source"), "handles": version["data"].get("handles"), "fps": version["data"].get("fps"), - "author": version["data"].get("author"), - "outputDir": version["data"].get("outputDir"), + "author": version["data"].get("author") }) # Update the imprinted representation diff --git a/openpype/hosts/nuke/plugins/publish/collect_model.py b/openpype/hosts/nuke/plugins/publish/collect_model.py new file mode 100644 index 0000000000..5fca240553 --- /dev/null +++ b/openpype/hosts/nuke/plugins/publish/collect_model.py @@ -0,0 +1,49 @@ +import pyblish.api +import nuke + + +@pyblish.api.log +class CollectModel(pyblish.api.InstancePlugin): + """Collect Model node instance and its content + """ + + order = pyblish.api.CollectorOrder + 0.22 + label = "Collect Model" + hosts = ["nuke"] + families = ["model"] + + def process(self, instance): + + grpn = instance[0] + + # add family to familiess + instance.data["families"].insert(0, instance.data["family"]) + # make label nicer + instance.data["label"] = grpn.name() + + # Get frame range + handle_start = instance.context.data["handleStart"] + handle_end = instance.context.data["handleEnd"] + first_frame = int(nuke.root()["first_frame"].getValue()) + last_frame = int(nuke.root()["last_frame"].getValue()) + + # Add version data to instance + version_data = { + "handles": handle_start, + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStart": first_frame + handle_start, + "frameEnd": last_frame - handle_end, + "colorspace": nuke.root().knob('workingSpaceLUT').value(), + "families": [instance.data["family"]] + instance.data["families"], + "subset": instance.data["subset"], + 
"fps": instance.context.data["fps"] + } + + instance.data.update({ + "versionData": version_data, + "frameStart": first_frame, + "frameEnd": last_frame + }) + self.log.info("Model content collected: `{}`".format(instance[:])) + self.log.info("Model instance collected: `{}`".format(instance)) diff --git a/openpype/hosts/nuke/plugins/publish/extract_backdrop.py b/openpype/hosts/nuke/plugins/publish/extract_backdrop.py index 13f8656005..0747c15ea7 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_backdrop.py +++ b/openpype/hosts/nuke/plugins/publish/extract_backdrop.py @@ -4,7 +4,6 @@ from openpype.hosts.nuke.api import lib as pnlib import nuke import os import openpype -reload(pnlib) class ExtractBackdropNode(openpype.api.Extractor): """Extracting content of backdrop nodes diff --git a/openpype/hosts/nuke/plugins/publish/extract_model.py b/openpype/hosts/nuke/plugins/publish/extract_model.py new file mode 100644 index 0000000000..43214bf3e9 --- /dev/null +++ b/openpype/hosts/nuke/plugins/publish/extract_model.py @@ -0,0 +1,103 @@ +import nuke +import os +import pyblish.api +import openpype.api +from avalon.nuke import lib as anlib +from pprint import pformat + + +class ExtractModel(openpype.api.Extractor): + """ 3D model exctractor + """ + label = 'Exctract Model' + order = pyblish.api.ExtractorOrder + families = ["model"] + hosts = ["nuke"] + + # presets + write_geo_knobs = [ + ("file_type", "abc"), + ("storageFormat", "Ogawa"), + ("writeGeometries", True), + ("writePointClouds", False), + ("writeAxes", False) + ] + + def process(self, instance): + handle_start = instance.context.data["handleStart"] + handle_end = instance.context.data["handleEnd"] + first_frame = int(nuke.root()["first_frame"].getValue()) + last_frame = int(nuke.root()["last_frame"].getValue()) + + self.log.info("instance.data: `{}`".format( + pformat(instance.data))) + + rm_nodes = list() + model_node = instance[0] + self.log.info("Crating additional nodes") + subset = 
instance.data["subset"] + staging_dir = self.staging_dir(instance) + + extension = next((k[1] for k in self.write_geo_knobs + if k[0] == "file_type"), None) + if not extension: + raise RuntimeError( + "Bad config for extension in presets. " + "Talk to your supervisor or pipeline admin") + + # create file name and path + filename = subset + ".{}".format(extension) + file_path = os.path.join(staging_dir, filename).replace("\\", "/") + + with anlib.maintained_selection(): + # select model node + anlib.select_nodes([model_node]) + + # create write geo node + wg_n = nuke.createNode("WriteGeo") + wg_n["file"].setValue(file_path) + # add path to write to + for k, v in self.write_geo_knobs: + wg_n[k].setValue(v) + rm_nodes.append(wg_n) + + # write out model + nuke.execute( + wg_n, + int(first_frame), + int(last_frame) + ) + # erase additional nodes + for n in rm_nodes: + nuke.delete(n) + + self.log.info(file_path) + + # create representation data + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': extension, + 'ext': extension, + 'files': filename, + "stagingDir": staging_dir, + "frameStart": first_frame, + "frameEnd": last_frame + } + instance.data["representations"].append(representation) + + instance.data.update({ + "path": file_path, + "outputDir": staging_dir, + "ext": extension, + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStart": first_frame + handle_start, + "frameEnd": last_frame - handle_end, + "frameStartHandle": first_frame, + "frameEndHandle": last_frame, + }) + + self.log.info("Extracted instance '{0}' to: {1}".format( + instance.name, file_path)) diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py b/openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py index a0f1c9a087..8ba746a3c4 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py +++ b/openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py @@ -1,16 +1,9 @@ 
import os import pyblish.api from avalon.nuke import lib as anlib -from openpype.hosts.nuke.api import lib as pnlib +from openpype.hosts.nuke.api import plugin import openpype -try: - from __builtin__ import reload -except ImportError: - from importlib import reload - -reload(pnlib) - class ExtractReviewDataLut(openpype.api.Extractor): """Extracts movie and thumbnail with baked in luts @@ -45,7 +38,7 @@ class ExtractReviewDataLut(openpype.api.Extractor): # generate data with anlib.maintained_selection(): - exporter = pnlib.ExporterReviewLut( + exporter = plugin.ExporterReviewLut( self, instance ) data = exporter.generate_lut() diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py index f4fbc2d0e4..261fca6583 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py +++ b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py @@ -1,16 +1,9 @@ import os import pyblish.api from avalon.nuke import lib as anlib -from openpype.hosts.nuke.api import lib as pnlib +from openpype.hosts.nuke.api import plugin import openpype -try: - from __builtin__ import reload -except ImportError: - from importlib import reload - -reload(pnlib) - class ExtractReviewDataMov(openpype.api.Extractor): """Extracts movie and thumbnail with baked in luts @@ -27,46 +20,104 @@ class ExtractReviewDataMov(openpype.api.Extractor): # presets viewer_lut_raw = None - bake_colorspace_fallback = None - bake_colorspace_main = None + outputs = {} def process(self, instance): families = instance.data["families"] + task_type = instance.context.data["taskType"] self.log.info("Creating staging dir...") if "representations" not in instance.data: - instance.data["representations"] = list() + instance.data["representations"] = [] staging_dir = os.path.normpath( - os.path.dirname(instance.data['path'])) + os.path.dirname(instance.data["path"])) instance.data["stagingDir"] = staging_dir self.log.info( 
"StagingDir `{0}`...".format(instance.data["stagingDir"])) + self.log.info(self.outputs) + # generate data with anlib.maintained_selection(): - exporter = pnlib.ExporterReviewMov( - self, instance) + for o_name, o_data in self.outputs.items(): + f_families = o_data["filter"]["families"] + f_task_types = o_data["filter"]["task_types"] - if "render.farm" in families: - instance.data["families"].remove("review") - data = exporter.generate_mov(farm=True) + # test if family found in context + test_families = any([ + # first if exact family set is mathing + # make sure only interesetion of list is correct + bool(set(families).intersection(f_families)), + # and if famiies are set at all + # if not then return True because we want this preset + # to be active if nothig is set + bool(not f_families) + ]) - self.log.debug( - "_ data: {}".format(data)) + # test task types from filter + test_task_types = any([ + # check if actual task type is defined in task types + # set in preset's filter + bool(task_type in f_task_types), + # and if taskTypes are defined in preset filter + # if not then return True, because we want this filter + # to be active if no taskType is set + bool(not f_task_types) + ]) - instance.data.update({ - "bakeRenderPath": data.get("bakeRenderPath"), - "bakeScriptPath": data.get("bakeScriptPath"), - "bakeWriteNodeName": data.get("bakeWriteNodeName") - }) - else: - data = exporter.generate_mov() + # we need all filters to be positive for this + # preset to be activated + test_all = all([ + test_families, + test_task_types + ]) - # assign to representations - instance.data["representations"] += data["representations"] + # if it is not positive then skip this preset + if not test_all: + continue + + self.log.info( + "Baking output `{}` with settings: {}".format( + o_name, o_data)) + + # check if settings have more then one preset + # so we dont need to add outputName to representation + # in case there is only one preset + multiple_presets = 
bool(len(self.outputs.keys()) > 1) + + # create exporter instance + exporter = plugin.ExporterReviewMov( + self, instance, o_name, o_data["extension"], + multiple_presets) + + if "render.farm" in families: + if "review" in instance.data["families"]: + instance.data["families"].remove("review") + + data = exporter.generate_mov(farm=True, **o_data) + + self.log.debug( + "_ data: {}".format(data)) + + if not instance.data.get("bakingNukeScripts"): + instance.data["bakingNukeScripts"] = [] + + instance.data["bakingNukeScripts"].append({ + "bakeRenderPath": data.get("bakeRenderPath"), + "bakeScriptPath": data.get("bakeScriptPath"), + "bakeWriteNodeName": data.get("bakeWriteNodeName") + }) + else: + data = exporter.generate_mov(**o_data) + + self.log.info(data["representations"]) + + # assign to representations + instance.data["representations"] += data["representations"] self.log.debug( - "_ representations: {}".format(instance.data["representations"])) + "_ representations: {}".format( + instance.data["representations"])) diff --git a/openpype/hosts/photoshop/api/__init__.py b/openpype/hosts/photoshop/api/__init__.py index 81942c3b2a..d978d6ecc1 100644 --- a/openpype/hosts/photoshop/api/__init__.py +++ b/openpype/hosts/photoshop/api/__init__.py @@ -2,9 +2,10 @@ import os import sys import logging +from Qt import QtWidgets + from avalon import io from avalon import api as avalon -from avalon.vendor import Qt from openpype import lib from pyblish import api as pyblish import openpype.hosts.photoshop @@ -38,10 +39,10 @@ def check_inventory(): # Warn about outdated containers. print("Starting new QApplication..") - app = Qt.QtWidgets.QApplication(sys.argv) + app = QtWidgets.QApplication(sys.argv) - message_box = Qt.QtWidgets.QMessageBox() - message_box.setIcon(Qt.QtWidgets.QMessageBox.Warning) + message_box = QtWidgets.QMessageBox() + message_box.setIcon(QtWidgets.QMessageBox.Warning) msg = "There are outdated containers in the scene." 
message_box.setText(msg) message_box.exec_() diff --git a/openpype/hosts/photoshop/plugins/create/create_image.py b/openpype/hosts/photoshop/plugins/create/create_image.py index 967a704ccf..657d41aa93 100644 --- a/openpype/hosts/photoshop/plugins/create/create_image.py +++ b/openpype/hosts/photoshop/plugins/create/create_image.py @@ -1,5 +1,5 @@ +from Qt import QtWidgets import openpype.api -from avalon.vendor import Qt from avalon import photoshop @@ -26,21 +26,21 @@ class CreateImage(openpype.api.Creator): if len(selection) > 1: # Ask user whether to create one image or image per selected # item. - msg_box = Qt.QtWidgets.QMessageBox() - msg_box.setIcon(Qt.QtWidgets.QMessageBox.Warning) + msg_box = QtWidgets.QMessageBox() + msg_box.setIcon(QtWidgets.QMessageBox.Warning) msg_box.setText( "Multiple layers selected." "\nDo you want to make one image per layer?" ) msg_box.setStandardButtons( - Qt.QtWidgets.QMessageBox.Yes | - Qt.QtWidgets.QMessageBox.No | - Qt.QtWidgets.QMessageBox.Cancel + QtWidgets.QMessageBox.Yes | + QtWidgets.QMessageBox.No | + QtWidgets.QMessageBox.Cancel ) ret = msg_box.exec_() - if ret == Qt.QtWidgets.QMessageBox.Yes: + if ret == QtWidgets.QMessageBox.Yes: multiple_instances = True - elif ret == Qt.QtWidgets.QMessageBox.Cancel: + elif ret == QtWidgets.QMessageBox.Cancel: return if multiple_instances: diff --git a/openpype/hosts/resolve/api/menu.py b/openpype/hosts/resolve/api/menu.py index 262ce739dd..0d5930d275 100644 --- a/openpype/hosts/resolve/api/menu.py +++ b/openpype/hosts/resolve/api/menu.py @@ -61,6 +61,9 @@ class OpenPypeMenu(QtWidgets.QWidget): inventory_btn = QtWidgets.QPushButton("Inventory ...", self) subsetm_btn = QtWidgets.QPushButton("Subset Manager ...", self) libload_btn = QtWidgets.QPushButton("Library ...", self) + experimental_btn = QtWidgets.QPushButton( + "Experimental tools ...", self + ) # rename_btn = QtWidgets.QPushButton("Rename", self) # set_colorspace_btn = QtWidgets.QPushButton( # "Set colorspace from presets", 
self @@ -91,6 +94,8 @@ class OpenPypeMenu(QtWidgets.QWidget): # layout.addWidget(set_colorspace_btn) # layout.addWidget(reset_resolution_btn) + layout.addWidget(Spacer(15, self)) + layout.addWidget(experimental_btn) self.setLayout(layout) @@ -104,6 +109,7 @@ class OpenPypeMenu(QtWidgets.QWidget): # rename_btn.clicked.connect(self.on_rename_clicked) # set_colorspace_btn.clicked.connect(self.on_set_colorspace_clicked) # reset_resolution_btn.clicked.connect(self.on_reset_resolution_clicked) + experimental_btn.clicked.connect(self.on_experimental_clicked) def on_workfile_clicked(self): print("Clicked Workfile") @@ -142,6 +148,9 @@ class OpenPypeMenu(QtWidgets.QWidget): def on_reset_resolution_clicked(self): print("Clicked Reset Resolution") + def on_experimental_clicked(self): + host_tools.show_experimental_tools_dialog() + def launch_pype_menu(): app = QtWidgets.QApplication(sys.argv) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_resources.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_resources.py index ffa24cfd93..36bacceb1c 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_resources.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_resources.py @@ -238,7 +238,7 @@ class CollectInstanceResources(pyblish.api.InstancePlugin): }) # exception for mp4 preview - if ".mp4" in _reminding_file: + if ext in ["mp4", "mov"]: frame_start = 0 frame_end = ( (instance_data["frameEnd"] - instance_data["frameStart"]) @@ -255,6 +255,7 @@ class CollectInstanceResources(pyblish.api.InstancePlugin): "step": 1, "fps": self.context.data.get("fps"), "name": "review", + "thumbnail": True, "tags": ["review", "ftrackreview", "delete"], }) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_harmony_scenes.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_harmony_scenes.py index a4fed3bc3f..48c36aa067 100644 --- 
a/openpype/hosts/standalonepublisher/plugins/publish/collect_harmony_scenes.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_harmony_scenes.py @@ -49,10 +49,22 @@ class CollectHarmonyScenes(pyblish.api.InstancePlugin): # fix anatomy data anatomy_data_new = copy.deepcopy(anatomy_data) + + project_entity = context.data["projectEntity"] + asset_entity = context.data["assetEntity"] + + task_type = asset_entity["data"]["tasks"].get(task, {}).get("type") + project_task_types = project_entity["config"]["tasks"] + task_code = project_task_types.get(task_type, {}).get("short_name") + # updating hierarchy data anatomy_data_new.update({ "asset": asset_data["name"], - "task": task, + "task": { + "name": task, + "type": task_type, + "short": task_code, + }, "subset": subset_name }) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_harmony_zips.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_harmony_zips.py index 93eff85486..40a969f8df 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_harmony_zips.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_harmony_zips.py @@ -27,6 +27,7 @@ class CollectHarmonyZips(pyblish.api.InstancePlugin): anatomy_data = instance.context.data["anatomyData"] repres = instance.data["representations"] files = repres[0]["files"] + project_entity = context.data["projectEntity"] if files.endswith(".zip"): # A zip file was dropped @@ -45,14 +46,24 @@ class CollectHarmonyZips(pyblish.api.InstancePlugin): self.log.info("Copied data: {}".format(new_instance.data)) + task_type = asset_data["data"]["tasks"].get(task, {}).get("type") + project_task_types = project_entity["config"]["tasks"] + task_code = project_task_types.get(task_type, {}).get("short_name") + # fix anatomy data anatomy_data_new = copy.deepcopy(anatomy_data) # updating hierarchy data - anatomy_data_new.update({ - "asset": asset_data["name"], - "task": task, - "subset": subset_name - }) + 
anatomy_data_new.update( + { + "asset": asset_data["name"], + "task": { + "name": task, + "type": task_type, + "short": task_code, + }, + "subset": subset_name + } + ) new_instance.data["label"] = f"{instance_name}" new_instance.data["subset"] = subset_name diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_harmony_zip.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_harmony_zip.py deleted file mode 100644 index adbac6ef09..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_harmony_zip.py +++ /dev/null @@ -1,415 +0,0 @@ -# -*- coding: utf-8 -*- -"""Extract Harmony scene from zip file.""" -import glob -import os -import shutil -import six -import sys -import tempfile -import zipfile - -import pyblish.api -from avalon import api, io -import openpype.api -from openpype.lib import get_workfile_template_key_from_context - - -class ExtractHarmonyZip(openpype.api.Extractor): - """Extract Harmony zip.""" - - # Pyblish settings - label = "Extract Harmony zip" - order = pyblish.api.ExtractorOrder + 0.02 - hosts = ["standalonepublisher"] - families = ["scene"] - - # Properties - session = None - task_types = None - task_statuses = None - assetversion_statuses = None - - # Presets - create_workfile = True - default_task = "harmonyIngest" - default_task_type = "Ingest" - default_task_status = "Ingested" - assetversion_status = "Ingested" - - def process(self, instance): - """Plugin entry point.""" - context = instance.context - self.session = context.data["ftrackSession"] - asset_doc = context.data["assetEntity"] - # asset_name = instance.data["asset"] - subset_name = instance.data["subset"] - instance_name = instance.data["name"] - family = instance.data["family"] - task = context.data["anatomyData"]["task"] or self.default_task - project_entity = instance.context.data["projectEntity"] - ftrack_id = asset_doc["data"]["ftrackId"] - repres = instance.data["representations"] - submitted_staging_dir = 
repres[0]["stagingDir"] - submitted_files = repres[0]["files"] - - # Get all the ftrack entities needed - - # Asset Entity - query = 'AssetBuild where id is "{}"'.format(ftrack_id) - asset_entity = self.session.query(query).first() - - # Project Entity - query = 'Project where full_name is "{}"'.format( - project_entity["name"] - ) - project_entity = self.session.query(query).one() - - # Get Task types and Statuses for creation if needed - self.task_types = self._get_all_task_types(project_entity) - self.task_statuses = self._get_all_task_statuses(project_entity) - - # Get Statuses of AssetVersions - self.assetversion_statuses = self._get_all_assetversion_statuses( - project_entity - ) - - # Setup the status that we want for the AssetVersion - if self.assetversion_status: - instance.data["assetversion_status"] = self.assetversion_status - - # Create the default_task if it does not exist - if task == self.default_task: - existing_tasks = [] - entity_children = asset_entity.get('children', []) - for child in entity_children: - if child.entity_type.lower() == 'task': - existing_tasks.append(child['name'].lower()) - - if task.lower() in existing_tasks: - print("Task {} already exists".format(task)) - - else: - self.create_task( - name=task, - task_type=self.default_task_type, - task_status=self.default_task_status, - parent=asset_entity, - ) - - # Find latest version - latest_version = self._find_last_version(subset_name, asset_doc) - version_number = 1 - if latest_version is not None: - version_number += latest_version - - self.log.info( - "Next version of instance \"{}\" will be {}".format( - instance_name, version_number - ) - ) - - # update instance info - instance.data["task"] = task - instance.data["version_name"] = "{}_{}".format(subset_name, task) - instance.data["family"] = family - instance.data["subset"] = subset_name - instance.data["version"] = version_number - instance.data["latestVersion"] = latest_version - instance.data["anatomyData"].update({ - 
"subset": subset_name, - "family": family, - "version": version_number - }) - - # Copy `families` and check if `family` is not in current families - families = instance.data.get("families") or list() - if families: - families = list(set(families)) - - instance.data["families"] = families - - # Prepare staging dir for new instance and zip + sanitize scene name - staging_dir = tempfile.mkdtemp(prefix="pyblish_tmp_") - - # Handle if the representation is a .zip and not an .xstage - pre_staged = False - if submitted_files.endswith(".zip"): - submitted_zip_file = os.path.join(submitted_staging_dir, - submitted_files - ).replace("\\", "/") - - pre_staged = self.sanitize_prezipped_project(instance, - submitted_zip_file, - staging_dir) - - # Get the file to work with - source_dir = str(repres[0]["stagingDir"]) - source_file = str(repres[0]["files"]) - - staging_scene_dir = os.path.join(staging_dir, "scene") - staging_scene = os.path.join(staging_scene_dir, source_file) - - # If the file is an .xstage / directory, we must stage it - if not pre_staged: - shutil.copytree(source_dir, staging_scene_dir) - - # Rename this latest file as 'scene.xstage' - # This is is determined in the collector from the latest scene in a - # submitted directory / directory the submitted .xstage is in. - # In the case of a zip file being submitted, this is determined within - # the self.sanitize_project() method in this extractor. 
- os.rename(staging_scene, - os.path.join(staging_scene_dir, "scene.xstage") - ) - - # Required to set the current directory where the zip will end up - os.chdir(staging_dir) - - # Create the zip file - zip_filepath = shutil.make_archive(os.path.basename(source_dir), - "zip", - staging_scene_dir - ) - - zip_filename = os.path.basename(zip_filepath) - - self.log.info("Zip file: {}".format(zip_filepath)) - - # Setup representation - new_repre = { - "name": "zip", - "ext": "zip", - "files": zip_filename, - "stagingDir": staging_dir - } - - self.log.debug( - "Creating new representation: {}".format(new_repre) - ) - instance.data["representations"] = [new_repre] - - self.log.debug("Completed prep of zipped Harmony scene: {}" - .format(zip_filepath) - ) - - # If this extractor is setup to also extract a workfile... - if self.create_workfile: - workfile_path = self.extract_workfile(instance, - staging_scene - ) - - self.log.debug("Extracted Workfile to: {}".format(workfile_path)) - - def extract_workfile(self, instance, staging_scene): - """Extract a valid workfile for this corresponding publish. - - Args: - instance (:class:`pyblish.api.Instance`): Instance data. - staging_scene (str): path of staging scene. - - Returns: - str: Path to workdir. 
- - """ - # Since the staging scene was renamed to "scene.xstage" for publish - # rename the staging scene in the temp stagingdir - staging_scene = os.path.join(os.path.dirname(staging_scene), - "scene.xstage") - - # Setup the data needed to form a valid work path filename - anatomy = openpype.api.Anatomy() - project_entity = instance.context.data["projectEntity"] - - data = { - "root": api.registered_root(), - "project": { - "name": project_entity["name"], - "code": project_entity["data"].get("code", '') - }, - "asset": instance.data["asset"], - "hierarchy": openpype.api.get_hierarchy(instance.data["asset"]), - "family": instance.data["family"], - "task": instance.data.get("task"), - "subset": instance.data["subset"], - "version": 1, - "ext": "zip", - } - host_name = "harmony" - template_name = get_workfile_template_key_from_context( - instance.data["asset"], - instance.data.get("task"), - host_name, - project_name=project_entity["name"], - dbcon=io - ) - - # Get a valid work filename first with version 1 - file_template = anatomy.templates[template_name]["file"] - anatomy_filled = anatomy.format(data) - work_path = anatomy_filled[template_name]["path"] - - # Get the final work filename with the proper version - data["version"] = api.last_workfile_with_version( - os.path.dirname(work_path), - file_template, - data, - api.HOST_WORKFILE_EXTENSIONS[host_name] - )[1] - - base_name = os.path.splitext(os.path.basename(work_path))[0] - - staging_work_path = os.path.join(os.path.dirname(staging_scene), - base_name + ".xstage" - ) - - # Rename this latest file after the workfile path filename - os.rename(staging_scene, staging_work_path) - - # Required to set the current directory where the zip will end up - os.chdir(os.path.dirname(os.path.dirname(staging_scene))) - - # Create the zip file - zip_filepath = shutil.make_archive(base_name, - "zip", - os.path.dirname(staging_scene) - ) - self.log.info(staging_scene) - self.log.info(work_path) - 
self.log.info(staging_work_path) - self.log.info(os.path.dirname(os.path.dirname(staging_scene))) - self.log.info(base_name) - self.log.info(zip_filepath) - - # Create the work path on disk if it does not exist - os.makedirs(os.path.dirname(work_path), exist_ok=True) - shutil.copy(zip_filepath, work_path) - - return work_path - - def sanitize_prezipped_project( - self, instance, zip_filepath, staging_dir): - """Fix when a zip contains a folder. - - Handle zip file root contains folder instead of the project. - - Args: - instance (:class:`pyblish.api.Instance`): Instance data. - zip_filepath (str): Path to zip. - staging_dir (str): Path to staging directory. - - """ - zip = zipfile.ZipFile(zip_filepath) - zip_contents = zipfile.ZipFile.namelist(zip) - - # Determine if any xstage file is in root of zip - project_in_root = [pth for pth in zip_contents - if "/" not in pth and pth.endswith(".xstage")] - - staging_scene_dir = os.path.join(staging_dir, "scene") - - # The project is nested, so we must extract and move it - if not project_in_root: - - staging_tmp_dir = os.path.join(staging_dir, "tmp") - - with zipfile.ZipFile(zip_filepath, "r") as zip_ref: - zip_ref.extractall(staging_tmp_dir) - - nested_project_folder = os.path.join(staging_tmp_dir, - zip_contents[0] - ) - - shutil.copytree(nested_project_folder, staging_scene_dir) - - else: - # The project is not nested, so we just extract to scene folder - with zipfile.ZipFile(zip_filepath, "r") as zip_ref: - zip_ref.extractall(staging_scene_dir) - - latest_file = max(glob.iglob(staging_scene_dir + "/*.xstage"), - key=os.path.getctime).replace("\\", "/") - - instance.data["representations"][0]["stagingDir"] = staging_scene_dir - instance.data["representations"][0]["files"] = os.path.basename( - latest_file) - - # We have staged the scene already so return True - return True - - def _find_last_version(self, subset_name, asset_doc): - """Find last version of subset.""" - subset_doc = io.find_one({ - "type": "subset", - 
"name": subset_name, - "parent": asset_doc["_id"] - }) - - if subset_doc is None: - self.log.debug("Subset entity does not exist yet.") - else: - version_doc = io.find_one( - { - "type": "version", - "parent": subset_doc["_id"] - }, - sort=[("name", -1)] - ) - if version_doc: - return int(version_doc["name"]) - return None - - def _get_all_task_types(self, project): - """Get all task types.""" - tasks = {} - proj_template = project['project_schema'] - temp_task_types = proj_template['_task_type_schema']['types'] - - for type in temp_task_types: - if type['name'] not in tasks: - tasks[type['name']] = type - - return tasks - - def _get_all_task_statuses(self, project): - """Get all statuses of tasks.""" - statuses = {} - proj_template = project['project_schema'] - temp_task_statuses = proj_template.get_statuses("Task") - - for status in temp_task_statuses: - if status['name'] not in statuses: - statuses[status['name']] = status - - return statuses - - def _get_all_assetversion_statuses(self, project): - """Get statuses of all asset versions.""" - statuses = {} - proj_template = project['project_schema'] - temp_task_statuses = proj_template.get_statuses("AssetVersion") - - for status in temp_task_statuses: - if status['name'] not in statuses: - statuses[status['name']] = status - - return statuses - - def _create_task(self, name, task_type, parent, task_status): - """Create task.""" - task_data = { - 'name': name, - 'parent': parent, - } - self.log.info(task_type) - task_data['type'] = self.task_types[task_type] - task_data['status'] = self.task_statuses[task_status] - self.log.info(task_data) - task = self.session.create('Task', task_data) - try: - self.session.commit() - except Exception: - tp, value, tb = sys.exc_info() - self.session.rollback() - six.reraise(tp, value, tb) - - return task diff --git a/openpype/hosts/tvpaint/lib.py b/openpype/hosts/tvpaint/lib.py new file mode 100644 index 0000000000..513bb2d952 --- /dev/null +++ b/openpype/hosts/tvpaint/lib.py @@ 
-0,0 +1,682 @@ +import os +import shutil +import collections +from PIL import Image, ImageDraw + + +def backwards_id_conversion(data_by_layer_id): + """Convert layer ids to strings from integers.""" + for key in tuple(data_by_layer_id.keys()): + if not isinstance(key, str): + data_by_layer_id[str(key)] = data_by_layer_id.pop(key) + + +def get_frame_filename_template(frame_end, filename_prefix=None, ext=None): + """Get file template with frame key for rendered files. + + This is simple template contains `{frame}{ext}` for sequential outputs + and `single_file{ext}` for single file output. Output is rendered to + temporary folder so filename should not matter as integrator change + them. + """ + frame_padding = 4 + frame_end_str_len = len(str(frame_end)) + if frame_end_str_len > frame_padding: + frame_padding = frame_end_str_len + + ext = ext or ".png" + filename_prefix = filename_prefix or "" + + return "{}{{frame:0>{}}}{}".format(filename_prefix, frame_padding, ext) + + +def get_layer_pos_filename_template(range_end, filename_prefix=None, ext=None): + filename_prefix = filename_prefix or "" + new_filename_prefix = filename_prefix + "pos_{pos}." + return get_frame_filename_template(range_end, new_filename_prefix, ext) + + +def _calculate_pre_behavior_copy( + range_start, exposure_frames, pre_beh, + layer_frame_start, layer_frame_end, + output_idx_by_frame_idx +): + """Calculate frames before first exposure frame based on pre behavior. + + Function may skip whole processing if first exposure frame is before + layer's first frame. In that case pre behavior does not make sense. + + Args: + range_start(int): First frame of range which should be rendered. + exposure_frames(list): List of all exposure frames on layer. + pre_beh(str): Pre behavior of layer (enum of 4 strings). + layer_frame_start(int): First frame of layer. + layer_frame_end(int): Last frame of layer. + output_idx_by_frame_idx(dict): References to already prepared frames + and where result will be stored. 
+ """ + # Check if last layer frame is after range end + if layer_frame_start < range_start: + return + + first_exposure_frame = min(exposure_frames) + # Skip if last exposure frame is after range end + if first_exposure_frame < range_start: + return + + # Calculate frame count of layer + frame_count = layer_frame_end - layer_frame_start + 1 + + if pre_beh == "none": + # Just fill all frames from last exposure frame to range end with None + for frame_idx in range(range_start, layer_frame_start): + output_idx_by_frame_idx[frame_idx] = None + + elif pre_beh == "hold": + # Keep first frame for whole time + for frame_idx in range(range_start, layer_frame_start): + output_idx_by_frame_idx[frame_idx] = first_exposure_frame + + elif pre_beh in ("loop", "repeat"): + # Loop backwards from last frame of layer + for frame_idx in reversed(range(range_start, layer_frame_start)): + eq_frame_idx_offset = ( + (layer_frame_end - frame_idx) % frame_count + ) + eq_frame_idx = layer_frame_end - eq_frame_idx_offset + output_idx_by_frame_idx[frame_idx] = eq_frame_idx + + elif pre_beh == "pingpong": + half_seq_len = frame_count - 1 + seq_len = half_seq_len * 2 + for frame_idx in reversed(range(range_start, layer_frame_start)): + eq_frame_idx_offset = (layer_frame_start - frame_idx) % seq_len + if eq_frame_idx_offset > half_seq_len: + eq_frame_idx_offset = (seq_len - eq_frame_idx_offset) + eq_frame_idx = layer_frame_start + eq_frame_idx_offset + output_idx_by_frame_idx[frame_idx] = eq_frame_idx + + +def _calculate_post_behavior_copy( + range_end, exposure_frames, post_beh, + layer_frame_start, layer_frame_end, + output_idx_by_frame_idx +): + """Calculate frames after last frame of layer based on post behavior. + + Function may skip whole processing if last layer frame is after range_end. + In that case post behavior does not make sense. + + Args: + range_end(int): Last frame of range which should be rendered. + exposure_frames(list): List of all exposure frames on layer. 
+ post_beh(str): Post behavior of layer (enum of 4 strings). + layer_frame_start(int): First frame of layer. + layer_frame_end(int): Last frame of layer. + output_idx_by_frame_idx(dict): References to already prepared frames + and where result will be stored. + """ + # Check if last layer frame is after range end + if layer_frame_end >= range_end: + return + + last_exposure_frame = max(exposure_frames) + # Skip if last exposure frame is after range end + # - this is probably irrelevant with layer frame end check? + if last_exposure_frame >= range_end: + return + + # Calculate frame count of layer + frame_count = layer_frame_end - layer_frame_start + 1 + + if post_beh == "none": + # Just fill all frames from last exposure frame to range end with None + for frame_idx in range(layer_frame_end + 1, range_end + 1): + output_idx_by_frame_idx[frame_idx] = None + + elif post_beh == "hold": + # Keep last exposure frame to the end + for frame_idx in range(layer_frame_end + 1, range_end + 1): + output_idx_by_frame_idx[frame_idx] = last_exposure_frame + + elif post_beh in ("loop", "repeat"): + # Loop backwards from last frame of layer + for frame_idx in range(layer_frame_end + 1, range_end + 1): + eq_frame_idx = frame_idx % frame_count + output_idx_by_frame_idx[frame_idx] = eq_frame_idx + + elif post_beh == "pingpong": + half_seq_len = frame_count - 1 + seq_len = half_seq_len * 2 + for frame_idx in range(layer_frame_end + 1, range_end + 1): + eq_frame_idx_offset = (frame_idx - layer_frame_end) % seq_len + if eq_frame_idx_offset > half_seq_len: + eq_frame_idx_offset = seq_len - eq_frame_idx_offset + eq_frame_idx = layer_frame_end - eq_frame_idx_offset + output_idx_by_frame_idx[frame_idx] = eq_frame_idx + + +def _calculate_in_range_frames( + range_start, range_end, + exposure_frames, layer_frame_end, + output_idx_by_frame_idx +): + """Calculate frame references in defined range. + + Function may skip whole processing if last layer frame is after range_end. 
+ In that case post behavior does not make sense. + + Args: + range_start(int): First frame of range which should be rendered. + range_end(int): Last frame of range which should be rendered. + exposure_frames(list): List of all exposure frames on layer. + layer_frame_end(int): Last frame of layer. + output_idx_by_frame_idx(dict): References to already prepared frames + and where result will be stored. + """ + # Calculate in range frames + in_range_frames = [] + for frame_idx in exposure_frames: + if range_start <= frame_idx <= range_end: + output_idx_by_frame_idx[frame_idx] = frame_idx + in_range_frames.append(frame_idx) + + if in_range_frames: + first_in_range_frame = min(in_range_frames) + # Calculate frames from first exposure frames to range end or last + # frame of layer (post behavior should be calculated since that time) + previous_exposure = first_in_range_frame + for frame_idx in range(first_in_range_frame, range_end + 1): + if frame_idx > layer_frame_end: + break + + if frame_idx in exposure_frames: + previous_exposure = frame_idx + else: + output_idx_by_frame_idx[frame_idx] = previous_exposure + + # There can be frames before first exposure frame in range + # First check if we don't alreade have first range frame filled + if range_start in output_idx_by_frame_idx: + return + + first_exposure_frame = max(exposure_frames) + last_exposure_frame = max(exposure_frames) + # Check if is first exposure frame smaller than defined range + # if not then skip + if first_exposure_frame >= range_start: + return + + # Check is if last exposure frame is also before range start + # in that case we can't use fill frames before out range + if last_exposure_frame < range_start: + return + + closest_exposure_frame = first_exposure_frame + for frame_idx in exposure_frames: + if frame_idx >= range_start: + break + if frame_idx > closest_exposure_frame: + closest_exposure_frame = frame_idx + + output_idx_by_frame_idx[closest_exposure_frame] = closest_exposure_frame + for 
frame_idx in range(range_start, range_end + 1): + if frame_idx in output_idx_by_frame_idx: + break + output_idx_by_frame_idx[frame_idx] = closest_exposure_frame + + +def _cleanup_frame_references(output_idx_by_frame_idx): + """Cleanup frame references to frame reference. + + Cleanup not direct references to rendered frame. + ``` + // Example input + { + 1: 1, + 2: 1, + 3: 2 + } + // Result + { + 1: 1, + 2: 1, + 3: 1 // Changed reference to final rendered frame + } + ``` + Result is dictionary where keys leads to frame that should be rendered. + """ + for frame_idx in tuple(output_idx_by_frame_idx.keys()): + reference_idx = output_idx_by_frame_idx[frame_idx] + # Skip transparent frames + if reference_idx is None or reference_idx == frame_idx: + continue + + real_reference_idx = reference_idx + _tmp_reference_idx = reference_idx + while True: + _temp = output_idx_by_frame_idx[_tmp_reference_idx] + if _temp == _tmp_reference_idx: + real_reference_idx = _tmp_reference_idx + break + _tmp_reference_idx = _temp + + if real_reference_idx != reference_idx: + output_idx_by_frame_idx[frame_idx] = real_reference_idx + + +def _cleanup_out_range_frames(output_idx_by_frame_idx, range_start, range_end): + """Cleanup frame references to frames out of passed range. + + First available frame in range is used + ``` + // Example input. Range 2-3 + { + 1: 1, + 2: 1, + 3: 1 + } + // Result + { + 2: 2, // Redirect to self as is first that refence out range + 3: 2 // Redirect to first redirected frame + } + ``` + Result is dictionary where keys leads to frame that should be rendered. 
+ """ + in_range_frames_by_out_frames = collections.defaultdict(set) + out_range_frames = set() + for frame_idx in tuple(output_idx_by_frame_idx.keys()): + # Skip frames that are already out of range + if frame_idx < range_start or frame_idx > range_end: + out_range_frames.add(frame_idx) + continue + + reference_idx = output_idx_by_frame_idx[frame_idx] + # Skip transparent frames + if reference_idx is None: + continue + + # Skip references in range + if reference_idx < range_start or reference_idx > range_end: + in_range_frames_by_out_frames[reference_idx].add(frame_idx) + + for reference_idx in tuple(in_range_frames_by_out_frames.keys()): + frame_indexes = in_range_frames_by_out_frames.pop(reference_idx) + new_reference = None + for frame_idx in frame_indexes: + if new_reference is None: + new_reference = frame_idx + output_idx_by_frame_idx[frame_idx] = new_reference + + # Finally remove out of range frames + for frame_idx in out_range_frames: + output_idx_by_frame_idx.pop(frame_idx) + + +def calculate_layer_frame_references( + range_start, range_end, + layer_frame_start, + layer_frame_end, + exposure_frames, + pre_beh, post_beh +): + """Calculate frame references for one layer based on it's data. + + Output is dictionary where key is frame index referencing to rendered frame + index. If frame index should be rendered then is referencing to self. + + ``` + // Example output + { + 1: 1, // Reference to self - will be rendered + 2: 1, // Reference to frame 1 - will be copied + 3: 1, // Reference to frame 1 - will be copied + 4: 4, // Reference to self - will be rendered + ... + 20: 4 // Reference to frame 4 - will be copied + 21: None // Has reference to None - transparent image + } + ``` + + Args: + range_start(int): First frame of range which should be rendered. + range_end(int): Last frame of range which should be rendered. + layer_frame_start(int)L First frame of layer. + layer_frame_end(int): Last frame of layer. 
+ exposure_frames(list): List of all exposure frames on layer. + pre_beh(str): Pre behavior of layer (enum of 4 strings). + post_beh(str): Post behavior of layer (enum of 4 strings). + """ + # Output variable + output_idx_by_frame_idx = {} + # Skip if layer does not have any exposure frames + if not exposure_frames: + return output_idx_by_frame_idx + + # First calculate in range frames + _calculate_in_range_frames( + range_start, range_end, + exposure_frames, layer_frame_end, + output_idx_by_frame_idx + ) + # Calculate frames by pre behavior of layer + _calculate_pre_behavior_copy( + range_start, exposure_frames, pre_beh, + layer_frame_start, layer_frame_end, + output_idx_by_frame_idx + ) + # Calculate frames by post behavior of layer + _calculate_post_behavior_copy( + range_end, exposure_frames, post_beh, + layer_frame_start, layer_frame_end, + output_idx_by_frame_idx + ) + # Cleanup of referenced frames + _cleanup_frame_references(output_idx_by_frame_idx) + + # Remove frames out of range + _cleanup_out_range_frames(output_idx_by_frame_idx, range_start, range_end) + + return output_idx_by_frame_idx + + +def calculate_layers_extraction_data( + layers_data, + exposure_frames_by_layer_id, + behavior_by_layer_id, + range_start, + range_end, + skip_not_visible=True, + filename_prefix=None, + ext=None +): + """Calculate extraction data for passed layers data. + + ``` + { + : { + "frame_references": {...}, + "filenames_by_frame_index": {...} + }, + ... + } + ``` + + Frame references contains frame index reference to rendered frame index. + + Filename by frame index represents filename under which should be frame + stored. Directory is not handled here because each usage may need different + approach. + + Args: + layers_data(list): Layers data loaded from TVPaint. + exposure_frames_by_layer_id(dict): Exposure frames of layers stored by + layer id. + behavior_by_layer_id(dict): Pre and Post behavior of layers stored by + layer id. 
+ range_start(int): First frame of rendered range. + range_end(int): Last frame of rendered range. + skip_not_visible(bool): Skip calculations for hidden layers (Skipped + by default). + filename_prefix(str): Prefix before filename. + ext(str): Extension which filenames will have ('.png' is default). + + Returns: + dict: Prepared data for rendering by layer position. + """ + # Make sure layer ids are strings + # backwards compatibility when layer ids were integers + backwards_id_conversion(exposure_frames_by_layer_id) + backwards_id_conversion(behavior_by_layer_id) + + layer_template = get_layer_pos_filename_template( + range_end, filename_prefix, ext + ) + output = {} + for layer_data in layers_data: + if skip_not_visible and not layer_data["visible"]: + continue + + orig_layer_id = layer_data["layer_id"] + layer_id = str(orig_layer_id) + + # Skip if does not have any exposure frames (empty layer) + exposure_frames = exposure_frames_by_layer_id[layer_id] + if not exposure_frames: + continue + + layer_position = layer_data["position"] + layer_frame_start = layer_data["frame_start"] + layer_frame_end = layer_data["frame_end"] + + layer_behavior = behavior_by_layer_id[layer_id] + + pre_behavior = layer_behavior["pre"] + post_behavior = layer_behavior["post"] + + frame_references = calculate_layer_frame_references( + range_start, range_end, + layer_frame_start, + layer_frame_end, + exposure_frames, + pre_behavior, post_behavior + ) + # All values in 'frame_references' reference to a frame that must be + # rendered out + frames_to_render = set(frame_references.values()) + # Remove 'None' reference (transparent image) + if None in frames_to_render: + frames_to_render.remove(None) + + # Skip layer if has nothing to render + if not frames_to_render: + continue + + # All filenames that should be as output (not final output) + filename_frames = ( + set(range(range_start, range_end + 1)) + | frames_to_render + ) + filenames_by_frame_index = {} + for frame_idx in 
filename_frames: + filenames_by_frame_index[frame_idx] = layer_template.format( + pos=layer_position, + frame=frame_idx + ) + + # Store objects under the layer id + output[orig_layer_id] = { + "frame_references": frame_references, + "filenames_by_frame_index": filenames_by_frame_index + } + return output + + +def create_transparent_image_from_source(src_filepath, dst_filepath): + """Create transparent image of same type and size as source image.""" + img_obj = Image.open(src_filepath) + painter = ImageDraw.Draw(img_obj) + painter.rectangle((0, 0, *img_obj.size), fill=(0, 0, 0, 0)) + img_obj.save(dst_filepath) + + +def fill_reference_frames(frame_references, filepaths_by_frame): + # Store path to first transparent image if there is any + for frame_idx, ref_idx in frame_references.items(): + # Frame referencing to self should be rendered and used as source + # and reference indexes with None can't be filled + if ref_idx is None or frame_idx == ref_idx: + continue + + # Get destination filepath + src_filepath = filepaths_by_frame[ref_idx] + dst_filepath = filepaths_by_frame[frame_idx] + + if hasattr(os, "link"): + os.link(src_filepath, dst_filepath) + else: + shutil.copy(src_filepath, dst_filepath) + + +def copy_render_file(src_path, dst_path): + """Create copy file of an image.""" + if hasattr(os, "link"): + os.link(src_path, dst_path) + else: + shutil.copy(src_path, dst_path) + + +def cleanup_rendered_layers(filepaths_by_layer_id): + """Delete all files for each individual layer files after compositing.""" + # Collect all filepaths from data + all_filepaths = [] + for filepaths_by_frame in filepaths_by_layer_id.values(): + all_filepaths.extend(filepaths_by_frame.values()) + + # Loop over loop + for filepath in set(all_filepaths): + if filepath is not None and os.path.exists(filepath): + os.remove(filepath) + + +def composite_rendered_layers( + layers_data, filepaths_by_layer_id, + range_start, range_end, + dst_filepaths_by_frame, cleanup=True +): + """Composite 
multiple rendered layers by their position. + + Result is single frame sequence with transparency matching content + created in TVPaint. Missing source filepaths are replaced with transparent + images but at least one image must be rendered and exist. + + Function can be used even if single layer was created to fill transparent + filepaths. + + Args: + layers_data(list): Layers data loaded from TVPaint. + filepaths_by_layer_id(dict): Rendered filepaths stored by frame index + per layer id. Used as source for compositing. + range_start(int): First frame of rendered range. + range_end(int): Last frame of rendered range. + dst_filepaths_by_frame(dict): Output filepaths by frame where final + image after compositing will be stored. Path must not clash with + source filepaths. + cleanup(bool): Remove all source filepaths when done with compositing. + """ + # Prepare layers by their position + # - position tells in which order will compositing happen + layer_ids_by_position = {} + for layer in layers_data: + layer_position = layer["position"] + layer_ids_by_position[layer_position] = layer["layer_id"] + + # Sort layer positions + sorted_positions = tuple(sorted(layer_ids_by_position.keys())) + # Prepare variable where filepaths without any rendered content + # - transparent will be created + transparent_filepaths = set() + # Store first final filepath + first_dst_filepath = None + for frame_idx in range(range_start, range_end + 1): + dst_filepath = dst_filepaths_by_frame[frame_idx] + src_filepaths = [] + for layer_position in sorted_positions: + layer_id = layer_ids_by_position[layer_position] + filepaths_by_frame = filepaths_by_layer_id[layer_id] + src_filepath = filepaths_by_frame.get(frame_idx) + if src_filepath is not None: + src_filepaths.append(src_filepath) + + if not src_filepaths: + transparent_filepaths.add(dst_filepath) + continue + + # Store first destionation filepath to be used for transparent images + if first_dst_filepath is None: + first_dst_filepath = 
dst_filepath + + if len(src_filepaths) == 1: + src_filepath = src_filepaths[0] + if cleanup: + os.rename(src_filepath, dst_filepath) + else: + copy_render_file(src_filepath, dst_filepath) + + else: + composite_images(src_filepaths, dst_filepath) + + # Store first transparent filepath to be able copy it + transparent_filepath = None + for dst_filepath in transparent_filepaths: + if transparent_filepath is None: + create_transparent_image_from_source( + first_dst_filepath, dst_filepath + ) + transparent_filepath = dst_filepath + else: + copy_render_file(transparent_filepath, dst_filepath) + + # Remove all files that were used as source for compositing + if cleanup: + cleanup_rendered_layers(filepaths_by_layer_id) + + +def composite_images(input_image_paths, output_filepath): + """Composite images in order from passed list. + + Raises: + ValueError: When entered list is empty. + """ + if not input_image_paths: + raise ValueError("Nothing to composite.") + + img_obj = None + for image_filepath in input_image_paths: + _img_obj = Image.open(image_filepath) + if img_obj is None: + img_obj = _img_obj + else: + img_obj.alpha_composite(_img_obj) + img_obj.save(output_filepath) + + +def rename_filepaths_by_frame_start( + filepaths_by_frame, range_start, range_end, new_frame_start +): + """Change frames in filenames of finished images to new frame start.""" + # Skip if source first frame is same as destination first frame + if range_start == new_frame_start: + return + + # Calculate frame end + new_frame_end = range_end + (new_frame_start - range_start) + # Create filename template + filename_template = get_frame_filename_template( + max(range_end, new_frame_end) + ) + + # Use differnet ranges based on Mark In and output Frame Start values + # - this is to make sure that filename renaming won't affect files that + # are not renamed yet + if range_start < new_frame_start: + source_range = range(range_end, range_start - 1, -1) + output_range = range(new_frame_end, 
new_frame_start - 1, -1) + else: + # This is less possible situation as frame start will be in most + # cases higher than Mark In. + source_range = range(range_start, range_end + 1) + output_range = range(new_frame_start, new_frame_end + 1) + + new_dst_filepaths = {} + for src_frame, dst_frame in zip(source_range, output_range): + src_filepath = filepaths_by_frame[src_frame] + src_dirpath = os.path.dirname(src_filepath) + dst_filename = filename_template.format(frame=dst_frame) + dst_filepath = os.path.join(src_dirpath, dst_filename) + + os.rename(src_filepath, dst_filepath) + + new_dst_filepaths[dst_frame] = dst_filepath + return new_dst_filepaths diff --git a/openpype/hosts/tvpaint/plugins/load/load_workfile.py b/openpype/hosts/tvpaint/plugins/load/load_workfile.py new file mode 100644 index 0000000000..f410a1ab9d --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/load/load_workfile.py @@ -0,0 +1,102 @@ +import getpass +import os + +from avalon.tvpaint import lib, pipeline, get_current_workfile_context +from avalon import api, io +from openpype.lib import ( + get_workfile_template_key_from_context, + get_workdir_data +) +from openpype.api import Anatomy + + +class LoadWorkfile(pipeline.Loader): + """Load workfile.""" + + families = ["workfile"] + representations = ["tvpp"] + + label = "Load Workfile" + + def load(self, context, name, namespace, options): + # Load context of current workfile as first thing + # - which context and extension has + host = api.registered_host() + current_file = host.current_file() + + context = get_current_workfile_context() + + filepath = self.fname.replace("\\", "/") + + if not os.path.exists(filepath): + raise FileExistsError( + "The loaded file does not exist. Try downloading it first." + ) + + george_script = "tv_LoadProject '\"'\"{}\"'\"'".format( + filepath + ) + lib.execute_george_through_file(george_script) + + # Save workfile. 
+ host_name = "tvpaint" + asset_name = context.get("asset") + task_name = context.get("task") + # Far cases when there is workfile without context + if not asset_name: + asset_name = io.Session["AVALON_ASSET"] + task_name = io.Session["AVALON_TASK"] + + project_doc = io.find_one({ + "type": "project" + }) + asset_doc = io.find_one({ + "type": "asset", + "name": asset_name + }) + project_name = project_doc["name"] + + template_key = get_workfile_template_key_from_context( + asset_name, + task_name, + host_name, + project_name=project_name, + dbcon=io + ) + anatomy = Anatomy(project_name) + + data = get_workdir_data(project_doc, asset_doc, task_name, host_name) + data["root"] = anatomy.roots + data["user"] = getpass.getuser() + + template = anatomy.templates[template_key]["file"] + + # Define saving file extension + if current_file: + # Match the extension of current file + _, extension = os.path.splitext(current_file) + else: + # Fall back to the first extension supported for this host. + extension = host.file_extensions()[0] + + data["ext"] = extension + + work_root = api.format_template_with_optional_keys( + data, anatomy.templates[template_key]["folder"] + ) + version = api.last_workfile_with_version( + work_root, template, data, host.file_extensions() + )[1] + + if version is None: + version = 1 + else: + version += 1 + + data["version"] = version + + path = os.path.join( + work_root, + api.format_template_with_optional_keys(data, template) + ) + host.save_file(path) diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py index c45ff53c3c..6235b6211d 100644 --- a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py +++ b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py @@ -1,12 +1,18 @@ import os -import shutil import copy import tempfile import pyblish.api from avalon.tvpaint import lib from openpype.hosts.tvpaint.api.lib import composite_images -from PIL import Image, 
ImageDraw +from openpype.hosts.tvpaint.lib import ( + calculate_layers_extraction_data, + get_frame_filename_template, + fill_reference_frames, + composite_rendered_layers, + rename_filepaths_by_frame_start +) +from PIL import Image class ExtractSequence(pyblish.api.Extractor): @@ -111,14 +117,6 @@ class ExtractSequence(pyblish.api.Extractor): # ------------------------------------------------------------------- - filename_template = self._get_filename_template( - # Use the biggest number - max(mark_out, frame_end) - ) - ext = os.path.splitext(filename_template)[1].replace(".", "") - - self.log.debug("Using file template \"{}\"".format(filename_template)) - # Save to staging dir output_dir = instance.data.get("stagingDir") if not output_dir: @@ -133,30 +131,30 @@ class ExtractSequence(pyblish.api.Extractor): ) if instance.data["family"] == "review": - output_filenames, thumbnail_fullpath = self.render_review( - filename_template, output_dir, mark_in, mark_out, - scene_bg_color + result = self.render_review( + output_dir, mark_in, mark_out, scene_bg_color ) else: # Render output - output_filenames, thumbnail_fullpath = self.render( - filename_template, output_dir, - mark_in, mark_out, - filtered_layers + result = self.render( + output_dir, mark_in, mark_out, filtered_layers ) + output_filepaths_by_frame_idx, thumbnail_fullpath = result + # Change scene frame Start back to previous value lib.execute_george("tv_startframe {}".format(scene_start_frame)) # Sequence of one frame - if not output_filenames: + if not output_filepaths_by_frame_idx: self.log.warning("Extractor did not create any output.") return repre_files = self._rename_output_files( - filename_template, output_dir, - mark_in, mark_out, - output_frame_start, output_frame_end + output_filepaths_by_frame_idx, + mark_in, + mark_out, + output_frame_start ) # Fill tags and new families @@ -169,9 +167,11 @@ class ExtractSequence(pyblish.api.Extractor): if single_file: repre_files = repre_files[0] + # Extension is 
harcoded + # - changing extension would require change code new_repre = { - "name": ext, - "ext": ext, + "name": "png", + "ext": "png", "files": repre_files, "stagingDir": output_dir, "tags": tags @@ -206,69 +206,28 @@ class ExtractSequence(pyblish.api.Extractor): } instance.data["representations"].append(thumbnail_repre) - def _get_filename_template(self, frame_end): - """Get filetemplate for rendered files. - - This is simple template contains `{frame}{ext}` for sequential outputs - and `single_file{ext}` for single file output. Output is rendered to - temporary folder so filename should not matter as integrator change - them. - """ - frame_padding = 4 - frame_end_str_len = len(str(frame_end)) - if frame_end_str_len > frame_padding: - frame_padding = frame_end_str_len - - return "{{frame:0>{}}}".format(frame_padding) + ".png" - def _rename_output_files( - self, filename_template, output_dir, - mark_in, mark_out, output_frame_start, output_frame_end + self, filepaths_by_frame, mark_in, mark_out, output_frame_start ): - # Use differnet ranges based on Mark In and output Frame Start values - # - this is to make sure that filename renaming won't affect files that - # are not renamed yet - mark_start_is_less = bool(mark_in < output_frame_start) - if mark_start_is_less: - marks_range = range(mark_out, mark_in - 1, -1) - frames_range = range(output_frame_end, output_frame_start - 1, -1) - else: - # This is less possible situation as frame start will be in most - # cases higher than Mark In. 
- marks_range = range(mark_in, mark_out + 1) - frames_range = range(output_frame_start, output_frame_end + 1) + new_filepaths_by_frame = rename_filepaths_by_frame_start( + filepaths_by_frame, mark_in, mark_out, output_frame_start + ) - repre_filepaths = [] - for mark, frame in zip(marks_range, frames_range): - new_filename = filename_template.format(frame=frame) - new_filepath = os.path.join(output_dir, new_filename) + repre_filenames = [] + for filepath in new_filepaths_by_frame.values(): + repre_filenames.append(os.path.basename(filepath)) - repre_filepaths.append(new_filepath) + if mark_in < output_frame_start: + repre_filenames = list(reversed(repre_filenames)) - if mark != frame: - old_filename = filename_template.format(frame=mark) - old_filepath = os.path.join(output_dir, old_filename) - os.rename(old_filepath, new_filepath) - - # Reverse repre files order if output - if mark_start_is_less: - repre_filepaths = list(reversed(repre_filepaths)) - - return [ - os.path.basename(path) - for path in repre_filepaths - ] + return repre_filenames def render_review( - self, filename_template, output_dir, mark_in, mark_out, scene_bg_color + self, output_dir, mark_in, mark_out, scene_bg_color ): """ Export images from TVPaint using `tv_savesequence` command. Args: - filename_template (str): Filename template of an output. Template - should already contain extension. Template may contain only - keyword argument `{frame}` or index argument (for same value). - Extension in template must match `save_mode`. output_dir (str): Directory where files will be stored. mark_in (int): Starting frame index from which export will begin. mark_out (int): On which frame index export will end. @@ -279,6 +238,8 @@ class ExtractSequence(pyblish.api.Extractor): tuple: With 2 items first is list of filenames second is path to thumbnail. 
""" + filename_template = get_frame_filename_template(mark_out) + self.log.debug("Preparing data for rendering.") first_frame_filepath = os.path.join( output_dir, @@ -313,12 +274,13 @@ class ExtractSequence(pyblish.api.Extractor): lib.execute_george_through_file("\n".join(george_script_lines)) first_frame_filepath = None - output_filenames = [] - for frame in range(mark_in, mark_out + 1): - filename = filename_template.format(frame=frame) - output_filenames.append(filename) - + output_filepaths_by_frame_idx = {} + for frame_idx in range(mark_in, mark_out + 1): + filename = filename_template.format(frame=frame_idx) filepath = os.path.join(output_dir, filename) + + output_filepaths_by_frame_idx[frame_idx] = filepath + if not os.path.exists(filepath): raise AssertionError( "Output was not rendered. File was not found {}".format( @@ -337,16 +299,12 @@ class ExtractSequence(pyblish.api.Extractor): source_img = source_img.convert("RGB") source_img.save(thumbnail_filepath) - return output_filenames, thumbnail_filepath + return output_filepaths_by_frame_idx, thumbnail_filepath - def render(self, filename_template, output_dir, mark_in, mark_out, layers): + def render(self, output_dir, mark_in, mark_out, layers): """ Export images from TVPaint. Args: - filename_template (str): Filename template of an output. Template - should already contain extension. Template may contain only - keyword argument `{frame}` or index argument (for same value). - Extension in template must match `save_mode`. output_dir (str): Directory where files will be stored. mark_in (int): Starting frame index from which export will begin. mark_out (int): On which frame index export will end. 
@@ -360,12 +318,15 @@ class ExtractSequence(pyblish.api.Extractor): # Map layers by position layers_by_position = {} + layers_by_id = {} layer_ids = [] for layer in layers: + layer_id = layer["layer_id"] position = layer["position"] layers_by_position[position] = layer + layers_by_id[layer_id] = layer - layer_ids.append(layer["layer_id"]) + layer_ids.append(layer_id) # Sort layer positions in reverse order sorted_positions = list(reversed(sorted(layers_by_position.keys()))) @@ -374,59 +335,45 @@ class ExtractSequence(pyblish.api.Extractor): self.log.debug("Collecting pre/post behavior of individual layers.") behavior_by_layer_id = lib.get_layers_pre_post_behavior(layer_ids) - - tmp_filename_template = "pos_{pos}." + filename_template - - files_by_position = {} - for position in sorted_positions: - layer = layers_by_position[position] - behavior = behavior_by_layer_id[layer["layer_id"]] - - files_by_frames = self._render_layer( - layer, - tmp_filename_template, - output_dir, - behavior, - mark_in, - mark_out - ) - if files_by_frames: - files_by_position[position] = files_by_frames - else: - self.log.warning(( - "Skipped layer \"{}\". Probably out of Mark In/Out range." 
- ).format(layer["name"])) - - if not files_by_position: - layer_names = set(layer["name"] for layer in layers) - joined_names = ", ".join( - ["\"{}\"".format(name) for name in layer_names] - ) - self.log.warning( - "Layers {} do not have content in range {} - {}".format( - joined_names, mark_in, mark_out - ) - ) - return [], None - - output_filepaths = self._composite_files( - files_by_position, - mark_in, - mark_out, - filename_template, - output_dir + exposure_frames_by_layer_id = lib.get_layers_exposure_frames( + layer_ids, layers ) - self._cleanup_tmp_files(files_by_position) - - output_filenames = [ - os.path.basename(filepath) - for filepath in output_filepaths - ] + extraction_data_by_layer_id = calculate_layers_extraction_data( + layers, + exposure_frames_by_layer_id, + behavior_by_layer_id, + mark_in, + mark_out + ) + # Render layers + filepaths_by_layer_id = {} + for layer_id, render_data in extraction_data_by_layer_id.items(): + layer = layers_by_id[layer_id] + filepaths_by_layer_id[layer_id] = self._render_layer( + render_data, layer, output_dir + ) + # Prepare final filepaths where compositing should store result + output_filepaths_by_frame = {} thumbnail_src_filepath = None - if output_filepaths: - thumbnail_src_filepath = output_filepaths[0] + finale_template = get_frame_filename_template(mark_out) + for frame_idx in range(mark_in, mark_out + 1): + filename = finale_template.format(frame=frame_idx) + filepath = os.path.join(output_dir, filename) + output_filepaths_by_frame[frame_idx] = filepath + + if thumbnail_src_filepath is None: + thumbnail_src_filepath = filepath + + self.log.info("Started compositing of layer frames.") + composite_rendered_layers( + layers, filepaths_by_layer_id, + mark_in, mark_out, + output_filepaths_by_frame + ) + + self.log.info("Compositing finished") thumbnail_filepath = None if thumbnail_src_filepath and os.path.exists(thumbnail_src_filepath): source_img = Image.open(thumbnail_src_filepath) @@ -449,7 +396,7 @@ class 
ExtractSequence(pyblish.api.Extractor): ).format(source_img.mode)) source_img.save(thumbnail_filepath) - return output_filenames, thumbnail_filepath + return output_filepaths_by_frame, thumbnail_filepath def _get_review_bg_color(self): red = green = blue = 255 @@ -460,338 +407,43 @@ class ExtractSequence(pyblish.api.Extractor): red, green, blue = self.review_bg return (red, green, blue) - def _render_layer( - self, - layer, - tmp_filename_template, - output_dir, - behavior, - mark_in_index, - mark_out_index - ): + def _render_layer(self, render_data, layer, output_dir): + frame_references = render_data["frame_references"] + filenames_by_frame_index = render_data["filenames_by_frame_index"] + layer_id = layer["layer_id"] - frame_start_index = layer["frame_start"] - frame_end_index = layer["frame_end"] - - pre_behavior = behavior["pre"] - post_behavior = behavior["post"] - - # Check if layer is before mark in - if frame_end_index < mark_in_index: - # Skip layer if post behavior is "none" - if post_behavior == "none": - return {} - - # Check if layer is after mark out - elif frame_start_index > mark_out_index: - # Skip layer if pre behavior is "none" - if pre_behavior == "none": - return {} - - exposure_frames = lib.get_exposure_frames( - layer_id, frame_start_index, frame_end_index - ) - - if frame_start_index not in exposure_frames: - exposure_frames.append(frame_start_index) - - layer_files_by_frame = {} george_script_lines = [ + "tv_layerset {}".format(layer_id), "tv_SaveMode \"PNG\"" ] - layer_position = layer["position"] - for frame_idx in exposure_frames: - filename = tmp_filename_template.format( - pos=layer_position, - frame=frame_idx - ) + filepaths_by_frame = {} + frames_to_render = [] + for frame_idx, ref_idx in frame_references.items(): + # None reference is skipped because does not have source + if ref_idx is None: + filepaths_by_frame[frame_idx] = None + continue + filename = filenames_by_frame_index[frame_idx] dst_path = "/".join([output_dir, 
filename]) - layer_files_by_frame[frame_idx] = os.path.normpath(dst_path) + filepaths_by_frame[frame_idx] = dst_path + if frame_idx != ref_idx: + continue + frames_to_render.append(str(frame_idx)) # Go to frame george_script_lines.append("tv_layerImage {}".format(frame_idx)) # Store image to output george_script_lines.append("tv_saveimage \"{}\"".format(dst_path)) self.log.debug("Rendering Exposure frames {} of layer {} ({})".format( - str(exposure_frames), layer_id, layer["name"] + ",".join(frames_to_render), layer_id, layer["name"] )) # Let TVPaint render layer's image lib.execute_george_through_file("\n".join(george_script_lines)) # Fill frames between `frame_start_index` and `frame_end_index` - self.log.debug(( - "Filling frames between first and last frame of layer ({} - {})." - ).format(frame_start_index + 1, frame_end_index + 1)) + self.log.debug("Filling frames not rendered frames.") + fill_reference_frames(frame_references, filepaths_by_frame) - _debug_filled_frames = [] - prev_filepath = None - for frame_idx in range(frame_start_index, frame_end_index + 1): - if frame_idx in layer_files_by_frame: - prev_filepath = layer_files_by_frame[frame_idx] - continue - - if prev_filepath is None: - raise ValueError("BUG: First frame of layer was not rendered!") - _debug_filled_frames.append(frame_idx) - filename = tmp_filename_template.format( - pos=layer_position, - frame=frame_idx - ) - new_filepath = "/".join([output_dir, filename]) - self._copy_image(prev_filepath, new_filepath) - layer_files_by_frame[frame_idx] = new_filepath - - self.log.debug("Filled frames {}".format(str(_debug_filled_frames))) - - # Fill frames by pre/post behavior of layer - self.log.debug(( - "Completing image sequence of layer by pre/post behavior." 
- " PRE: {} | POST: {}" - ).format(pre_behavior, post_behavior)) - - # Pre behavior - self._fill_frame_by_pre_behavior( - layer, - pre_behavior, - mark_in_index, - layer_files_by_frame, - tmp_filename_template, - output_dir - ) - self._fill_frame_by_post_behavior( - layer, - post_behavior, - mark_out_index, - layer_files_by_frame, - tmp_filename_template, - output_dir - ) - return layer_files_by_frame - - def _fill_frame_by_pre_behavior( - self, - layer, - pre_behavior, - mark_in_index, - layer_files_by_frame, - filename_template, - output_dir - ): - layer_position = layer["position"] - frame_start_index = layer["frame_start"] - frame_end_index = layer["frame_end"] - frame_count = frame_end_index - frame_start_index + 1 - if mark_in_index >= frame_start_index: - self.log.debug(( - "Skipping pre-behavior." - " All frames after Mark In are rendered." - )) - return - - if pre_behavior == "none": - # Empty frames are handled during `_composite_files` - pass - - elif pre_behavior == "hold": - # Keep first frame for whole time - eq_frame_filepath = layer_files_by_frame[frame_start_index] - for frame_idx in range(mark_in_index, frame_start_index): - filename = filename_template.format( - pos=layer_position, - frame=frame_idx - ) - new_filepath = "/".join([output_dir, filename]) - self._copy_image(eq_frame_filepath, new_filepath) - layer_files_by_frame[frame_idx] = new_filepath - - elif pre_behavior in ("loop", "repeat"): - # Loop backwards from last frame of layer - for frame_idx in reversed(range(mark_in_index, frame_start_index)): - eq_frame_idx_offset = ( - (frame_end_index - frame_idx) % frame_count - ) - eq_frame_idx = frame_end_index - eq_frame_idx_offset - eq_frame_filepath = layer_files_by_frame[eq_frame_idx] - - filename = filename_template.format( - pos=layer_position, - frame=frame_idx - ) - new_filepath = "/".join([output_dir, filename]) - self._copy_image(eq_frame_filepath, new_filepath) - layer_files_by_frame[frame_idx] = new_filepath - - elif pre_behavior 
== "pingpong": - half_seq_len = frame_count - 1 - seq_len = half_seq_len * 2 - for frame_idx in reversed(range(mark_in_index, frame_start_index)): - eq_frame_idx_offset = (frame_start_index - frame_idx) % seq_len - if eq_frame_idx_offset > half_seq_len: - eq_frame_idx_offset = (seq_len - eq_frame_idx_offset) - eq_frame_idx = frame_start_index + eq_frame_idx_offset - - eq_frame_filepath = layer_files_by_frame[eq_frame_idx] - - filename = filename_template.format( - pos=layer_position, - frame=frame_idx - ) - new_filepath = "/".join([output_dir, filename]) - self._copy_image(eq_frame_filepath, new_filepath) - layer_files_by_frame[frame_idx] = new_filepath - - def _fill_frame_by_post_behavior( - self, - layer, - post_behavior, - mark_out_index, - layer_files_by_frame, - filename_template, - output_dir - ): - layer_position = layer["position"] - frame_start_index = layer["frame_start"] - frame_end_index = layer["frame_end"] - frame_count = frame_end_index - frame_start_index + 1 - if mark_out_index <= frame_end_index: - self.log.debug(( - "Skipping post-behavior." - " All frames up to Mark Out are rendered." 
- )) - return - - if post_behavior == "none": - # Empty frames are handled during `_composite_files` - pass - - elif post_behavior == "hold": - # Keep first frame for whole time - eq_frame_filepath = layer_files_by_frame[frame_end_index] - for frame_idx in range(frame_end_index + 1, mark_out_index + 1): - filename = filename_template.format( - pos=layer_position, - frame=frame_idx - ) - new_filepath = "/".join([output_dir, filename]) - self._copy_image(eq_frame_filepath, new_filepath) - layer_files_by_frame[frame_idx] = new_filepath - - elif post_behavior in ("loop", "repeat"): - # Loop backwards from last frame of layer - for frame_idx in range(frame_end_index + 1, mark_out_index + 1): - eq_frame_idx = frame_idx % frame_count - eq_frame_filepath = layer_files_by_frame[eq_frame_idx] - - filename = filename_template.format( - pos=layer_position, - frame=frame_idx - ) - new_filepath = "/".join([output_dir, filename]) - self._copy_image(eq_frame_filepath, new_filepath) - layer_files_by_frame[frame_idx] = new_filepath - - elif post_behavior == "pingpong": - half_seq_len = frame_count - 1 - seq_len = half_seq_len * 2 - for frame_idx in range(frame_end_index + 1, mark_out_index + 1): - eq_frame_idx_offset = (frame_idx - frame_end_index) % seq_len - if eq_frame_idx_offset > half_seq_len: - eq_frame_idx_offset = seq_len - eq_frame_idx_offset - eq_frame_idx = frame_end_index - eq_frame_idx_offset - - eq_frame_filepath = layer_files_by_frame[eq_frame_idx] - - filename = filename_template.format( - pos=layer_position, - frame=frame_idx - ) - new_filepath = "/".join([output_dir, filename]) - self._copy_image(eq_frame_filepath, new_filepath) - layer_files_by_frame[frame_idx] = new_filepath - - def _composite_files( - self, files_by_position, frame_start, frame_end, - filename_template, output_dir - ): - """Composite frames when more that one layer was exported. 
- - This method is used when more than one layer is rendered out so and - output should be composition of each frame of rendered layers. - Missing frames are filled with transparent images. - """ - self.log.debug("Preparing files for compisiting.") - # Prepare paths to images by frames into list where are stored - # in order of compositing. - images_by_frame = {} - for frame_idx in range(frame_start, frame_end + 1): - images_by_frame[frame_idx] = [] - for position in sorted(files_by_position.keys(), reverse=True): - position_data = files_by_position[position] - if frame_idx in position_data: - filepath = position_data[frame_idx] - images_by_frame[frame_idx].append(filepath) - - output_filepaths = [] - missing_frame_paths = [] - random_frame_path = None - for frame_idx in sorted(images_by_frame.keys()): - image_filepaths = images_by_frame[frame_idx] - output_filename = filename_template.format(frame=frame_idx) - output_filepath = os.path.join(output_dir, output_filename) - output_filepaths.append(output_filepath) - - # Store information about missing frame and skip - if not image_filepaths: - missing_frame_paths.append(output_filepath) - continue - - # Just rename the file if is no need of compositing - if len(image_filepaths) == 1: - os.rename(image_filepaths[0], output_filepath) - - # Composite images - else: - composite_images(image_filepaths, output_filepath) - - # Store path of random output image that will 100% exist after all - # multiprocessing as mockup for missing frames - if random_frame_path is None: - random_frame_path = output_filepath - - self.log.debug( - "Creating transparent images for frames without render {}.".format( - str(missing_frame_paths) - ) - ) - # Fill the sequence with transparent frames - transparent_filepath = None - for filepath in missing_frame_paths: - if transparent_filepath is None: - img_obj = Image.open(random_frame_path) - painter = ImageDraw.Draw(img_obj) - painter.rectangle((0, 0, *img_obj.size), fill=(0, 0, 0, 0)) - 
img_obj.save(filepath) - transparent_filepath = filepath - else: - self._copy_image(transparent_filepath, filepath) - return output_filepaths - - def _cleanup_tmp_files(self, files_by_position): - """Remove temporary files that were used for compositing.""" - for data in files_by_position.values(): - for filepath in data.values(): - if os.path.exists(filepath): - os.remove(filepath) - - def _copy_image(self, src_path, dst_path): - """Create a copy of an image. - - This was added to be able easier change copy method. - """ - # Create hardlink of image instead of copying if possible - if hasattr(os, "link"): - os.link(src_path, dst_path) - else: - shutil.copy(src_path, dst_path) + return filepaths_by_frame diff --git a/openpype/hosts/tvpaint/worker/__init__.py b/openpype/hosts/tvpaint/worker/__init__.py new file mode 100644 index 0000000000..69208a7566 --- /dev/null +++ b/openpype/hosts/tvpaint/worker/__init__.py @@ -0,0 +1,21 @@ +from .worker_job import ( + JobFailed, + ExecuteSimpleGeorgeScript, + ExecuteGeorgeScript, + CollectSceneData, + SenderTVPaintCommands, + ProcessTVPaintCommands +) + +from .worker import main + +__all__ = ( + "JobFailed", + "ExecuteSimpleGeorgeScript", + "ExecuteGeorgeScript", + "CollectSceneData", + "SenderTVPaintCommands", + "ProcessTVPaintCommands", + + "main" +) diff --git a/openpype/hosts/tvpaint/worker/worker.py b/openpype/hosts/tvpaint/worker/worker.py new file mode 100644 index 0000000000..738656fa91 --- /dev/null +++ b/openpype/hosts/tvpaint/worker/worker.py @@ -0,0 +1,133 @@ +import signal +import time +import asyncio + +from avalon.tvpaint.communication_server import ( + BaseCommunicator, + CommunicationWrapper +) +from openpype_modules.job_queue.job_workers import WorkerJobsConnection + +from .worker_job import ProcessTVPaintCommands + + +class TVPaintWorkerCommunicator(BaseCommunicator): + """Modified commuicator which cares about processing jobs. + + Received jobs are send to TVPaint by parsing 'ProcessTVPaintCommands'. 
+ """ + def __init__(self, server_url): + super().__init__() + + self.return_code = 1 + self._server_url = server_url + self._worker_connection = None + + def _start_webserver(self): + """Create connection to workers server before TVPaint server.""" + loop = self.websocket_server.loop + self._worker_connection = WorkerJobsConnection( + self._server_url, "tvpaint", loop + ) + asyncio.ensure_future( + self._worker_connection.main_loop(register_worker=False), + loop=loop + ) + + super()._start_webserver() + + def _on_client_connect(self, *args, **kwargs): + super()._on_client_connect(*args, **kwargs) + # Register as "ready to work" worker + self._worker_connection.register_as_worker() + + def stop(self): + """Stop worker connection and TVPaint server.""" + self._worker_connection.stop() + self.return_code = 0 + super().stop() + + @property + def current_job(self): + """Retrieve job which should be processed.""" + if self._worker_connection: + return self._worker_connection.current_job + return None + + def _check_process(self): + if self.process is None: + return True + + if self.process.poll() is not None: + asyncio.ensure_future( + self._worker_connection.disconnect(), + loop=self.websocket_server.loop + ) + self._exit() + return False + return True + + def _process_job(self): + job = self.current_job + if job is None: + return + + # Prepare variables used for sendig + success = False + message = "Unknown function" + data = None + job_data = job["data"] + workfile = job_data["workfile"] + # Currently can process only "commands" function + if job_data.get("function") == "commands": + try: + commands = ProcessTVPaintCommands( + workfile, job_data["commands"], self + ) + commands.execute() + data = commands.response_data() + success = True + message = "Executed" + + except Exception as exc: + message = "Error on worker: {}".format(str(exc)) + + self._worker_connection.finish_job(success, message, data) + + def main_loop(self): + """Main loop where jobs are processed. 
+ + Server is stopped by killing this process or TVPaint process. + """ + while self.server_is_running: + if self._check_process(): + self._process_job() + time.sleep(1) + + return self.return_code + + +def _start_tvpaint(tvpaint_executable_path, server_url): + communicator = TVPaintWorkerCommunicator(server_url) + CommunicationWrapper.set_communicator(communicator) + communicator.launch([tvpaint_executable_path]) + + +def main(tvpaint_executable_path, server_url): + # Register terminal signal handler + def signal_handler(*_args): + print("Termination signal received. Stopping.") + if CommunicationWrapper.communicator is not None: + CommunicationWrapper.communicator.stop() + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + _start_tvpaint(tvpaint_executable_path, server_url) + + communicator = CommunicationWrapper.communicator + if communicator is None: + print("Communicator is not set") + return 1 + + return communicator.main_loop() diff --git a/openpype/hosts/tvpaint/worker/worker_job.py b/openpype/hosts/tvpaint/worker/worker_job.py new file mode 100644 index 0000000000..c3893b6f2e --- /dev/null +++ b/openpype/hosts/tvpaint/worker/worker_job.py @@ -0,0 +1,537 @@ +import os +import tempfile +import inspect +import copy +import json +import time +from uuid import uuid4 +from abc import ABCMeta, abstractmethod, abstractproperty + +import six + +from openpype.api import PypeLogger +from openpype.modules import ModulesManager + + +TMP_FILE_PREFIX = "opw_tvp_" + + +class JobFailed(Exception): + """Raised when job was sent and finished unsuccessfully.""" + def __init__(self, job_status): + job_state = job_status["state"] + job_message = job_status["message"] or "Unknown issue" + error_msg = ( + "Job didn't finish properly." 
+ " Job state: \"{}\" | Job message: \"{}\"" + ).format(job_state, job_message) + + self.job_status = job_status + + super().__init__(error_msg) + + +@six.add_metaclass(ABCMeta) +class BaseCommand: + """Abstract TVPaint command which can be executed through worker. + + Each command must have unique name and implemented 'execute' and + 'from_existing' methods. + + Command also have id which is created on command creation. + + The idea is that command is just a data container on sender side send + througth server to a worker where is replicated one by one, executed and + result sent back to sender through server. + """ + @abstractproperty + def name(self): + """Command name (must be unique).""" + pass + + def __init__(self, data=None): + if data is None: + data = {} + else: + data = copy.deepcopy(data) + + # Use 'id' from data when replicating on process side + command_id = data.get("id") + if command_id is None: + command_id = str(uuid4()) + data["id"] = command_id + data["command"] = self.name + + self._parent = None + self._result = None + self._command_data = data + self._done = False + + def job_queue_root(self): + """Access to job queue root. + + Job queue root is shared access point to files shared across senders + and workers. + """ + if self._parent is None: + return None + return self._parent.job_queue_root() + + def set_parent(self, parent): + self._parent = parent + + @property + def id(self): + """Command id.""" + return self._command_data["id"] + + @property + def parent(self): + """Parent of command expected type of 'TVPaintCommands'.""" + return self._parent + + @property + def communicator(self): + """TVPaint communicator. + + Available only on worker side. 
+ """ + return self._parent.communicator + + @property + def done(self): + """Is command done.""" + return self._done + + def set_done(self): + """Change state of done.""" + self._done = True + + def set_result(self, result): + """Set result of executed command.""" + self._result = result + + def result(self): + """Result of command.""" + return copy.deepcopy(self._result) + + def response_data(self): + """Data send as response to sender.""" + return { + "id": self.id, + "result": self._result, + "done": self._done + } + + def command_data(self): + """Raw command data.""" + return copy.deepcopy(self._command_data) + + @abstractmethod + def execute(self): + """Execute command on worker side.""" + pass + + @classmethod + @abstractmethod + def from_existing(cls, data): + """Recreate object based on passed data.""" + pass + + def execute_george(self, george_script): + """Execute george script in TVPaint.""" + return self.parent.execute_george(george_script) + + def execute_george_through_file(self, george_script): + """Execute george script through temp file in TVPaint.""" + return self.parent.execute_george_through_file(george_script) + + +class ExecuteSimpleGeorgeScript(BaseCommand): + """Execute simple george script in TVPaint. + + Args: + script(str): Script that will be executed. + """ + name = "execute_george_simple" + + def __init__(self, script, data=None): + data = data or {} + data["script"] = script + self._script = script + super().__init__(data) + + def execute(self): + self._result = self.execute_george(self._script) + + @classmethod + def from_existing(cls, data): + script = data.pop("script") + return cls(script, data) + + +class ExecuteGeorgeScript(BaseCommand): + """Execute multiline george script in TVPaint. + + Args: + script_lines(list): Lines that will be executed in george script + through temp george file. 
+ tmp_file_keys(list): List of formatting keys in george script that + require replacement with path to a temp file where result will be + stored. The content of file is stored to result by the key. + root_dir_key(str): Formatting key that will be replaced in george + script with job queue root which can be different on worker side. + data(dict): Raw data about command. + """ + name = "execute_george_through_file" + + def __init__( + self, script_lines, tmp_file_keys=None, root_dir_key=None, data=None + ): + data = data or {} + if not tmp_file_keys: + tmp_file_keys = data.get("tmp_file_keys") or [] + + data["script_lines"] = script_lines + data["tmp_file_keys"] = tmp_file_keys + data["root_dir_key"] = root_dir_key + self._script_lines = script_lines + self._tmp_file_keys = tmp_file_keys + self._root_dir_key = root_dir_key + super().__init__(data) + + def execute(self): + filepath_by_key = {} + script = self._script_lines + if isinstance(script, list): + script = "\n".join(script) + + # Replace temporary files in george script + for key in self._tmp_file_keys: + output_file = tempfile.NamedTemporaryFile( + mode="w", prefix=TMP_FILE_PREFIX, suffix=".txt", delete=False + ) + output_file.close() + format_key = "{" + key + "}" + output_path = output_file.name.replace("\\", "/") + script = script.replace(format_key, output_path) + filepath_by_key[key] = output_path + + # Replace job queue root in script + if self._root_dir_key: + job_queue_root = self.job_queue_root() + format_key = "{" + self._root_dir_key + "}" + script = script.replace( + format_key, job_queue_root.replace("\\", "/") + ) + + # Execute the script + self.execute_george_through_file(script) + + # Store result of temporary files + result = {} + for key, filepath in filepath_by_key.items(): + with open(filepath, "r") as stream: + data = stream.read() + result[key] = data + os.remove(filepath) + + self._result = result + + @classmethod + def from_existing(cls, data): + """Recreate the object from data.""" + 
script_lines = data.pop("script_lines") + tmp_file_keys = data.pop("tmp_file_keys", None) + root_dir_key = data.pop("root_dir_key", None) + return cls(script_lines, tmp_file_keys, root_dir_key, data) + + +class CollectSceneData(BaseCommand): + """Helper command which will collect all usefull info about workfile. + + Result is dictionary with all layers data, exposure frames by layer ids + pre/post behavior of layers by their ids, group information and scene data. + """ + name = "collect_scene_data" + + def execute(self): + from avalon.tvpaint.lib import ( + get_layers_data, + get_groups_data, + get_layers_pre_post_behavior, + get_layers_exposure_frames, + get_scene_data + ) + + groups_data = get_groups_data(communicator=self.communicator) + layers_data = get_layers_data(communicator=self.communicator) + layer_ids = [ + layer_data["layer_id"] + for layer_data in layers_data + ] + pre_post_beh_by_layer_id = get_layers_pre_post_behavior( + layer_ids, communicator=self.communicator + ) + exposure_frames_by_layer_id = get_layers_exposure_frames( + layer_ids, layers_data, communicator=self.communicator + ) + + self._result = { + "layers_data": layers_data, + "exposure_frames_by_layer_id": exposure_frames_by_layer_id, + "pre_post_beh_by_layer_id": pre_post_beh_by_layer_id, + "groups_data": groups_data, + "scene_data": get_scene_data(self.communicator) + } + + @classmethod + def from_existing(cls, data): + return cls(data) + + +@six.add_metaclass(ABCMeta) +class TVPaintCommands: + """Wrapper around TVPaint commands to be able send multiple commands. + + Commands may send one or multiple commands at once. Also gives api access + for commands info. + + Base for sender and receiver which are extending the logic for their + purposes. One of differences is preparation of workfile path. + + Args: + workfile(str): Path to workfile. + job_queue_module(JobQueueModule): Object of OpenPype module JobQueue. 
+ """ + def __init__(self, workfile, job_queue_module=None): + self._log = None + self._commands = [] + self._command_classes_by_name = None + if job_queue_module is None: + manager = ModulesManager() + job_queue_module = manager.modules_by_name["job_queue"] + self._job_queue_module = job_queue_module + + self._workfile = self._prepare_workfile(workfile) + + @abstractmethod + def _prepare_workfile(self, workfile): + """Modification of workfile path on initialization to match platorm.""" + pass + + def job_queue_root(self): + """Job queue root for current platform using current settings.""" + return self._job_queue_module.get_jobs_root_from_settings() + + @property + def log(self): + """Access to logger object.""" + if self._log is None: + self._log = PypeLogger.get_logger(self.__class__.__name__) + return self._log + + @property + def classes_by_name(self): + """Prepare commands classes for validation and recreation of commands. + + It is expected that all commands are defined in this python file so + we're looking for all implementation of BaseCommand in globals. 
+ """ + if self._command_classes_by_name is None: + command_classes_by_name = {} + for attr in globals().values(): + if ( + not inspect.isclass(attr) + or not issubclass(attr, BaseCommand) + or attr is BaseCommand + ): + continue + + if inspect.isabstract(attr): + self.log.debug( + "Skipping abstract class {}".format(attr.__name__) + ) + command_classes_by_name[attr.name] = attr + self._command_classes_by_name = command_classes_by_name + + return self._command_classes_by_name + + def add_command(self, command): + """Add command to process.""" + command.set_parent(self) + self._commands.append(command) + + def result(self): + """Result of commands in list in which they were processed.""" + return [ + command.result() + for command in self._commands + ] + + def response_data(self): + """Data which should be send from worker.""" + return [ + command.response_data() + for command in self._commands + ] + + +class SenderTVPaintCommands(TVPaintCommands): + """Sender implementation of TVPaint Commands.""" + def _prepare_workfile(self, workfile): + """Remove job queue root from workfile path. + + It is expected that worker will add it's root before passed workfile. + """ + new_workfile = workfile.replace("\\", "/") + job_queue_root = self.job_queue_root().replace("\\", "/") + if job_queue_root not in new_workfile: + raise ValueError(( + "Workfile is not located in JobQueue root." + " Workfile path: \"{}\". 
JobQueue root: \"{}\"" + ).format(workfile, job_queue_root)) + return new_workfile.replace(job_queue_root, "") + + def commands_data(self): + """Commands data to be able recreate them.""" + return [ + command.command_data() + for command in self._commands + ] + + def to_job_data(self): + """Convert commands to job data before sending to workers server.""" + return { + "workfile": self._workfile, + "function": "commands", + "commands": self.commands_data() + } + + def set_result(self, result): + commands_by_id = { + command.id: command + for command in self._commands + } + + for item in result: + command = commands_by_id[item["id"]] + command.set_result(item["result"]) + command.set_done() + + def _send_job(self): + """Send job to a workers server.""" + # Send job data to job queue server + job_data = self.to_job_data() + self.log.debug("Sending job to JobQueue server.\n{}".format( + json.dumps(job_data, indent=4) + )) + job_id = self._job_queue_module.send_job("tvpaint", job_data) + self.log.info(( + "Job sent to JobQueue server and got id \"{}\"." + " Waiting for finishing the job." + ).format(job_id)) + + return job_id + + def send_job_and_wait(self): + """Send job to workers server and wait for response. + + Result of job is stored into the object. + + Raises: + JobFailed: When job was finished but not successfully. + """ + job_id = self._send_job() + while True: + job_status = self._job_queue_module.get_job_status(job_id) + if job_status["done"]: + break + time.sleep(1) + + # Check if job state is done + if job_status["state"] != "done": + raise JobFailed(job_status) + + self.set_result(job_status["result"]) + + self.log.debug("Job is done and result is stored.") + + +class ProcessTVPaintCommands(TVPaintCommands): + """Worker side of TVPaint Commands. + + It is expected this object is created only on worker's side from existing + data loaded from job. + + Workfile path logic is based on 'SenderTVPaintCommands'. 
+ """ + def __init__(self, workfile, commands, communicator): + super(ProcessTVPaintCommands, self).__init__(workfile) + + self._communicator = communicator + + self.commands_from_data(commands) + + def _prepare_workfile(self, workfile): + """Preprend job queue root before passed workfile.""" + workfile = workfile.replace("\\", "/") + job_queue_root = self.job_queue_root().replace("\\", "/") + new_workfile = "/".join([job_queue_root, workfile]) + while "//" in new_workfile: + new_workfile = new_workfile.replace("//", "/") + return os.path.normpath(new_workfile) + + @property + def communicator(self): + """Access to TVPaint communicator.""" + return self._communicator + + def commands_from_data(self, commands_data): + """Recreate command from passed data.""" + for command_data in commands_data: + command_name = command_data["command"] + + klass = self.classes_by_name[command_name] + command = klass.from_existing(command_data) + self.add_command(command) + + def execute_george(self, george_script): + """Helper method to execute george script.""" + return self.communicator.execute_george(george_script) + + def execute_george_through_file(self, george_script): + """Helper method to execute george script through temp file.""" + temporary_file = tempfile.NamedTemporaryFile( + mode="w", prefix=TMP_FILE_PREFIX, suffix=".grg", delete=False + ) + temporary_file.write(george_script) + temporary_file.close() + temp_file_path = temporary_file.name.replace("\\", "/") + self.execute_george("tv_runscript {}".format(temp_file_path)) + os.remove(temp_file_path) + + def _open_workfile(self): + """Open workfile in TVPaint.""" + workfile = self._workfile + print("Opening workfile {}".format(workfile)) + george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(workfile) + self.execute_george_through_file(george_script) + + def _close_workfile(self): + """Close workfile in TVPaint.""" + print("Closing workfile") + self.execute_george_through_file("tv_projectclose") + + def execute(self): + 
"""Execute commands.""" + # First open the workfile + self._open_workfile() + # Execute commands one by one + # TODO maybe stop processing when command fails? + print("Commands execution started ({})".format(len(self._commands))) + for command in self._commands: + command.execute() + command.set_done() + # Finally close workfile + self._close_workfile() diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_instances.py b/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_instances.py new file mode 100644 index 0000000000..c533403e5f --- /dev/null +++ b/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_instances.py @@ -0,0 +1,255 @@ +""" +Requires: + CollectTVPaintWorkfileData + +Provides: + Instances +""" +import os +import re +import copy +import pyblish.api + +from openpype.lib import get_subset_name_with_asset_doc + + +class CollectTVPaintInstances(pyblish.api.ContextPlugin): + label = "Collect TVPaint Instances" + order = pyblish.api.CollectorOrder + 0.2 + hosts = ["webpublisher"] + targets = ["tvpaint_worker"] + + workfile_family = "workfile" + workfile_variant = "" + review_family = "review" + review_variant = "Main" + render_pass_family = "renderPass" + render_layer_family = "renderLayer" + render_layer_pass_name = "beauty" + + # Set by settings + # Regex must constain 'layer' and 'variant' groups which are extracted from + # name when instances are created + layer_name_regex = r"(?PL[0-9]{3}_\w+)_(?P.+)" + + def process(self, context): + # Prepare compiled regex + layer_name_regex = re.compile(self.layer_name_regex) + + layers_data = context.data["layersData"] + + host_name = "tvpaint" + task_name = context.data.get("task") + asset_doc = context.data["assetEntity"] + project_doc = context.data["projectEntity"] + project_name = project_doc["name"] + + new_instances = [] + + # Workfile instance + workfile_subset_name = get_subset_name_with_asset_doc( + self.workfile_family, + self.workfile_variant, + task_name, + 
asset_doc, + project_name, + host_name + ) + workfile_instance = self._create_workfile_instance( + context, workfile_subset_name + ) + new_instances.append(workfile_instance) + + # Review instance + review_subset_name = get_subset_name_with_asset_doc( + self.review_family, + self.review_variant, + task_name, + asset_doc, + project_name, + host_name + ) + review_instance = self._create_review_instance( + context, review_subset_name + ) + new_instances.append(review_instance) + + # Get render layers and passes from TVPaint layers + # - it's based on regex extraction + layers_by_layer_and_pass = {} + for layer in layers_data: + # Filter only visible layers + if not layer["visible"]: + continue + + result = layer_name_regex.search(layer["name"]) + # Layer name not matching layer name regex + # should raise an exception? + if result is None: + continue + render_layer = result.group("layer") + render_pass = result.group("pass") + + render_pass_maping = layers_by_layer_and_pass.get( + render_layer + ) + if render_pass_maping is None: + render_pass_maping = {} + layers_by_layer_and_pass[render_layer] = render_pass_maping + + if render_pass not in render_pass_maping: + render_pass_maping[render_pass] = [] + render_pass_maping[render_pass].append(copy.deepcopy(layer)) + + layers_by_render_layer = {} + for render_layer, render_passes in layers_by_layer_and_pass.items(): + render_layer_layers = [] + layers_by_render_layer[render_layer] = render_layer_layers + for render_pass, layers in render_passes.items(): + render_layer_layers.extend(copy.deepcopy(layers)) + dynamic_data = { + "render_pass": render_pass, + "render_layer": render_layer, + # Override family for subset name + "family": "render" + } + + subset_name = get_subset_name_with_asset_doc( + self.render_pass_family, + render_pass, + task_name, + asset_doc, + project_name, + host_name, + dynamic_data=dynamic_data + ) + + instance = self._create_render_pass_instance( + context, layers, subset_name + ) + 
new_instances.append(instance) + + for render_layer, layers in layers_by_render_layer.items(): + variant = render_layer + dynamic_data = { + "render_pass": self.render_layer_pass_name, + "render_layer": render_layer, + # Override family for subset name + "family": "render" + } + subset_name = get_subset_name_with_asset_doc( + self.render_pass_family, + variant, + task_name, + asset_doc, + project_name, + host_name, + dynamic_data=dynamic_data + ) + instance = self._create_render_layer_instance( + context, layers, subset_name + ) + new_instances.append(instance) + + # Set data same for all instances + frame_start = context.data.get("frameStart") + frame_end = context.data.get("frameEnd") + + for instance in new_instances: + if ( + instance.data.get("frameStart") is None + or instance.data.get("frameEnd") is None + ): + instance.data["frameStart"] = frame_start + instance.data["frameEnd"] = frame_end + + if instance.data.get("asset") is None: + instance.data["asset"] = asset_doc["name"] + + if instance.data.get("task") is None: + instance.data["task"] = task_name + + if "representations" not in instance.data: + instance.data["representations"] = [] + + if "source" not in instance.data: + instance.data["source"] = "webpublisher" + + def _create_workfile_instance(self, context, subset_name): + workfile_path = context.data["workfilePath"] + staging_dir = os.path.dirname(workfile_path) + filename = os.path.basename(workfile_path) + ext = os.path.splitext(filename)[-1] + + return context.create_instance(**{ + "name": subset_name, + "label": subset_name, + "subset": subset_name, + "family": self.workfile_family, + "families": [], + "stagingDir": staging_dir, + "representations": [{ + "name": ext.lstrip("."), + "ext": ext.lstrip("."), + "files": filename, + "stagingDir": staging_dir + }] + }) + + def _create_review_instance(self, context, subset_name): + staging_dir = self._create_staging_dir(context, subset_name) + layers_data = context.data["layersData"] + # Filter hidden 
layers + filtered_layers_data = [ + copy.deepcopy(layer) + for layer in layers_data + if layer["visible"] + ] + return context.create_instance(**{ + "name": subset_name, + "label": subset_name, + "subset": subset_name, + "family": self.review_family, + "families": [], + "layers": filtered_layers_data, + "stagingDir": staging_dir + }) + + def _create_render_pass_instance(self, context, layers, subset_name): + staging_dir = self._create_staging_dir(context, subset_name) + # Global instance data modifications + # Fill families + return context.create_instance(**{ + "name": subset_name, + "subset": subset_name, + "label": subset_name, + "family": self.render_pass_family, + # Add `review` family for thumbnail integration + "families": [self.render_pass_family, "review"], + "representations": [], + "layers": layers, + "stagingDir": staging_dir + }) + + def _create_render_layer_instance(self, context, layers, subset_name): + staging_dir = self._create_staging_dir(context, subset_name) + # Global instance data modifications + # Fill families + return context.create_instance(**{ + "name": subset_name, + "subset": subset_name, + "label": subset_name, + "family": self.render_pass_family, + # Add `review` family for thumbnail integration + "families": [self.render_pass_family, "review"], + "representations": [], + "layers": layers, + "stagingDir": staging_dir + }) + + def _create_staging_dir(self, context, subset_name): + context_staging_dir = context.data["contextStagingDir"] + staging_dir = os.path.join(context_staging_dir, subset_name) + if not os.path.exists(staging_dir): + os.makedirs(staging_dir) + return staging_dir diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_workfile_data.py b/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_workfile_data.py new file mode 100644 index 0000000000..f0f29260a2 --- /dev/null +++ b/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_workfile_data.py @@ -0,0 +1,142 @@ +""" +Requires: + 
CollectPublishedFiles + CollectModules + +Provides: + workfilePath - Path to tvpaint workfile + sceneData - Scene data loaded from the workfile + groupsData - + layersData + layersExposureFrames + layersPrePostBehavior +""" +import os +import uuid +import json +import shutil +import pyblish.api +from openpype.lib.plugin_tools import parse_json +from openpype.hosts.tvpaint.worker import ( + SenderTVPaintCommands, + CollectSceneData +) + + +class CollectTVPaintWorkfileData(pyblish.api.ContextPlugin): + label = "Collect TVPaint Workfile data" + order = pyblish.api.CollectorOrder - 0.4 + hosts = ["webpublisher"] + targets = ["tvpaint_worker"] + + def process(self, context): + # Get JobQueue module + modules = context.data["openPypeModules"] + job_queue_module = modules["job_queue"] + jobs_root = job_queue_module.get_jobs_root() + if not jobs_root: + raise ValueError("Job Queue root is not set.") + + context.data["jobsRoot"] = jobs_root + + context_staging_dir = self._create_context_staging_dir(jobs_root) + workfile_path = self._extract_workfile_path( + context, context_staging_dir + ) + context.data["contextStagingDir"] = context_staging_dir + context.data["workfilePath"] = workfile_path + + # Prepare tvpaint command + collect_scene_data_command = CollectSceneData() + # Create TVPaint sender commands + commands = SenderTVPaintCommands(workfile_path, job_queue_module) + commands.add_command(collect_scene_data_command) + + # Send job and wait for answer + commands.send_job_and_wait() + + collected_data = collect_scene_data_command.result() + layers_data = collected_data["layers_data"] + groups_data = collected_data["groups_data"] + scene_data = collected_data["scene_data"] + exposure_frames_by_layer_id = ( + collected_data["exposure_frames_by_layer_id"] + ) + pre_post_beh_by_layer_id = ( + collected_data["pre_post_beh_by_layer_id"] + ) + + # Store results + # scene data store the same way as TVPaint collector + scene_data = { + "sceneWidth": scene_data["width"], + 
"sceneHeight": scene_data["height"], + "scenePixelAspect": scene_data["pixel_aspect"], + "sceneFps": scene_data["fps"], + "sceneFieldOrder": scene_data["field_order"], + "sceneMarkIn": scene_data["mark_in"], + # scene_data["mark_in_state"], + "sceneMarkInState": scene_data["mark_in_set"], + "sceneMarkOut": scene_data["mark_out"], + # scene_data["mark_out_state"], + "sceneMarkOutState": scene_data["mark_out_set"], + "sceneStartFrame": scene_data["start_frame"], + "sceneBgColor": scene_data["bg_color"] + } + context.data["sceneData"] = scene_data + # Store only raw data + context.data["groupsData"] = groups_data + context.data["layersData"] = layers_data + context.data["layersExposureFrames"] = exposure_frames_by_layer_id + context.data["layersPrePostBehavior"] = pre_post_beh_by_layer_id + + self.log.debug( + ( + "Collected data" + "\nScene data: {}" + "\nLayers data: {}" + "\nExposure frames: {}" + "\nPre/Post behavior: {}" + ).format( + json.dumps(scene_data, indent=4), + json.dumps(layers_data, indent=4), + json.dumps(exposure_frames_by_layer_id, indent=4), + json.dumps(pre_post_beh_by_layer_id, indent=4) + ) + ) + + def _create_context_staging_dir(self, jobs_root): + if not os.path.exists(jobs_root): + os.makedirs(jobs_root) + + random_folder_name = str(uuid.uuid4()) + full_path = os.path.join(jobs_root, random_folder_name) + if not os.path.exists(full_path): + os.makedirs(full_path) + return full_path + + def _extract_workfile_path(self, context, context_staging_dir): + """Find first TVPaint file in tasks and use it.""" + batch_dir = context.data["batchDir"] + batch_data = context.data["batchData"] + src_workfile_path = None + for task_id in batch_data["tasks"]: + if src_workfile_path is not None: + break + task_dir = os.path.join(batch_dir, task_id) + task_manifest_path = os.path.join(task_dir, "manifest.json") + task_data = parse_json(task_manifest_path) + task_files = task_data["files"] + for filename in task_files: + _, ext = os.path.splitext(filename) + if 
ext.lower() == ".tvpp": + src_workfile_path = os.path.join(task_dir, filename) + break + + # Copy workfile to job queue work root + new_workfile_path = os.path.join( + context_staging_dir, os.path.basename(src_workfile_path) + ) + shutil.copy(src_workfile_path, new_workfile_path) + + return new_workfile_path diff --git a/openpype/hosts/webpublisher/plugins/publish/extract_tvpaint_workfile.py b/openpype/hosts/webpublisher/plugins/publish/extract_tvpaint_workfile.py new file mode 100644 index 0000000000..85c8526c83 --- /dev/null +++ b/openpype/hosts/webpublisher/plugins/publish/extract_tvpaint_workfile.py @@ -0,0 +1,535 @@ +import os +import copy + +from openpype.hosts.tvpaint.worker import ( + SenderTVPaintCommands, + ExecuteSimpleGeorgeScript, + ExecuteGeorgeScript +) + +import pyblish.api +from openpype.hosts.tvpaint.lib import ( + calculate_layers_extraction_data, + get_frame_filename_template, + fill_reference_frames, + composite_rendered_layers, + rename_filepaths_by_frame_start +) +from PIL import Image + + +class ExtractTVPaintSequences(pyblish.api.Extractor): + label = "Extract TVPaint Sequences" + hosts = ["webpublisher"] + targets = ["tvpaint_worker"] + + # Context plugin does not have families filtering + families_filter = ["review", "renderPass", "renderLayer"] + + job_queue_root_key = "jobs_root" + + # Modifiable with settings + review_bg = [255, 255, 255, 255] + + def process(self, context): + # Get workfle path + workfile_path = context.data["workfilePath"] + jobs_root = context.data["jobsRoot"] + jobs_root_slashed = jobs_root.replace("\\", "/") + + # Prepare scene data + scene_data = context.data["sceneData"] + scene_mark_in = scene_data["sceneMarkIn"] + scene_mark_out = scene_data["sceneMarkOut"] + scene_start_frame = scene_data["sceneStartFrame"] + scene_bg_color = scene_data["sceneBgColor"] + + # Prepare layers behavior + behavior_by_layer_id = context.data["layersPrePostBehavior"] + exposure_frames_by_layer_id = 
context.data["layersExposureFrames"] + + # Handles are not stored per instance but on Context + handle_start = context.data["handleStart"] + handle_end = context.data["handleEnd"] + + # Get JobQueue module + modules = context.data["openPypeModules"] + job_queue_module = modules["job_queue"] + + tvpaint_commands = SenderTVPaintCommands( + workfile_path, job_queue_module + ) + + # Change scene Start Frame to 0 to prevent frame index issues + # - issue is that TVPaint versions deal with frame indexes in a + # different way when Start Frame is not `0` + # NOTE It will be set back after rendering + tvpaint_commands.add_command( + ExecuteSimpleGeorgeScript("tv_startframe 0") + ) + + root_key_replacement = "{" + self.job_queue_root_key + "}" + after_render_instances = [] + for instance in context: + instance_families = set(instance.data.get("families", [])) + instance_families.add(instance.data["family"]) + valid = False + for family in instance_families: + if family in self.families_filter: + valid = True + break + + if not valid: + continue + + self.log.info("* Preparing commands for instance \"{}\"".format( + instance.data["label"] + )) + # Get all layers and filter out not visible + layers = instance.data["layers"] + filtered_layers = [layer for layer in layers if layer["visible"]] + if not filtered_layers: + self.log.info( + "None of the layers from the instance" + " are visible. Extraction skipped." 
+ ) + continue + + joined_layer_names = ", ".join([ + "\"{}\"".format(str(layer["name"])) + for layer in filtered_layers + ]) + self.log.debug( + "Instance has {} layers with names: {}".format( + len(filtered_layers), joined_layer_names + ) + ) + + # Staging dir must be created during collection + staging_dir = instance.data["stagingDir"].replace("\\", "/") + + job_root_template = staging_dir.replace( + jobs_root_slashed, root_key_replacement + ) + + # Frame start/end may be stored as float + frame_start = int(instance.data["frameStart"]) + frame_end = int(instance.data["frameEnd"]) + + # Prepare output frames + output_frame_start = frame_start - handle_start + output_frame_end = frame_end + handle_end + + # Change output frame start to 0 if handles cause it's negative + # number + if output_frame_start < 0: + self.log.warning(( + "Frame start with handles has negative value." + " Changed to \"0\". Frames start: {}, Handle Start: {}" + ).format(frame_start, handle_start)) + output_frame_start = 0 + + # Create copy of scene Mark In/Out + mark_in, mark_out = scene_mark_in, scene_mark_out + + # Fix possible changes of output frame + mark_out, output_frame_end = self._fix_range_changes( + mark_in, mark_out, output_frame_start, output_frame_end + ) + filename_template = get_frame_filename_template( + max(scene_mark_out, output_frame_end) + ) + + # ----------------------------------------------------------------- + self.log.debug( + "Files will be rendered to folder: {}".format(staging_dir) + ) + + output_filepaths_by_frame_idx = {} + for frame_idx in range(mark_in, mark_out + 1): + filename = filename_template.format(frame=frame_idx) + filepath = os.path.join(staging_dir, filename) + output_filepaths_by_frame_idx[frame_idx] = filepath + + # Prepare data for post render processing + post_render_data = { + "output_dir": staging_dir, + "layers": filtered_layers, + "output_filepaths_by_frame_idx": output_filepaths_by_frame_idx, + "instance": instance, + "is_layers_render": 
False, + "output_frame_start": output_frame_start, + "output_frame_end": output_frame_end + } + # Store them to list + after_render_instances.append(post_render_data) + + # Review rendering + if instance.data["family"] == "review": + self.add_render_review_command( + tvpaint_commands, mark_in, mark_out, scene_bg_color, + job_root_template, filename_template + ) + continue + + # Layers rendering + extraction_data_by_layer_id = calculate_layers_extraction_data( + filtered_layers, + exposure_frames_by_layer_id, + behavior_by_layer_id, + mark_in, + mark_out + ) + filepaths_by_layer_id = self.add_render_command( + tvpaint_commands, + job_root_template, + staging_dir, + filtered_layers, + extraction_data_by_layer_id + ) + # Add more data to post render processing + post_render_data.update({ + "is_layers_render": True, + "extraction_data_by_layer_id": extraction_data_by_layer_id, + "filepaths_by_layer_id": filepaths_by_layer_id + }) + + # Change scene frame Start back to previous value + tvpaint_commands.add_command( + ExecuteSimpleGeorgeScript( + "tv_startframe {}".format(scene_start_frame) + ) + ) + self.log.info("Sending the job and waiting for response...") + tvpaint_commands.send_job_and_wait() + self.log.info("Render job finished") + + for post_render_data in after_render_instances: + self._post_render_processing(post_render_data, mark_in, mark_out) + + def _fix_range_changes( + self, mark_in, mark_out, output_frame_start, output_frame_end + ): + # Check Marks range and output range + output_range = output_frame_end - output_frame_start + marks_range = mark_out - mark_in + + # Lower Mark Out if mark range is bigger than output + # - do not rendered not used frames + if output_range < marks_range: + new_mark_out = mark_out - (marks_range - output_range) + self.log.warning(( + "Lowering render range to {} frames. 
Changed Mark Out {} -> {}" + ).format(marks_range + 1, mark_out, new_mark_out)) + # Assign new mark out to variable + mark_out = new_mark_out + + # Lower output frame end so representation has right `frameEnd` value + elif output_range > marks_range: + new_output_frame_end = ( + output_frame_end - (output_range - marks_range) + ) + self.log.warning(( + "Lowering representation range to {} frames." + " Changed frame end {} -> {}" + ).format(output_range + 1, mark_out, new_output_frame_end)) + output_frame_end = new_output_frame_end + return mark_out, output_frame_end + + def _post_render_processing(self, post_render_data, mark_in, mark_out): + # Unpack values + instance = post_render_data["instance"] + output_filepaths_by_frame_idx = ( + post_render_data["output_filepaths_by_frame_idx"] + ) + is_layers_render = post_render_data["is_layers_render"] + output_dir = post_render_data["output_dir"] + layers = post_render_data["layers"] + output_frame_start = post_render_data["output_frame_start"] + output_frame_end = post_render_data["output_frame_end"] + + # Trigger post processing of layers rendering + # - only few frames were rendered this will complete the sequence + # - multiple layers can be in single instance they must be composite + # over each other + if is_layers_render: + self._finish_layer_render( + layers, + post_render_data["extraction_data_by_layer_id"], + post_render_data["filepaths_by_layer_id"], + mark_in, + mark_out, + output_filepaths_by_frame_idx + ) + + # Create thumbnail + thumbnail_filepath = os.path.join(output_dir, "thumbnail.jpg") + thumbnail_src_path = output_filepaths_by_frame_idx[mark_in] + self._create_thumbnail(thumbnail_src_path, thumbnail_filepath) + + # Rename filepaths to final frames + repre_files = self._rename_output_files( + output_filepaths_by_frame_idx, + mark_in, + mark_out, + output_frame_start + ) + + # Fill tags and new families + family_lowered = instance.data["family"].lower() + tags = [] + if family_lowered in ("review", 
"renderlayer"): + tags.append("review") + + # Sequence of one frame + single_file = len(repre_files) == 1 + if single_file: + repre_files = repre_files[0] + + # Extension is harcoded + # - changing extension would require change code + new_repre = { + "name": "png", + "ext": "png", + "files": repre_files, + "stagingDir": output_dir, + "tags": tags + } + + if not single_file: + new_repre["frameStart"] = output_frame_start + new_repre["frameEnd"] = output_frame_end + + self.log.debug("Creating new representation: {}".format(new_repre)) + + instance.data["representations"].append(new_repre) + + if family_lowered in ("renderpass", "renderlayer"): + # Change family to render + instance.data["family"] = "render" + + thumbnail_ext = os.path.splitext(thumbnail_filepath)[1] + # Create thumbnail representation + thumbnail_repre = { + "name": "thumbnail", + "ext": thumbnail_ext.replace(".", ""), + "outputName": "thumb", + "files": os.path.basename(thumbnail_filepath), + "stagingDir": output_dir, + "tags": ["thumbnail"] + } + instance.data["representations"].append(thumbnail_repre) + + def _rename_output_files( + self, filepaths_by_frame, mark_in, mark_out, output_frame_start + ): + new_filepaths_by_frame = rename_filepaths_by_frame_start( + filepaths_by_frame, mark_in, mark_out, output_frame_start + ) + + repre_filenames = [] + for filepath in new_filepaths_by_frame.values(): + repre_filenames.append(os.path.basename(filepath)) + + if mark_in < output_frame_start: + repre_filenames = list(reversed(repre_filenames)) + + return repre_filenames + + def add_render_review_command( + self, + tvpaint_commands, + mark_in, + mark_out, + scene_bg_color, + job_root_template, + filename_template + ): + """ Export images from TVPaint using `tv_savesequence` command. + + Args: + output_dir (str): Directory where files will be stored. + mark_in (int): Starting frame index from which export will begin. + mark_out (int): On which frame index export will end. 
+ scene_bg_color (list): Bg color set in scene. Result of george + script command `tv_background`. + """ + self.log.debug("Preparing data for rendering.") + bg_color = self._get_review_bg_color() + first_frame_filepath = "/".join([ + job_root_template, + filename_template.format(frame=mark_in) + ]) + + george_script_lines = [ + # Change bg color to color from settings + "tv_background \"color\" {} {} {}".format(*bg_color), + "tv_SaveMode \"PNG\"", + "export_path = \"{}\"".format( + first_frame_filepath.replace("\\", "/") + ), + "tv_savesequence '\"'export_path'\"' {} {}".format( + mark_in, mark_out + ) + ] + if scene_bg_color: + # Change bg color back to previous scene bg color + _scene_bg_color = copy.deepcopy(scene_bg_color) + bg_type = _scene_bg_color.pop(0) + orig_color_command = [ + "tv_background", + "\"{}\"".format(bg_type) + ] + orig_color_command.extend(_scene_bg_color) + + george_script_lines.append(" ".join(orig_color_command)) + + tvpaint_commands.add_command( + ExecuteGeorgeScript( + george_script_lines, + root_dir_key=self.job_queue_root_key + ) + ) + + def add_render_command( + self, + tvpaint_commands, + job_root_template, + staging_dir, + layers, + extraction_data_by_layer_id + ): + """ Export images from TVPaint. + + Args: + output_dir (str): Directory where files will be stored. + mark_in (int): Starting frame index from which export will begin. + mark_out (int): On which frame index export will end. + layers (list): List of layers to be exported. + + Retruns: + tuple: With 2 items first is list of filenames second is path to + thumbnail. 
+ """ + # Map layers by position + layers_by_id = { + layer["layer_id"]: layer + for layer in layers + } + + # Render layers + filepaths_by_layer_id = {} + for layer_id, render_data in extraction_data_by_layer_id.items(): + layer = layers_by_id[layer_id] + frame_references = render_data["frame_references"] + filenames_by_frame_index = render_data["filenames_by_frame_index"] + + filepaths_by_frame = {} + command_filepath_by_frame = {} + for frame_idx, ref_idx in frame_references.items(): + # None reference is skipped because does not have source + if ref_idx is None: + filepaths_by_frame[frame_idx] = None + continue + filename = filenames_by_frame_index[frame_idx] + + filepaths_by_frame[frame_idx] = os.path.join( + staging_dir, filename + ) + if frame_idx == ref_idx: + command_filepath_by_frame[frame_idx] = "/".join( + [job_root_template, filename] + ) + + self._add_render_layer_command( + tvpaint_commands, layer, command_filepath_by_frame + ) + filepaths_by_layer_id[layer_id] = filepaths_by_frame + + return filepaths_by_layer_id + + def _add_render_layer_command( + self, tvpaint_commands, layer, filepaths_by_frame + ): + george_script_lines = [ + # Set current layer by position + "tv_layergetid {}".format(layer["position"]), + "layer_id = result", + "tv_layerset layer_id", + "tv_SaveMode \"PNG\"" + ] + + for frame_idx, filepath in filepaths_by_frame.items(): + if filepath is None: + continue + + # Go to frame + george_script_lines.append("tv_layerImage {}".format(frame_idx)) + # Store image to output + george_script_lines.append( + "tv_saveimage \"{}\"".format(filepath.replace("\\", "/")) + ) + + tvpaint_commands.add_command( + ExecuteGeorgeScript( + george_script_lines, + root_dir_key=self.job_queue_root_key + ) + ) + + def _finish_layer_render( + self, + layers, + extraction_data_by_layer_id, + filepaths_by_layer_id, + mark_in, + mark_out, + output_filepaths_by_frame_idx + ): + # Fill frames between `frame_start_index` and `frame_end_index` + 
self.log.debug("Filling frames not rendered frames.") + for layer_id, render_data in extraction_data_by_layer_id.items(): + frame_references = render_data["frame_references"] + filepaths_by_frame = filepaths_by_layer_id[layer_id] + fill_reference_frames(frame_references, filepaths_by_frame) + + # Prepare final filepaths where compositing should store result + self.log.info("Started compositing of layer frames.") + composite_rendered_layers( + layers, filepaths_by_layer_id, + mark_in, mark_out, + output_filepaths_by_frame_idx + ) + + def _create_thumbnail(self, thumbnail_src_path, thumbnail_filepath): + if not os.path.exists(thumbnail_src_path): + return + + source_img = Image.open(thumbnail_src_path) + + # Composite background only on rgba images + # - just making sure + if source_img.mode.lower() == "rgba": + bg_color = self._get_review_bg_color() + self.log.debug("Adding thumbnail background color {}.".format( + " ".join([str(val) for val in bg_color]) + )) + bg_image = Image.new("RGBA", source_img.size, bg_color) + thumbnail_obj = Image.alpha_composite(bg_image, source_img) + thumbnail_obj.convert("RGB").save(thumbnail_filepath) + + else: + self.log.info(( + "Source for thumbnail has mode \"{}\" (Expected: RGBA)." + " Can't use thubmanail background color." 
+ ).format(source_img.mode)) + source_img.save(thumbnail_filepath) + + def _get_review_bg_color(self): + red = green = blue = 255 + if self.review_bg: + if len(self.review_bg) == 4: + red, green, blue, _ = self.review_bg + elif len(self.review_bg) == 3: + red, green, blue = self.review_bg + return (red, green, blue) diff --git a/openpype/hosts/webpublisher/plugins/publish/others_cleanup_job_root.py b/openpype/hosts/webpublisher/plugins/publish/others_cleanup_job_root.py new file mode 100644 index 0000000000..fc5cd1ea9a --- /dev/null +++ b/openpype/hosts/webpublisher/plugins/publish/others_cleanup_job_root.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +"""Cleanup leftover files from publish.""" +import os +import shutil +import pyblish.api + + +class CleanUpJobRoot(pyblish.api.ContextPlugin): + """Cleans up the job root directory after a successful publish. + + Remove all files in job root as all of them should be published. + """ + + order = pyblish.api.IntegratorOrder + 1 + label = "Clean Up Job Root" + optional = True + active = True + + def process(self, context): + context_staging_dir = context.data.get("contextStagingDir") + if not context_staging_dir: + self.log.info("Key 'contextStagingDir' is empty.") + + elif not os.path.exists(context_staging_dir): + self.log.info(( + "Job root directory for this publish does not" + " exists anymore \"{}\"." + ).format(context_staging_dir)) + else: + self.log.info("Deleting job root with all files.") + shutil.rmtree(context_staging_dir) diff --git a/openpype/hosts/webpublisher/plugins/publish/validate_tvpaint_workfile_data.py b/openpype/hosts/webpublisher/plugins/publish/validate_tvpaint_workfile_data.py new file mode 100644 index 0000000000..eec6ef1004 --- /dev/null +++ b/openpype/hosts/webpublisher/plugins/publish/validate_tvpaint_workfile_data.py @@ -0,0 +1,35 @@ +import pyblish.api + + +class ValidateWorkfileData(pyblish.api.ContextPlugin): + """Validate mark in and out are enabled and it's duration. 
+ + Mark In/Out does not have to match frameStart and frameEnd but duration is + important. + """ + + label = "Validate Workfile Data" + order = pyblish.api.ValidatorOrder + + def process(self, context): + # Data collected in `CollectAvalonEntities` + frame_start = context.data["frameStart"] + frame_end = context.data["frameEnd"] + handle_start = context.data["handleStart"] + handle_end = context.data["handleEnd"] + + scene_data = context.data["sceneData"] + scene_mark_in = scene_data["sceneMarkIn"] + scene_mark_out = scene_data["sceneMarkOut"] + + expected_range = ( + (frame_end - frame_start + 1) + + handle_start + + handle_end + ) + marks_range = scene_mark_out - scene_mark_in + 1 + if expected_range != marks_range: + raise AssertionError(( + "Wrong Mark In/Out range." + " Expected range is {} frames got {} frames" + ).format(expected_range, marks_range)) diff --git a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py index d474c96ff9..30399a6ba7 100644 --- a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py +++ b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py @@ -198,6 +198,15 @@ class WebpublisherBatchPublishEndpoint(_RestApiEndpoint): # - filter defines command and can extend arguments dictionary # This is used only if 'studio_processing' is enabled on batch studio_processing_filters = [ + # TVPaint filter + { + "extensions": [".tvpp"], + "command": "remotepublish", + "arguments": { + "targets": ["tvpaint_worker"] + }, + "add_to_queue": False + }, # Photoshop filter { "extensions": [".psd", ".psb"], diff --git a/openpype/lib/anatomy.py b/openpype/lib/anatomy.py index 7a4a55363c..66ecbd66d1 100644 --- a/openpype/lib/anatomy.py +++ b/openpype/lib/anatomy.py @@ -89,8 +89,10 @@ class Anatomy: self.project_name = project_name - self._data = get_anatomy_settings(project_name, site_name) - + self._data = self._prepare_anatomy_data( + 
get_anatomy_settings(project_name, site_name) + ) + self._site_name = site_name self._templates_obj = Templates(self) self._roots_obj = Roots(self) @@ -121,9 +123,36 @@ class Anatomy: """ return get_default_anatomy_settings(clear_metadata=False) + @staticmethod + def _prepare_anatomy_data(anatomy_data): + """Prepare anatomy data for futher processing. + + Method added to replace `{task}` with `{task[name]}` in templates. + """ + templates_data = anatomy_data.get("templates") + if templates_data: + # Replace `{task}` with `{task[name]}` in templates + value_queue = collections.deque() + value_queue.append(templates_data) + while value_queue: + item = value_queue.popleft() + if not isinstance(item, dict): + continue + + for key in tuple(item.keys()): + value = item[key] + if isinstance(value, dict): + value_queue.append(value) + + elif isinstance(value, StringType): + item[key] = value.replace("{task}", "{task[name]}") + return anatomy_data + def reset(self): """Reset values of cached data in templates and roots objects.""" - self._data = get_anatomy_settings(self.project_name) + self._data = self._prepare_anatomy_data( + get_anatomy_settings(self.project_name, self._site_name) + ) self.templates_obj.reset() self.roots_obj.reset() @@ -981,6 +1010,14 @@ class Templates: TemplateResult: Filled or partially filled template containing all data needed or missing for filling template. 
""" + task_data = data.get("task") + if ( + isinstance(task_data, StringType) + and "{task[name]}" in orig_template + ): + # Change task to dictionary if template expect dictionary + data["task"] = {"name": task_data} + template, missing_optional, invalid_optional = ( self._filter_optional(orig_template, data) ) @@ -989,6 +1026,7 @@ class Templates: invalid_required = [] missing_required = [] replace_keys = [] + for group in self.key_pattern.findall(template): orig_key = group[1:-1] key = str(orig_key) @@ -1074,6 +1112,10 @@ class Templates: output = collections.defaultdict(dict) for key, orig_value in templates.items(): if isinstance(orig_value, StringType): + # Replace {task} by '{task[name]}' for backward compatibility + if '{task}' in orig_value: + orig_value = orig_value.replace('{task}', '{task[name]}') + output[key] = self._format(orig_value, data) continue diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py index b9bcecd3a0..30be92e886 100644 --- a/openpype/lib/applications.py +++ b/openpype/lib/applications.py @@ -1280,23 +1280,12 @@ def prepare_context_environments(data): anatomy = data["anatomy"] - asset_tasks = asset_doc.get("data", {}).get("tasks") or {} - task_info = asset_tasks.get(task_name) or {} - task_type = task_info.get("type") + task_type = workdir_data["task"]["type"] # Temp solution how to pass task type to `_prepare_last_workfile` data["task_type"] = task_type - workfile_template_key = get_workfile_template_key( - task_type, - app.host_name, - project_name=project_name, - project_settings=project_settings - ) - try: - workdir = get_workdir_with_workdir_data( - workdir_data, anatomy, template_key=workfile_template_key - ) + workdir = get_workdir_with_workdir_data(workdir_data, anatomy) except Exception as exc: raise ApplicationLaunchFailed( @@ -1329,10 +1318,10 @@ def prepare_context_environments(data): ) data["env"].update(context_env) - _prepare_last_workfile(data, workdir, workfile_template_key) + 
_prepare_last_workfile(data, workdir) -def _prepare_last_workfile(data, workdir, workfile_template_key): +def _prepare_last_workfile(data, workdir): """last workfile workflow preparation. Function check if should care about last workfile workflow and tries @@ -1395,6 +1384,10 @@ def _prepare_last_workfile(data, workdir, workfile_template_key): anatomy = data["anatomy"] # Find last workfile file_template = anatomy.templates["work"]["file"] + # Replace {task} by '{task[name]}' for backward compatibility + if '{task}' in file_template: + file_template = file_template.replace('{task}', '{task[name]}') + workdir_data.update({ "version": 1, "user": get_openpype_username(), diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py index b043cbfdb4..a8340d7d09 100644 --- a/openpype/lib/avalon_context.py +++ b/openpype/lib/avalon_context.py @@ -7,6 +7,7 @@ import platform import logging import collections import functools +import getpass from openpype.settings import get_project_settings from .anatomy import Anatomy @@ -257,19 +258,48 @@ def get_hierarchy(asset_name=None): return "/".join(hierarchy_items) -@with_avalon -def get_linked_assets(asset_entity): - """Return linked assets for `asset_entity` from DB +def get_linked_asset_ids(asset_doc): + """Return linked asset ids for `asset_doc` from DB - Args: - asset_entity (dict): asset document from DB + Args: + asset_doc (dict): Asset document from DB. - Returns: - (list) of MongoDB documents + Returns: + (list): MongoDB ids of input links. 
""" - inputs = asset_entity["data"].get("inputs", []) - inputs = [avalon.io.find_one({"_id": x}) for x in inputs] - return inputs + output = [] + if not asset_doc: + return output + + input_links = asset_doc["data"].get("inputLinks") or [] + if input_links: + for item in input_links: + # Backwards compatibility for "_id" key which was replaced with + # "id" + if "_id" in item: + link_id = item["_id"] + else: + link_id = item["id"] + output.append(link_id) + + return output + + +@with_avalon +def get_linked_assets(asset_doc): + """Return linked assets for `asset_doc` from DB + + Args: + asset_doc (dict): Asset document from DB + + Returns: + (list) Asset documents of input links for passed asset doc. + """ + link_ids = get_linked_asset_ids(asset_doc) + if not link_ids: + return [] + + return list(avalon.io.find({"_id": {"$in": link_ids}})) @with_avalon @@ -464,6 +494,7 @@ def get_workfile_template_key( return default +# TODO rename function as is not just "work" specific def get_workdir_data(project_doc, asset_doc, task_name, host_name): """Prepare data for workdir template filling from entered information. 
@@ -479,22 +510,31 @@ def get_workdir_data(project_doc, asset_doc, task_name, host_name): """ hierarchy = "/".join(asset_doc["data"]["parents"]) + task_type = asset_doc['data']['tasks'].get(task_name, {}).get('type') + + project_task_types = project_doc["config"]["tasks"] + task_code = project_task_types.get(task_type, {}).get("short_name") + data = { "project": { "name": project_doc["name"], "code": project_doc["data"].get("code") }, - "task": task_name, + "task": { + "name": task_name, + "type": task_type, + "short": task_code, + }, "asset": asset_doc["name"], "app": host_name, - "hierarchy": hierarchy + "user": getpass.getuser(), + "hierarchy": hierarchy, } return data def get_workdir_with_workdir_data( - workdir_data, anatomy=None, project_name=None, - template_key=None, dbcon=None + workdir_data, anatomy=None, project_name=None, template_key=None ): """Fill workdir path from entered data and project's anatomy. @@ -529,12 +569,10 @@ def get_workdir_with_workdir_data( anatomy = Anatomy(project_name) if not template_key: - template_key = get_workfile_template_key_from_context( - workdir_data["asset"], - workdir_data["task"], + template_key = get_workfile_template_key( + workdir_data["task"]["type"], workdir_data["app"], - project_name=workdir_data["project"]["name"], - dbcon=dbcon + project_name=workdir_data["project"]["name"] ) anatomy_filled = anatomy.format(workdir_data) @@ -648,7 +686,7 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): anatomy = Anatomy(project_doc["name"]) # Get workdir path (result is anatomy.TemplateResult) template_workdir = get_workdir_with_workdir_data( - workdir_data, anatomy, dbcon=dbcon + workdir_data, anatomy ) template_workdir_path = str(template_workdir).replace("\\", "/") diff --git a/openpype/lib/delivery.py b/openpype/lib/delivery.py index c89e2e7ae0..01fcc907ed 100644 --- a/openpype/lib/delivery.py +++ b/openpype/lib/delivery.py @@ -60,12 +60,13 @@ def path_from_representation(representation, 
anatomy): path = pipeline.format_template_with_optional_keys( context, template ) + path = os.path.normpath(path.replace("/", "\\")) except KeyError: # Template references unavailable data return None - return os.path.normpath(path) + return path def copy_file(src_path, dst_path): @@ -179,9 +180,11 @@ def process_single_file( Returns: (collections.defaultdict , int) """ + # Make sure path is valid for all platforms + src_path = os.path.normpath(src_path.replace("\\", "/")) + if not os.path.exists(src_path): - msg = "{} doesn't exist for {}".format(src_path, - repre["_id"]) + msg = "{} doesn't exist for {}".format(src_path, repre["_id"]) report_items["Source file was not found"].append(msg) return report_items, 0 @@ -192,8 +195,10 @@ def process_single_file( else: delivery_path = anatomy_filled["delivery"][template_name] - # context.representation could be .psd + # Backwards compatibility when extension contained `.` delivery_path = delivery_path.replace("..", ".") + # Make sure path is valid for all platforms + delivery_path = os.path.normpath(delivery_path.replace("\\", "/")) delivery_folder = os.path.dirname(delivery_path) if not os.path.exists(delivery_folder): @@ -230,14 +235,14 @@ def process_sequence( Returns: (collections.defaultdict , int) """ + src_path = os.path.normpath(src_path.replace("\\", "/")) def hash_path_exist(myPath): res = myPath.replace('#', '*') glob_search_results = glob.glob(res) if len(glob_search_results) > 0: return True - else: - return False + return False if not hash_path_exist(src_path): msg = "{} doesn't exist for {}".format(src_path, @@ -307,6 +312,7 @@ def process_sequence( else: delivery_path = anatomy_filled["delivery"][template_name] + delivery_path = os.path.normpath(delivery_path.replace("\\", "/")) delivery_folder = os.path.dirname(delivery_path) dst_head, dst_tail = delivery_path.split(frame_indicator) dst_padding = src_collection.padding diff --git a/openpype/lib/execute.py b/openpype/lib/execute.py index 
a1111fba29..ad77b2f899 100644 --- a/openpype/lib/execute.py +++ b/openpype/lib/execute.py @@ -124,7 +124,7 @@ def run_subprocess(*args, **kwargs): if full_output: full_output += "\n" full_output += _stderr - logger.warning(_stderr) + logger.info(_stderr) if proc.returncode != 0: exc_msg = "Executing arguments was not successful: \"{}\"".format(args) diff --git a/openpype/lib/local_settings.py b/openpype/lib/local_settings.py index 66dad279de..97e99b4b5a 100644 --- a/openpype/lib/local_settings.py +++ b/openpype/lib/local_settings.py @@ -522,6 +522,11 @@ def get_local_site_id(): Identifier is created if does not exists yet. """ + # override local id from environment + # used for background syncing + if os.environ.get("OPENPYPE_LOCAL_ID"): + return os.environ["OPENPYPE_LOCAL_ID"] + registry = OpenPypeSettingsRegistry() try: return registry.get_item("localId") diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py index aa9e0c9b57..891163e3ae 100644 --- a/openpype/lib/plugin_tools.py +++ b/openpype/lib/plugin_tools.py @@ -531,12 +531,20 @@ def should_decompress(file_url): and we can decompress (oiiotool supported) """ if oiio_supported(): - output = run_subprocess([ - get_oiio_tools_path(), - "--info", "-v", file_url]) - return "compression: \"dwaa\"" in output or \ - "compression: \"dwab\"" in output - + try: + output = run_subprocess([ + get_oiio_tools_path(), + "--info", "-v", file_url]) + return "compression: \"dwaa\"" in output or \ + "compression: \"dwab\"" in output + except RuntimeError: + _name, ext = os.path.splitext(file_url) + # TODO: should't the list of allowed extensions be + # taken from an OIIO variable of supported formats + if ext not in [".mxf"]: + # Reraise exception + raise + return False return False diff --git a/openpype/lib/remote_publish.py b/openpype/lib/remote_publish.py index 3483898af7..4b4d233f1e 100644 --- a/openpype/lib/remote_publish.py +++ b/openpype/lib/remote_publish.py @@ -51,7 +51,8 @@ def 
start_webpublish_log(dbcon, batch_id, user): "batch_id": batch_id, "start_date": datetime.now(), "user": user, - "status": "in_progress" + "status": "in_progress", + "progress": 0.0 }).inserted_id diff --git a/openpype/lib/vendor_bin_utils.py b/openpype/lib/vendor_bin_utils.py index a8c75c20da..42f2b34bb2 100644 --- a/openpype/lib/vendor_bin_utils.py +++ b/openpype/lib/vendor_bin_utils.py @@ -71,18 +71,24 @@ def ffprobe_streams(path_to_file, logger=None): "Getting information about input \"{}\".".format(path_to_file) ) args = [ - "\"{}\"".format(get_ffmpeg_tool_path("ffprobe")), - "-v quiet", - "-print_format json", + get_ffmpeg_tool_path("ffprobe"), + "-hide_banner", + "-loglevel", "fatal", + "-show_error", "-show_format", "-show_streams", - "\"{}\"".format(path_to_file) + "-show_programs", + "-show_chapters", + "-show_private_data", + "-print_format", "json", + path_to_file ] - command = " ".join(args) - logger.debug("FFprobe command: \"{}\"".format(command)) + + logger.debug("FFprobe command: {}".format( + subprocess.list2cmdline(args) + )) popen = subprocess.Popen( - command, - shell=True, + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) diff --git a/openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_remote_publish.py b/openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_remote_publish.py index 9ada437716..c3228bfe52 100644 --- a/openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_remote_publish.py +++ b/openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_remote_publish.py @@ -1,10 +1,10 @@ import os import json +import requests import hou from avalon import api, io -from avalon.vendor import requests import pyblish.api diff --git a/openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_render_deadline.py index f471d788b6..fa146c0d30 100644 --- 
a/openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_render_deadline.py +++ b/openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_render_deadline.py @@ -2,8 +2,8 @@ import os import json import getpass +import requests from avalon import api -from avalon.vendor import requests import pyblish.api diff --git a/openpype/modules/default_modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/default_modules/deadline/plugins/publish/submit_maya_deadline.py index 2d43b0d085..e6c42374ca 100644 --- a/openpype/modules/default_modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/default_modules/deadline/plugins/publish/submit_maya_deadline.py @@ -288,6 +288,22 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "pluginInfo", {}) ) + self.limit_groups = ( + context.data["project_settings"].get( + "deadline", {}).get( + "publish", {}).get( + "MayaSubmitDeadline", {}).get( + "limit", []) + ) + + self.group = ( + context.data["project_settings"].get( + "deadline", {}).get( + "publish", {}).get( + "MayaSubmitDeadline", {}).get( + "group", "none") + ) + context = instance.context workspace = context.data["workspaceDir"] anatomy = context.data['anatomy'] diff --git a/openpype/modules/default_modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/default_modules/deadline/plugins/publish/submit_nuke_deadline.py index 4cba35963c..ae9cd985eb 100644 --- a/openpype/modules/default_modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/openpype/modules/default_modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -1,10 +1,11 @@ import os +import re import json import getpass +import requests + from avalon import api -from avalon.vendor import requests -import re import pyblish.api import nuke @@ -94,24 +95,27 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): render_path).replace("\\", "/") instance.data["publishJobState"] = "Suspended" - if 
instance.data.get("bakeScriptPath"): - render_path = instance.data.get("bakeRenderPath") - script_path = instance.data.get("bakeScriptPath") - exe_node_name = instance.data.get("bakeWriteNodeName") + if instance.data.get("bakingNukeScripts"): + for baking_script in instance.data["bakingNukeScripts"]: + render_path = baking_script["bakeRenderPath"] + script_path = baking_script["bakeScriptPath"] + exe_node_name = baking_script["bakeWriteNodeName"] - # exception for slate workflow - if "slate" in instance.data["families"]: - self._frame_start += 1 + # exception for slate workflow + if "slate" in instance.data["families"]: + self._frame_start += 1 - resp = self.payload_submit(instance, - script_path, - render_path, - exe_node_name, - response.json() - ) - # Store output dir for unified publisher (filesequence) - instance.data["deadlineSubmissionJob"] = resp.json() - instance.data["publishJobState"] = "Suspended" + resp = self.payload_submit( + instance, + script_path, + render_path, + exe_node_name, + response.json() + ) + + # Store output dir for unified publisher (filesequence) + instance.data["deadlineSubmissionJob"] = resp.json() + instance.data["publishJobState"] = "Suspended" # redefinition of families if "render.farm" in families: diff --git a/openpype/modules/default_modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/default_modules/deadline/plugins/publish/submit_publish_job.py index 6b07749819..1e158bda9b 100644 --- a/openpype/modules/default_modules/deadline/plugins/publish/submit_publish_job.py +++ b/openpype/modules/default_modules/deadline/plugins/publish/submit_publish_job.py @@ -5,10 +5,11 @@ import os import json import re from copy import copy, deepcopy +import requests +import clique import openpype.api from avalon import api, io -from avalon.vendor import requests, clique import pyblish.api @@ -104,7 +105,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): families = ["render.farm", "prerender.farm", 
"renderlayer", "imagesequence", "vrayscene"] - aov_filter = {"maya": [r".*(?:\.|_)*([Bb]eauty)(?:\.|_)*.*"], + aov_filter = {"maya": [r".*(?:[\._-])*([Bb]eauty)(?:[\.|_])*.*"], "aftereffects": [r".*"], # for everything from AE "harmony": [r".*"], # for everything from AE "celaction": [r".*"]} @@ -142,8 +143,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): instance_transfer = { "slate": ["slateFrame"], "review": ["lutPath"], - "render2d": ["bakeScriptPath", "bakeRenderPath", - "bakeWriteNodeName", "version"], + "render2d": ["bakingNukeScripts", "version"], "renderlayer": ["convertToScanline"] } @@ -231,7 +231,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): args = [ 'publish', roothless_metadata_path, - "--targets {}".format("deadline") + "--targets", "deadline", + "--targets", "filesequence" ] # Generate the payload for Deadline submission @@ -505,9 +506,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): """ representations = [] collections, remainders = clique.assemble(exp_files) - bake_render_path = instance.get("bakeRenderPath", []) + bake_renders = instance.get("bakingNukeScripts", []) - # create representation for every collected sequence + # create representation for every collected sequento ce for collection in collections: ext = collection.tail.lstrip(".") preview = False @@ -523,7 +524,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): preview = True break - if bake_render_path: + if bake_renders: preview = False staging = os.path.dirname(list(collection)[0]) @@ -595,7 +596,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): }) self._solve_families(instance, True) - if remainder in bake_render_path: + if (bake_renders + and remainder in bake_renders[0]["bakeRenderPath"]): rep.update({ "fps": instance.get("fps"), "tags": ["review", "delete"] diff --git a/openpype/modules/default_modules/deadline/plugins/publish/validate_deadline_connection.py 
b/openpype/modules/default_modules/deadline/plugins/publish/validate_deadline_connection.py index ff664d9f83..d5016a4d82 100644 --- a/openpype/modules/default_modules/deadline/plugins/publish/validate_deadline_connection.py +++ b/openpype/modules/default_modules/deadline/plugins/publish/validate_deadline_connection.py @@ -1,7 +1,7 @@ -import pyblish.api - -from avalon.vendor import requests import os +import requests + +import pyblish.api class ValidateDeadlineConnection(pyblish.api.InstancePlugin): diff --git a/openpype/modules/default_modules/deadline/plugins/publish/validate_expected_and_rendered_files.py b/openpype/modules/default_modules/deadline/plugins/publish/validate_expected_and_rendered_files.py index addd4a2e80..719c7dfe3e 100644 --- a/openpype/modules/default_modules/deadline/plugins/publish/validate_expected_and_rendered_files.py +++ b/openpype/modules/default_modules/deadline/plugins/publish/validate_expected_and_rendered_files.py @@ -1,8 +1,8 @@ import os import json -import pyblish.api +import requests -from avalon.vendor import requests +import pyblish.api from openpype.lib.abstract_submit_deadline import requests_get from openpype.lib.delivery import collect_frames diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/event_sync_links.py b/openpype/modules/default_modules/ftrack/event_handlers_server/event_sync_links.py new file mode 100644 index 0000000000..83132acd85 --- /dev/null +++ b/openpype/modules/default_modules/ftrack/event_handlers_server/event_sync_links.py @@ -0,0 +1,147 @@ +from pymongo import UpdateOne +from bson.objectid import ObjectId + +from avalon.api import AvalonMongoDB + +from openpype_modules.ftrack.lib import ( + CUST_ATTR_ID_KEY, + query_custom_attributes, + + BaseEvent +) + + +class SyncLinksToAvalon(BaseEvent): + """Synchronize inpug linkts to avalon documents.""" + # Run after sync to avalon event handler + priority = 110 + + def __init__(self, session): + self.dbcon = AvalonMongoDB() + + 
super(SyncLinksToAvalon, self).__init__(session) + + def launch(self, session, event): + # Try to commit and if any error happen then recreate session + entities_info = event["data"]["entities"] + dependency_changes = [] + removed_entities = set() + for entity_info in entities_info: + action = entity_info.get("action") + entityType = entity_info.get("entityType") + if action not in ("remove", "add"): + continue + + if entityType == "task": + removed_entities.add(entity_info["entityId"]) + elif entityType == "dependency": + dependency_changes.append(entity_info) + + # Care only about dependency changes + if not dependency_changes: + return + + project_id = None + for entity_info in dependency_changes: + for parent_info in entity_info["parents"]: + if parent_info["entityType"] == "show": + project_id = parent_info["entityId"] + if project_id is not None: + break + + changed_to_ids = set() + for entity_info in dependency_changes: + to_id_change = entity_info["changes"]["to_id"] + if to_id_change["new"] is not None: + changed_to_ids.add(to_id_change["new"]) + + if to_id_change["old"] is not None: + changed_to_ids.add(to_id_change["old"]) + + self._update_in_links(session, changed_to_ids, project_id) + + def _update_in_links(self, session, ftrack_ids, project_id): + if not ftrack_ids or project_id is None: + return + + attr_def = session.query(( + "select id from CustomAttributeConfiguration where key is \"{}\"" + ).format(CUST_ATTR_ID_KEY)).first() + if attr_def is None: + return + + project_entity = session.query(( + "select full_name from Project where id is \"{}\"" + ).format(project_id)).first() + if not project_entity: + return + + project_name = project_entity["full_name"] + mongo_id_by_ftrack_id = self._get_mongo_ids_by_ftrack_ids( + session, attr_def["id"], ftrack_ids + ) + + filtered_ftrack_ids = tuple(mongo_id_by_ftrack_id.keys()) + context_links = session.query(( + "select from_id, to_id from TypedContextLink where to_id in ({})" + 
).format(self.join_query_keys(filtered_ftrack_ids))).all() + + mapping_by_to_id = { + ftrack_id: set() + for ftrack_id in filtered_ftrack_ids + } + all_from_ids = set() + for context_link in context_links: + to_id = context_link["to_id"] + from_id = context_link["from_id"] + if from_id == to_id: + continue + all_from_ids.add(from_id) + mapping_by_to_id[to_id].add(from_id) + + mongo_id_by_ftrack_id.update(self._get_mongo_ids_by_ftrack_ids( + session, attr_def["id"], all_from_ids + )) + self.log.info(mongo_id_by_ftrack_id) + bulk_writes = [] + for to_id, from_ids in mapping_by_to_id.items(): + dst_mongo_id = mongo_id_by_ftrack_id[to_id] + links = [] + for ftrack_id in from_ids: + link_mongo_id = mongo_id_by_ftrack_id.get(ftrack_id) + if link_mongo_id is None: + continue + + links.append({ + "id": ObjectId(link_mongo_id), + "linkedBy": "ftrack", + "type": "breakdown" + }) + + bulk_writes.append(UpdateOne( + {"_id": ObjectId(dst_mongo_id)}, + {"$set": {"data.inputLinks": links}} + )) + + if bulk_writes: + self.dbcon.database[project_name].bulk_write(bulk_writes) + + def _get_mongo_ids_by_ftrack_ids(self, session, attr_id, ftrack_ids): + output = query_custom_attributes( + session, [attr_id], ftrack_ids + ) + mongo_id_by_ftrack_id = {} + for item in output: + mongo_id = item["value"] + if not mongo_id: + continue + + ftrack_id = item["entity_id"] + + mongo_id_by_ftrack_id[ftrack_id] = mongo_id + return mongo_id_by_ftrack_id + + +def register(session): + '''Register plugin. 
Called when used as an plugin.''' + SyncLinksToAvalon(session).register() diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/openpype/modules/default_modules/ftrack/event_handlers_server/event_sync_to_avalon.py index 178dfc74c7..a4982627ff 100644 --- a/openpype/modules/default_modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/openpype/modules/default_modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -194,6 +194,7 @@ class SyncToAvalonEvent(BaseEvent): ftrack_id = proj["data"].get("ftrackId") if ftrack_id is None: ftrack_id = self._update_project_ftrack_id() + proj["data"]["ftrackId"] = ftrack_id self._avalon_ents_by_ftrack_id[ftrack_id] = proj for ent in ents: ftrack_id = ent["data"].get("ftrackId") @@ -584,6 +585,10 @@ class SyncToAvalonEvent(BaseEvent): continue ftrack_id = ftrack_id[0] + # Skip deleted projects + if action == "remove" and entityType == "show": + return True + # task modified, collect parent id of task, handle separately if entity_type.lower() == "task": changes = ent_info.get("changes") or {} diff --git a/openpype/modules/default_modules/ftrack/ftrack_module.py b/openpype/modules/default_modules/ftrack/ftrack_module.py index 6db80e6c4a..8a7525d65b 100644 --- a/openpype/modules/default_modules/ftrack/ftrack_module.py +++ b/openpype/modules/default_modules/ftrack/ftrack_module.py @@ -226,8 +226,8 @@ class FtrackModule( if not project_name: return - attributes_changes = changes.get("attributes") - if not attributes_changes: + new_attr_values = new_value.get("attributes") + if not new_attr_values: return import ftrack_api @@ -277,7 +277,7 @@ class FtrackModule( failed = {} missing = {} - for key, value in attributes_changes.items(): + for key, value in new_attr_values.items(): if key not in ca_keys: continue @@ -351,12 +351,24 @@ class FtrackModule( if "server_url" not in session_kwargs: session_kwargs["server_url"] = self.ftrack_url - if "api_key" not in session_kwargs 
or "api_user" not in session_kwargs: + api_key = session_kwargs.get("api_key") + api_user = session_kwargs.get("api_user") + # First look into environments + # - both OpenPype tray and ftrack event server should have set them + # - ftrack event server may crash when credentials are tried to load + # from keyring + if not api_key or not api_user: + api_key = os.environ.get("FTRACK_API_KEY") + api_user = os.environ.get("FTRACK_API_USER") + + if not api_key or not api_user: from .lib import credentials cred = credentials.get_credentials() - session_kwargs["api_user"] = cred.get("username") - session_kwargs["api_key"] = cred.get("api_key") + api_user = cred.get("username") + api_key = cred.get("api_key") + session_kwargs["api_user"] = api_user + session_kwargs["api_key"] = api_key return ftrack_api.Session(**session_kwargs) def tray_init(self): @@ -412,6 +424,14 @@ class FtrackModule( hours_logged = (task_entity["time_logged"] / 60) / 60 return hours_logged + def get_credentials(self): + # type: () -> tuple + """Get local Ftrack credentials.""" + from .lib import credentials + + cred = credentials.get_credentials(self.ftrack_url) + return cred.get("username"), cred.get("api_key") + def cli(self, click_group): click_group.add_command(cli_main) diff --git a/openpype/modules/default_modules/ftrack/lib/avalon_sync.py b/openpype/modules/default_modules/ftrack/lib/avalon_sync.py index 1667031f29..3ba874281a 100644 --- a/openpype/modules/default_modules/ftrack/lib/avalon_sync.py +++ b/openpype/modules/default_modules/ftrack/lib/avalon_sync.py @@ -22,7 +22,7 @@ from .custom_attributes import get_openpype_attr from bson.objectid import ObjectId from bson.errors import InvalidId -from pymongo import UpdateOne +from pymongo import UpdateOne, ReplaceOne import ftrack_api log = Logger.get_logger(__name__) @@ -328,7 +328,7 @@ class SyncEntitiesFactory: server_url=self._server_url, api_key=self._api_key, api_user=self._api_user, - auto_connect_event_hub=True + 
auto_connect_event_hub=False ) self.duplicates = {} @@ -341,6 +341,7 @@ class SyncEntitiesFactory: } self.create_list = [] + self.unarchive_list = [] self.updates = collections.defaultdict(dict) self.avalon_project = None @@ -1169,16 +1170,43 @@ class SyncEntitiesFactory: entity ) + def _get_input_links(self, ftrack_ids): + tupled_ids = tuple(ftrack_ids) + mapping_by_to_id = { + ftrack_id: set() + for ftrack_id in tupled_ids + } + ids_len = len(tupled_ids) + chunk_size = int(5000 / ids_len) + all_links = [] + for idx in range(0, ids_len, chunk_size): + entity_ids_joined = join_query_keys( + tupled_ids[idx:idx + chunk_size] + ) + + all_links.extend(self.session.query(( + "select from_id, to_id from" + " TypedContextLink where to_id in ({})" + ).format(entity_ids_joined)).all()) + + for context_link in all_links: + to_id = context_link["to_id"] + from_id = context_link["from_id"] + if from_id == to_id: + continue + mapping_by_to_id[to_id].add(from_id) + return mapping_by_to_id + def prepare_ftrack_ent_data(self): not_set_ids = [] - for id, entity_dict in self.entities_dict.items(): + for ftrack_id, entity_dict in self.entities_dict.items(): entity = entity_dict["entity"] if entity is None: - not_set_ids.append(id) + not_set_ids.append(ftrack_id) continue - self.entities_dict[id]["final_entity"] = {} - self.entities_dict[id]["final_entity"]["name"] = ( + self.entities_dict[ftrack_id]["final_entity"] = {} + self.entities_dict[ftrack_id]["final_entity"]["name"] = ( entity_dict["name"] ) data = {} @@ -1191,58 +1219,59 @@ class SyncEntitiesFactory: for key, val in entity_dict.get("hier_attrs", []).items(): data[key] = val - if id == self.ft_project_id: - project_name = entity["full_name"] - data["code"] = entity["name"] - self.entities_dict[id]["final_entity"]["data"] = data - self.entities_dict[id]["final_entity"]["type"] = "project" + if ftrack_id != self.ft_project_id: + ent_path_items = [ent["name"] for ent in entity["link"]] + parents = 
ent_path_items[1:len(ent_path_items) - 1:] - proj_schema = entity["project_schema"] - task_types = proj_schema["_task_type_schema"]["types"] - proj_apps, warnings = get_project_apps( - data.pop("applications", []) - ) - for msg, items in warnings.items(): - if not msg or not items: - continue - self.report_items["warning"][msg] = items - - current_project_anatomy_data = get_anatomy_settings( - project_name, exclude_locals=True - ) - anatomy_tasks = current_project_anatomy_data["tasks"] - tasks = {} - default_type_data = { - "short_name": "" - } - for task_type in task_types: - task_type_name = task_type["name"] - tasks[task_type_name] = copy.deepcopy( - anatomy_tasks.get(task_type_name) - or default_type_data - ) - - project_config = { - "tasks": tasks, - "apps": proj_apps - } - for key, value in current_project_anatomy_data.items(): - if key in project_config or key == "attributes": - continue - project_config[key] = value - - self.entities_dict[id]["final_entity"]["config"] = ( - project_config - ) + data["parents"] = parents + data["tasks"] = self.entities_dict[ftrack_id].pop("tasks", {}) + self.entities_dict[ftrack_id]["final_entity"]["data"] = data + self.entities_dict[ftrack_id]["final_entity"]["type"] = "asset" continue + project_name = entity["full_name"] + data["code"] = entity["name"] + self.entities_dict[ftrack_id]["final_entity"]["data"] = data + self.entities_dict[ftrack_id]["final_entity"]["type"] = ( + "project" + ) - ent_path_items = [ent["name"] for ent in entity["link"]] - parents = ent_path_items[1:len(ent_path_items) - 1:] + proj_schema = entity["project_schema"] + task_types = proj_schema["_task_type_schema"]["types"] + proj_apps, warnings = get_project_apps( + data.pop("applications", []) + ) + for msg, items in warnings.items(): + if not msg or not items: + continue + self.report_items["warning"][msg] = items - data["parents"] = parents - data["tasks"] = self.entities_dict[id].pop("tasks", {}) - self.entities_dict[id]["final_entity"]["data"] 
= data - self.entities_dict[id]["final_entity"]["type"] = "asset" + current_project_anatomy_data = get_anatomy_settings( + project_name, exclude_locals=True + ) + anatomy_tasks = current_project_anatomy_data["tasks"] + tasks = {} + default_type_data = { + "short_name": "" + } + for task_type in task_types: + task_type_name = task_type["name"] + tasks[task_type_name] = copy.deepcopy( + anatomy_tasks.get(task_type_name) + or default_type_data + ) + + project_config = { + "tasks": tasks, + "apps": proj_apps + } + for key, value in current_project_anatomy_data.items(): + if key in project_config or key == "attributes": + continue + project_config[key] = value + + self.entities_dict[ftrack_id]["final_entity"]["config"] = ( + project_config + ) if not_set_ids: self.log.debug(( @@ -1433,6 +1462,28 @@ class SyncEntitiesFactory: for child_id in entity_dict["children"]: children_queue.append(child_id) + def set_input_links(self): + ftrack_ids = set(self.create_ftrack_ids) | set(self.update_ftrack_ids) + + input_links_by_ftrack_id = self._get_input_links(ftrack_ids) + + for ftrack_id in ftrack_ids: + input_links = [] + final_entity = self.entities_dict[ftrack_id]["final_entity"] + final_entity["data"]["inputLinks"] = input_links + link_ids = input_links_by_ftrack_id[ftrack_id] + if not link_ids: + continue + + for ftrack_link_id in link_ids: + mongo_id = self.ftrack_avalon_mapper.get(ftrack_link_id) + if mongo_id is not None: + input_links.append({ + "id": ObjectId(mongo_id), + "linkedBy": "ftrack", + "type": "breakdown" + }) + def prepare_changes(self): self.log.debug("* Preparing changes for avalon/ftrack") hierarchy_changing_ids = [] @@ -1806,9 +1857,28 @@ class SyncEntitiesFactory: for ftrack_id in self.create_ftrack_ids: # CHECK it is possible that entity was already created # because is parent of another entity which was processed first - if ftrack_id in self.ftrack_avalon_mapper: - continue - self.create_avalon_entity(ftrack_id) + if ftrack_id not in 
self.ftrack_avalon_mapper: + self.create_avalon_entity(ftrack_id) + + self.set_input_links() + + unarchive_writes = [] + for item in self.unarchive_list: + mongo_id = item["_id"] + unarchive_writes.append(ReplaceOne( + {"_id": mongo_id}, + item + )) + av_ent_path_items = item["data"]["parents"] + av_ent_path_items.append(item["name"]) + av_ent_path = "/".join(av_ent_path_items) + self.log.debug( + "Entity was unarchived <{}>".format(av_ent_path) + ) + self.remove_from_archived(mongo_id) + + if unarchive_writes: + self.dbcon.bulk_write(unarchive_writes) if len(self.create_list) > 0: self.dbcon.insert_many(self.create_list) @@ -1899,14 +1969,8 @@ class SyncEntitiesFactory: if unarchive is False: self.create_list.append(item) - return - # If unarchive then replace entity data in database - self.dbcon.replace_one({"_id": new_id}, item) - self.remove_from_archived(mongo_id) - av_ent_path_items = item["data"]["parents"] - av_ent_path_items.append(item["name"]) - av_ent_path = "/".join(av_ent_path_items) - self.log.debug("Entity was unarchived <{}>".format(av_ent_path)) + else: + self.unarchive_list.append(item) def check_unarchivation(self, ftrack_id, mongo_id, name): archived_by_id = self.avalon_archived_by_id.get(mongo_id) diff --git a/openpype/modules/default_modules/ftrack/lib/ftrack_base_handler.py b/openpype/modules/default_modules/ftrack/lib/ftrack_base_handler.py index a457b886ac..2130abc20c 100644 --- a/openpype/modules/default_modules/ftrack/lib/ftrack_base_handler.py +++ b/openpype/modules/default_modules/ftrack/lib/ftrack_base_handler.py @@ -570,9 +570,15 @@ class BaseHandler(object): if low_entity_type == "assetversion": asset = entity["asset"] + parent = None if asset: parent = asset["parent"] - if parent: + + if parent: + if parent.entity_type.lower() == "project": + return parent + + if "project" in parent: return parent["project"] project_data = entity["link"][0] diff --git 
a/openpype/modules/default_modules/ftrack/plugins/publish/collect_local_ftrack_creds.py b/openpype/modules/default_modules/ftrack/plugins/publish/collect_local_ftrack_creds.py new file mode 100644 index 0000000000..2093ebf18a --- /dev/null +++ b/openpype/modules/default_modules/ftrack/plugins/publish/collect_local_ftrack_creds.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +"""Collect default Deadline server.""" +import pyblish.api +import os + + +class CollectLocalFtrackCreds(pyblish.api.ContextPlugin): + """Collect default Royal Render path.""" + + order = pyblish.api.CollectorOrder + 0.01 + label = "Collect local ftrack credentials" + targets = ["rr_control"] + + def process(self, context): + if os.getenv("FTRACK_API_USER") and os.getenv("FTRACK_API_KEY") and \ + os.getenv("FTRACK_SERVER"): + return + ftrack_module = context.data["openPypeModules"]["ftrack"] + if ftrack_module.enabled: + creds = ftrack_module.get_credentials() + os.environ["FTRACK_API_USER"] = creds[0] + os.environ["FTRACK_API_KEY"] = creds[1] + os.environ["FTRACK_SERVER"] = ftrack_module.ftrack_url diff --git a/openpype/modules/default_modules/ftrack/plugins/publish/collect_username.py b/openpype/modules/default_modules/ftrack/plugins/publish/collect_username.py index a5187dd52b..7ea1c1f323 100644 --- a/openpype/modules/default_modules/ftrack/plugins/publish/collect_username.py +++ b/openpype/modules/default_modules/ftrack/plugins/publish/collect_username.py @@ -27,7 +27,7 @@ class CollectUsername(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder - 0.488 label = "Collect ftrack username" hosts = ["webpublisher", "photoshop"] - targets = ["remotepublish", "filespublish"] + targets = ["remotepublish", "filespublish", "tvpaint_worker"] _context = None diff --git a/openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_instances.py b/openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_instances.py index 93a07a9fae..8399e19184 100644 --- 
a/openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_instances.py +++ b/openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_instances.py @@ -1,208 +1,266 @@ -import pyblish.api -import json import os +import json +import copy +import pyblish.api class IntegrateFtrackInstance(pyblish.api.InstancePlugin): - """Collect ftrack component data + """Collect ftrack component data (not integrate yet). Add ftrack component list to instance. - - """ order = pyblish.api.IntegratorOrder + 0.48 - label = 'Integrate Ftrack Component' + label = "Integrate Ftrack Component" families = ["ftrack"] - family_mapping = {'camera': 'cam', - 'look': 'look', - 'mayaascii': 'scene', - 'model': 'geo', - 'rig': 'rig', - 'setdress': 'setdress', - 'pointcache': 'cache', - 'render': 'render', - 'render2d': 'render', - 'nukescript': 'comp', - 'write': 'render', - 'review': 'mov', - 'plate': 'img', - 'audio': 'audio', - 'workfile': 'scene', - 'animation': 'cache', - 'image': 'img', - 'reference': 'reference' - } + family_mapping = { + "camera": "cam", + "look": "look", + "mayaascii": "scene", + "model": "geo", + "rig": "rig", + "setdress": "setdress", + "pointcache": "cache", + "render": "render", + "render2d": "render", + "nukescript": "comp", + "write": "render", + "review": "mov", + "plate": "img", + "audio": "audio", + "workfile": "scene", + "animation": "cache", + "image": "img", + "reference": "reference" + } def process(self, instance): - self.ftrack_locations = {} - self.log.debug('instance {}'.format(instance)) + self.log.debug("instance {}".format(instance)) - if instance.data.get('version'): - version_number = int(instance.data.get('version')) - else: + instance_version = instance.data.get("version") + if instance_version is None: raise ValueError("Instance version not set") - family = instance.data['family'].lower() + version_number = int(instance_version) + + family = instance.data["family"] + family_low = instance.data["family"].lower() 
asset_type = instance.data.get("ftrackFamily") - if not asset_type and family in self.family_mapping: - asset_type = self.family_mapping[family] + if not asset_type and family_low in self.family_mapping: + asset_type = self.family_mapping[family_low] # Ignore this instance if neither "ftrackFamily" or a family mapping is # found. if not asset_type: + self.log.info(( + "Family \"{}\" does not match any asset type mapping" + ).format(family)) return - componentList = [] + instance_repres = instance.data.get("representations") + if not instance_repres: + self.log.info(( + "Skipping instance. Does not have any representations {}" + ).format(str(instance))) + return + + # Prepare FPS + instance_fps = instance.data.get("fps") + if instance_fps is None: + instance_fps = instance.context.data["fps"] + + # Base of component item data + # - create a copy of this object when want to use it + base_component_item = { + "assettype_data": { + "short": asset_type, + }, + "asset_data": { + "name": instance.data["subset"], + }, + "assetversion_data": { + "version": version_number, + "comment": instance.context.data.get("comment") or "" + }, + "component_overwrite": False, + # This can be change optionally + "thumbnail": False, + # These must be changed for each component + "component_data": None, + "component_path": None, + "component_location": None + } + ft_session = instance.context.data["ftrackSession"] - for comp in instance.data['representations']: - self.log.debug('component {}'.format(comp)) + # Filter types of representations + review_representations = [] + thumbnail_representations = [] + other_representations = [] + for repre in instance_repres: + self.log.debug("Representation {}".format(repre)) + repre_tags = repre.get("tags") or [] + if repre.get("thumbnail") or "thumbnail" in repre_tags: + thumbnail_representations.append(repre) - if comp.get('thumbnail') or ("thumbnail" in comp.get('tags', [])): - location = self.get_ftrack_location( - 'ftrack.server', ft_session - ) 
- component_data = { - "name": "thumbnail" # Default component name is "main". - } - comp['thumbnail'] = True - comp_files = comp["files"] + elif "ftrackreview" in repre_tags: + review_representations.append(repre) + + else: + other_representations.append(repre) + + # Prepare ftrack locations + unmanaged_location = ft_session.query( + "Location where name is \"ftrack.unmanaged\"" + ).one() + ftrack_server_location = ft_session.query( + "Location where name is \"ftrack.server\"" + ).one() + + # Components data + component_list = [] + # Components that will be duplicated to unmanaged location + src_components_to_add = [] + + # Create thumbnail components + # TODO what if there is multiple thumbnails? + first_thumbnail_component = None + for repre in thumbnail_representations: + published_path = repre.get("published_path") + if not published_path: + comp_files = repre["files"] if isinstance(comp_files, (tuple, list, set)): filename = comp_files[0] else: filename = comp_files - comp['published_path'] = os.path.join( - comp['stagingDir'], filename - ) - - elif comp.get('ftrackreview') or ("ftrackreview" in comp.get('tags', [])): - ''' - Ftrack bug requirement: - - Start frame must be 0 - - End frame must be {duration} - EXAMPLE: When mov has 55 frames: - - Start frame should be 0 - - End frame should be 55 (do not ask why please!) - ''' - start_frame = 0 - end_frame = 1 - if 'frameEndFtrack' in comp and 'frameStartFtrack' in comp: - end_frame += ( - comp['frameEndFtrack'] - comp['frameStartFtrack'] - ) - else: - end_frame += ( - instance.data["frameEnd"] - instance.data["frameStart"] - ) - - fps = comp.get('fps') - if fps is None: - fps = instance.data.get( - "fps", instance.context.data['fps'] - ) - - comp['fps'] = fps - - location = self.get_ftrack_location( - 'ftrack.server', ft_session + published_path = os.path.join( + repre["stagingDir"], filename ) - component_data = { - # Default component name is "main". 
- "name": "ftrackreview-mp4", - "metadata": {'ftr_meta': json.dumps({ - 'frameIn': int(start_frame), - 'frameOut': int(end_frame), - 'frameRate': float(comp['fps'])})} - } - comp['thumbnail'] = False - else: - component_data = { - "name": comp['name'] - } - location = self.get_ftrack_location( - 'ftrack.unmanaged', ft_session - ) - comp['thumbnail'] = False + if not os.path.exists(published_path): + continue + repre["published_path"] = published_path - self.log.debug('location {}'.format(location)) - - component_item = { - "assettype_data": { - "short": asset_type, - }, - "asset_data": { - "name": instance.data["subset"], - }, - "assetversion_data": { - "version": version_number, - "comment": instance.context.data.get("comment", "") - }, - "component_data": component_data, - "component_path": comp['published_path'], - 'component_location': location, - "component_overwrite": False, - "thumbnail": comp['thumbnail'] + # Create copy of base comp item and append it + thumbnail_item = copy.deepcopy(base_component_item) + thumbnail_item["component_path"] = repre["published_path"] + thumbnail_item["component_data"] = { + "name": "thumbnail" } + thumbnail_item["thumbnail"] = True + # Create copy of item before setting location + src_components_to_add.append(copy.deepcopy(thumbnail_item)) + # Create copy of first thumbnail + if first_thumbnail_component is None: + first_thumbnail_component = copy.deepcopy(thumbnail_item) + # Set location + thumbnail_item["component_location"] = ftrack_server_location + # Add item to component list + component_list.append(thumbnail_item) - # Add custom attributes for AssetVersion - assetversion_cust_attrs = {} - intent_val = instance.context.data.get("intent") - if intent_val and isinstance(intent_val, dict): - intent_val = intent_val.get("value") + # Create review components + # Change asset name of each new component for review + is_first_review_repre = True + not_first_components = [] + for repre in review_representations: + frame_start = 
repre.get("frameStartFtrack") + frame_end = repre.get("frameEndFtrack") + if frame_start is None or frame_end is None: + frame_start = instance.data["frameStart"] + frame_end = instance.data["frameEnd"] - if intent_val: - assetversion_cust_attrs["intent"] = intent_val + # Frame end of uploaded video file should be duration in frames + # - frame start is always 0 + # - frame end is duration in frames + duration = frame_end - frame_start + 1 - component_item["assetversion_data"]["custom_attributes"] = ( - assetversion_cust_attrs - ) + fps = repre.get("fps") + if fps is None: + fps = instance_fps - componentList.append(component_item) - # Create copy with ftrack.unmanaged location if thumb or prev - if comp.get('thumbnail') or comp.get('preview') \ - or ("preview" in comp.get('tags', [])) \ - or ("review" in comp.get('tags', [])) \ - or ("thumbnail" in comp.get('tags', [])): - unmanaged_loc = self.get_ftrack_location( - 'ftrack.unmanaged', ft_session - ) - - component_data_src = component_data.copy() - name = component_data['name'] + '_src' - component_data_src['name'] = name - - component_item_src = { - "assettype_data": { - "short": asset_type, - }, - "asset_data": { - "name": instance.data["subset"], - }, - "assetversion_data": { - "version": version_number, - }, - "component_data": component_data_src, - "component_path": comp['published_path'], - 'component_location': unmanaged_loc, - "component_overwrite": False, - "thumbnail": False + # Create copy of base comp item and append it + review_item = copy.deepcopy(base_component_item) + # Change location + review_item["component_path"] = repre["published_path"] + # Change component data + review_item["component_data"] = { + # Default component name is "main". 
+ "name": "ftrackreview-mp4", + "metadata": { + "ftr_meta": json.dumps({ + "frameIn": 0, + "frameOut": int(duration), + "frameRate": float(fps) + }) } + } + # Create copy of item before setting location or changing asset + src_components_to_add.append(copy.deepcopy(review_item)) + if is_first_review_repre: + is_first_review_repre = False + else: + # Add representation name to asset name of "not first" review + asset_name = review_item["asset_data"]["name"] + review_item["asset_data"]["name"] = "_".join( + (asset_name, repre["name"]) + ) + not_first_components.append(review_item) - componentList.append(component_item_src) + # Set location + review_item["component_location"] = ftrack_server_location + # Add item to component list + component_list.append(review_item) - self.log.debug('componentsList: {}'.format(str(componentList))) - instance.data["ftrackComponentsList"] = componentList + # Duplicate thumbnail component for all not first reviews + if first_thumbnail_component is not None: + for component_item in not_first_components: + asset_name = component_item["asset_data"]["name"] + new_thumbnail_component = copy.deepcopy( + first_thumbnail_component + ) + new_thumbnail_component["asset_data"]["name"] = asset_name + new_thumbnail_component["component_location"] = ( + ftrack_server_location + ) + component_list.append(new_thumbnail_component) - def get_ftrack_location(self, name, session): - if name in self.ftrack_locations: - return self.ftrack_locations[name] + # Add source components for review and thubmnail components + for copy_src_item in src_components_to_add: + # Make sure thumbnail is disabled + copy_src_item["thumbnail"] = False + # Set location + copy_src_item["component_location"] = unmanaged_location + # Modify name of component to have suffix "_src" + component_data = copy_src_item["component_data"] + component_name = component_data["name"] + component_data["name"] = component_name + "_src" + component_list.append(copy_src_item) - location = 
session.query( - 'Location where name is "{}"'.format(name) - ).one() - self.ftrack_locations[name] = location - return location + # Add others representations as component + for repre in other_representations: + published_path = repre.get("published_path") + if not published_path: + continue + # Create copy of base comp item and append it + other_item = copy.deepcopy(base_component_item) + other_item["component_data"] = { + "name": repre["name"] + } + other_item["component_location"] = unmanaged_location + other_item["component_path"] = published_path + component_list.append(other_item) + + def json_obj_parser(obj): + return str(obj) + + self.log.debug("Components list: {}".format( + json.dumps( + component_list, + sort_keys=True, + indent=4, + default=json_obj_parser + ) + )) + instance.data["ftrackComponentsList"] = component_list diff --git a/openpype/modules/default_modules/ftrack/plugins/publish/integrate_remove_components.py b/openpype/modules/default_modules/ftrack/plugins/publish/integrate_remove_components.py deleted file mode 100644 index 26cac0f1ae..0000000000 --- a/openpype/modules/default_modules/ftrack/plugins/publish/integrate_remove_components.py +++ /dev/null @@ -1,30 +0,0 @@ -import pyblish.api -import os - - -class IntegrateCleanComponentData(pyblish.api.InstancePlugin): - """ - Cleaning up thumbnail an mov files after they have been integrated - """ - - order = pyblish.api.IntegratorOrder + 0.5 - label = 'Clean component data' - families = ["ftrack"] - optional = True - active = False - - def process(self, instance): - - for comp in instance.data['representations']: - self.log.debug('component {}'.format(comp)) - - if "%" in comp['published_path'] or "#" in comp['published_path']: - continue - - if comp.get('thumbnail') or ("thumbnail" in comp.get('tags', [])): - os.remove(comp['published_path']) - self.log.info('Thumbnail image was erased') - - elif comp.get('preview') or ("preview" in comp.get('tags', [])): - os.remove(comp['published_path']) 
- self.log.info('Preview mov file was erased') diff --git a/openpype/modules/default_modules/job_queue/__init__.py b/openpype/modules/default_modules/job_queue/__init__.py new file mode 100644 index 0000000000..6f2cec1b97 --- /dev/null +++ b/openpype/modules/default_modules/job_queue/__init__.py @@ -0,0 +1,6 @@ +from .module import JobQueueModule + + +__all__ = ( + "JobQueueModule", +) diff --git a/openpype/modules/default_modules/job_queue/job_server/__init__.py b/openpype/modules/default_modules/job_queue/job_server/__init__.py new file mode 100644 index 0000000000..c73d830257 --- /dev/null +++ b/openpype/modules/default_modules/job_queue/job_server/__init__.py @@ -0,0 +1,8 @@ +from .server import WebServerManager +from .utils import main + + +__all__ = ( + "WebServerManager", + "main" +) diff --git a/openpype/modules/default_modules/job_queue/job_server/job_queue_route.py b/openpype/modules/default_modules/job_queue/job_server/job_queue_route.py new file mode 100644 index 0000000000..8929e64dc5 --- /dev/null +++ b/openpype/modules/default_modules/job_queue/job_server/job_queue_route.py @@ -0,0 +1,62 @@ +import json + +from aiohttp.web_response import Response + + +class JobQueueResource: + def __init__(self, job_queue, server_manager): + self.server_manager = server_manager + + self._prefix = "/api" + + self._job_queue = job_queue + + self.endpoint_defs = ( + ("POST", "/jobs", self.post_job), + ("GET", "/jobs", self.get_jobs), + ("GET", "/jobs/{job_id}", self.get_job) + ) + + self.register() + + def register(self): + for methods, url, callback in self.endpoint_defs: + final_url = self._prefix + url + self.server_manager.add_route( + methods, final_url, callback + ) + + async def get_jobs(self, request): + jobs_data = [] + for job in self._job_queue.get_jobs(): + jobs_data.append(job.status()) + return Response(status=200, body=self.encode(jobs_data)) + + async def post_job(self, request): + data = await request.json() + host_name = data.get("host_name") + if not 
host_name: + return Response( + status=400, message="Key \"host_name\" not filled." + ) + + job = self._job_queue.create_job(host_name, data) + return Response(status=201, text=job.id) + + async def get_job(self, request): + job_id = request.match_info["job_id"] + content = self._job_queue.get_job_status(job_id) + if content is None: + content = {} + return Response( + status=200, + body=self.encode(content), + content_type="application/json" + ) + + @classmethod + def encode(cls, data): + return json.dumps( + data, + indent=4 + ).encode("utf-8") diff --git a/openpype/modules/default_modules/job_queue/job_server/jobs.py b/openpype/modules/default_modules/job_queue/job_server/jobs.py new file mode 100644 index 0000000000..0fc3c381d4 --- /dev/null +++ b/openpype/modules/default_modules/job_queue/job_server/jobs.py @@ -0,0 +1,240 @@ +import datetime +import collections +from uuid import uuid4 + + +class Job: + """Job related to specific host name. + + Data must contain everything needed to finish the job. 
+ """ + # Remove done jobs each n days to clear memory + keep_in_memory_days = 3 + + def __init__(self, host_name, data, job_id=None, created_time=None): + if job_id is None: + job_id = str(uuid4()) + self._id = job_id + if created_time is None: + created_time = datetime.datetime.now() + self._created_time = created_time + self._started_time = None + self._done_time = None + self.host_name = host_name + self.data = data + self._result_data = None + + self._started = False + self._done = False + self._errored = False + self._message = None + self._deleted = False + + self._worker = None + + def keep_in_memory(self): + if self._done_time is None: + return True + + now = datetime.datetime.now() + delta = now - self._done_time + return delta.days < self.keep_in_memory_days + + @property + def id(self): + return self._id + + @property + def done(self): + return self._done + + def reset(self): + self._started = False + self._started_time = None + self._done = False + self._done_time = None + self._errored = False + self._message = None + + self._worker = None + + @property + def started(self): + return self._started + + @property + def deleted(self): + return self._deleted + + def set_deleted(self): + self._deleted = True + self.set_worker(None) + + def set_worker(self, worker): + if worker is self._worker: + return + + if self._worker is not None: + self._worker.set_current_job(None) + + self._worker = worker + if worker is not None: + worker.set_current_job(self) + + def set_started(self): + self._started_time = datetime.datetime.now() + self._started = True + + def set_done(self, success=True, message=None, data=None): + self._done = True + self._done_time = datetime.datetime.now() + self._errored = not success + self._message = message + self._result_data = data + if self._worker is not None: + self._worker.set_current_job(None) + + def status(self): + worker_id = None + if self._worker is not None: + worker_id = self._worker.id + output = { + "id": self.id, + 
"worker_id": worker_id, + "done": self._done + } + output["message"] = self._message or None + + state = "waiting" + if self._deleted: + state = "deleted" + elif self._errored: + state = "error" + elif self._done: + state = "done" + elif self._started: + state = "started" + + output["result"] = self._result_data + + output["state"] = state + + return output + + +class JobQueue: + """Queue holds jobs that should be done and workers that can do them. + + Also asign jobs to a worker. + """ + old_jobs_check_minutes_interval = 30 + + def __init__(self): + self._last_old_jobs_check = datetime.datetime.now() + self._jobs_by_id = {} + self._job_queue_by_host_name = collections.defaultdict( + collections.deque + ) + self._workers_by_id = {} + self._workers_by_host_name = collections.defaultdict(list) + + def workers(self): + """All currently registered workers.""" + return self._workers_by_id.values() + + def add_worker(self, worker): + host_name = worker.host_name + print("Added new worker for \"{}\"".format(host_name)) + self._workers_by_id[worker.id] = worker + self._workers_by_host_name[host_name].append(worker) + + def get_worker(self, worker_id): + return self._workers_by_id.get(worker_id) + + def remove_worker(self, worker): + # Look if worker had assigned job to do + job = worker.current_job + if job is not None and not job.done: + # Reset job + job.set_worker(None) + job.reset() + # Add job back to queue + self._job_queue_by_host_name[job.host_name].appendleft(job) + + # Remove worker from registered workers + self._workers_by_id.pop(worker.id, None) + host_name = worker.host_name + if worker in self._workers_by_host_name[host_name]: + self._workers_by_host_name[host_name].remove(worker) + + print("Removed worker for \"{}\"".format(host_name)) + + def assign_jobs(self): + """Try to assign job for each idle worker. + + Error all jobs without needed worker. 
+ """ + available_host_names = set() + for worker in self._workers_by_id.values(): + host_name = worker.host_name + available_host_names.add(host_name) + if worker.is_idle(): + jobs = self._job_queue_by_host_name[host_name] + while jobs: + job = jobs.popleft() + if not job.deleted: + worker.set_current_job(job) + break + + for host_name in tuple(self._job_queue_by_host_name.keys()): + if host_name in available_host_names: + continue + + jobs_deque = self._job_queue_by_host_name[host_name] + message = ("Not available workers for \"{}\"").format(host_name) + while jobs_deque: + job = jobs_deque.popleft() + if not job.deleted: + job.set_done(False, message) + self._remove_old_jobs() + + def get_jobs(self): + return self._jobs_by_id.values() + + def get_job(self, job_id): + """Job by it's id.""" + return self._jobs_by_id.get(job_id) + + def create_job(self, host_name, job_data): + """Create new job from passed data and add it to queue.""" + job = Job(host_name, job_data) + self._jobs_by_id[job.id] = job + self._job_queue_by_host_name[host_name].append(job) + return job + + def _remove_old_jobs(self): + """Once in specific time look if should remove old finished jobs.""" + delta = datetime.datetime.now() - self._last_old_jobs_check + if delta.seconds < self.old_jobs_check_minutes_interval: + return + + for job_id in tuple(self._jobs_by_id.keys()): + job = self._jobs_by_id[job_id] + if not job.keep_in_memory(): + self._jobs_by_id.pop(job_id) + + def remove_job(self, job_id): + """Delete job and eventually stop it.""" + job = self._jobs_by_id.get(job_id) + if job is None: + return + + job.set_deleted() + self._jobs_by_id.pop(job.id) + + def get_job_status(self, job_id): + """Job's status based on id.""" + job = self._jobs_by_id.get(job_id) + if job is None: + return {} + return job.status() diff --git a/openpype/modules/default_modules/job_queue/job_server/server.py b/openpype/modules/default_modules/job_queue/job_server/server.py new file mode 100644 index 
0000000000..cc0968b6b6 --- /dev/null +++ b/openpype/modules/default_modules/job_queue/job_server/server.py @@ -0,0 +1,154 @@ +import threading +import asyncio +import logging + +from aiohttp import web + +from .jobs import JobQueue +from .job_queue_route import JobQueueResource +from .workers_rpc_route import WorkerRpc + +log = logging.getLogger(__name__) + + +class WebServerManager: + """Manger that care about web server thread.""" + def __init__(self, port, host, loop=None): + self.port = port + self.host = host + self.app = web.Application() + if loop is None: + loop = asyncio.new_event_loop() + + # add route with multiple methods for single "external app" + self.webserver_thread = WebServerThread(self, loop) + + @property + def url(self): + return "http://{}:{}".format(self.host, self.port) + + def add_route(self, *args, **kwargs): + self.app.router.add_route(*args, **kwargs) + + def add_static(self, *args, **kwargs): + self.app.router.add_static(*args, **kwargs) + + def start_server(self): + if self.webserver_thread and not self.webserver_thread.is_alive(): + self.webserver_thread.start() + + def stop_server(self): + if not self.is_running: + return + + try: + log.debug("Stopping Web server") + self.webserver_thread.stop() + + except Exception as exc: + print("Errored", str(exc)) + log.warning( + "Error has happened during Killing Web server", + exc_info=True + ) + + @property + def is_running(self): + if self.webserver_thread is not None: + return self.webserver_thread.is_running + return False + + +class WebServerThread(threading.Thread): + """ Listener for requests in thread.""" + def __init__(self, manager, loop): + super(WebServerThread, self).__init__() + + self._is_running = False + self._stopped = False + self.manager = manager + self.loop = loop + self.runner = None + self.site = None + + job_queue = JobQueue() + self.job_queue_route = JobQueueResource(job_queue, manager) + self.workers_route = WorkerRpc(job_queue, manager, loop=loop) + + @property + 
def port(self): + return self.manager.port + + @property + def host(self): + return self.manager.host + + @property + def stopped(self): + return self._stopped + + @property + def is_running(self): + return self._is_running + + def run(self): + self._is_running = True + + try: + log.info("Starting WebServer server") + asyncio.set_event_loop(self.loop) + self.loop.run_until_complete(self.start_server()) + + asyncio.ensure_future(self.check_shutdown(), loop=self.loop) + self.loop.run_forever() + + except Exception: + log.warning( + "Web Server service has failed", exc_info=True + ) + finally: + self.loop.close() + + self._is_running = False + log.info("Web server stopped") + + async def start_server(self): + """ Starts runner and TCPsite """ + self.runner = web.AppRunner(self.manager.app) + await self.runner.setup() + self.site = web.TCPSite(self.runner, self.host, self.port) + await self.site.start() + + def stop(self): + """Sets _stopped flag to True, 'check_shutdown' shuts server down""" + self._stopped = True + + async def check_shutdown(self): + """ Future that is running and checks if server should be running + periodically. 
+ """ + while not self._stopped: + await asyncio.sleep(0.5) + + print("Starting shutdown") + if self.workers_route: + await self.workers_route.stop() + + print("Stopping site") + await self.site.stop() + print("Site stopped") + await self.runner.cleanup() + + print("Runner stopped") + tasks = [ + task + for task in asyncio.all_tasks() + if task is not asyncio.current_task() + ] + list(map(lambda task: task.cancel(), tasks)) # cancel all the tasks + results = await asyncio.gather(*tasks, return_exceptions=True) + log.debug(f'Finished awaiting cancelled tasks, results: {results}...') + await self.loop.shutdown_asyncgens() + # to really make sure everything else has time to stop + await asyncio.sleep(0.07) + self.loop.stop() diff --git a/openpype/modules/default_modules/job_queue/job_server/utils.py b/openpype/modules/default_modules/job_queue/job_server/utils.py new file mode 100644 index 0000000000..127ca5f090 --- /dev/null +++ b/openpype/modules/default_modules/job_queue/job_server/utils.py @@ -0,0 +1,51 @@ +import sys +import signal +import time +import socket + +from .server import WebServerManager + + +class SharedObjects: + stopped = False + + @classmethod + def stop(cls): + cls.stopped = True + + +def main(port=None, host=None): + def signal_handler(sig, frame): + print("Signal to kill process received. Termination starts.") + SharedObjects.stop() + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + port = int(port or 8079) + host = str(host or "localhost") + + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as con: + result_of_check = con.connect_ex((host, port)) + + if result_of_check == 0: + print(( + "Server {}:{} is already running or address is occupied." 
+ ).format(host, port)) + return 1 + + print("Running server {}:{}".format(host, port)) + manager = WebServerManager(port, host) + manager.start_server() + + stopped = False + while manager.is_running: + if not stopped and SharedObjects.stopped: + stopped = True + manager.stop_server() + time.sleep(0.1) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/openpype/modules/default_modules/job_queue/job_server/workers.py b/openpype/modules/default_modules/job_queue/job_server/workers.py new file mode 100644 index 0000000000..28ca649c03 --- /dev/null +++ b/openpype/modules/default_modules/job_queue/job_server/workers.py @@ -0,0 +1,122 @@ +import asyncio +from uuid import uuid4 +from aiohttp import WSCloseCode +from aiohttp_json_rpc.protocol import encode_request + + +class WorkerState: + IDLE = object() + JOB_ASSIGNED = object() + JOB_SENT = object() + + +class Worker: + """Worker that can handle jobs of specific host.""" + def __init__(self, host_name, http_request): + self._id = None + self.host_name = host_name + self._http_request = http_request + self._state = WorkerState.IDLE + self._job = None + + # Give ability to send requests to worker + http_request.request_id = str(uuid4()) + http_request.pending_requests = {} + + async def send_job(self): + if self._job is not None: + data = { + "job_id": self._job.id, + "worker_id": self.id, + "data": self._job.data + } + return await self.call("start_job", data) + return False + + async def call(self, method, params=None, timeout=None): + """Call method on worker's side.""" + request_id = self._http_request.request_id + self._http_request.request_id = str(uuid4()) + pending_requests = self._http_request.pending_requests + pending_requests[request_id] = asyncio.Future() + + request = encode_request(method, id=request_id, params=params) + + await self._http_request.ws.send_str(request) + + if timeout: + await asyncio.wait_for( + pending_requests[request_id], + timeout=timeout + ) + + else: + await 
pending_requests[request_id] + + result = pending_requests[request_id].result() + del pending_requests[request_id] + + return result + + async def close(self): + return await self.ws.close( + code=WSCloseCode.GOING_AWAY, + message="Server shutdown" + ) + + @property + def id(self): + if self._id is None: + self._id = str(uuid4()) + return self._id + + @property + def state(self): + return self._state + + @property + def current_job(self): + return self._job + + @property + def http_request(self): + return self._http_request + + @property + def ws(self): + return self.http_request.ws + + def connection_is_alive(self): + if self.ws.closed or self.ws._writer.transport.is_closing(): + return False + return True + + def is_idle(self): + return self._state is WorkerState.IDLE + + def job_assigned(self): + return ( + self._state is WorkerState.JOB_ASSIGNED + or self._state is WorkerState.JOB_SENT + ) + + def is_working(self): + return self._state is WorkerState.JOB_SENT + + def set_current_job(self, job): + if job is self._job: + return + + self._job = job + if job is None: + self._set_idle() + else: + self._state = WorkerState.JOB_ASSIGNED + job.set_worker(self) + + def _set_idle(self): + self._job = None + self._state = WorkerState.IDLE + + def set_working(self): + self._state = WorkerState.JOB_SENT diff --git a/openpype/modules/default_modules/job_queue/job_server/workers_rpc_route.py b/openpype/modules/default_modules/job_queue/job_server/workers_rpc_route.py new file mode 100644 index 0000000000..0800ca0d4d --- /dev/null +++ b/openpype/modules/default_modules/job_queue/job_server/workers_rpc_route.py @@ -0,0 +1,124 @@ +import asyncio + +import aiohttp +from aiohttp_json_rpc import JsonRpc +from aiohttp_json_rpc.protocol import ( + encode_error, decode_msg, JsonRpcMsgTyp +) +from aiohttp_json_rpc.exceptions import RpcError +from .workers import Worker + + +class WorkerRpc(JsonRpc): + def __init__(self, job_queue, manager, **kwargs): + super().__init__(**kwargs) + + 
self._job_queue = job_queue + self._manager = manager + + self._stopped = False + + # Register methods + self.add_methods( + ("", self.register_worker), + ("", self.job_done) + ) + asyncio.ensure_future(self._rpc_loop(), loop=self.loop) + + self._manager.add_route( + "*", "/ws", self.handle_request + ) + + # Panel routes for tools + async def register_worker(self, request, host_name): + worker = Worker(host_name, request.http_request) + self._job_queue.add_worker(worker) + return worker.id + + async def _rpc_loop(self): + while self.loop.is_running(): + if self._stopped: + break + + for worker in tuple(self._job_queue.workers()): + if not worker.connection_is_alive(): + self._job_queue.remove_worker(worker) + self._job_queue.assign_jobs() + + await self.send_jobs() + await asyncio.sleep(5) + + async def job_done(self, worker_id, job_id, success, message, data): + worker = self._job_queue.get_worker(worker_id) + if worker is not None: + worker.set_current_job(None) + + job = self._job_queue.get_job(job_id) + if job is not None: + job.set_done(success, message, data) + return True + + async def send_jobs(self): + invalid_workers = [] + for worker in self._job_queue.workers(): + if worker.job_assigned() and not worker.is_working(): + try: + await worker.send_job() + + except ConnectionResetError: + invalid_workers.append(worker) + + for worker in invalid_workers: + self._job_queue.remove_worker(worker) + + async def handle_websocket_request(self, http_request): + """Overide this method to catch CLOSING messages.""" + http_request.msg_id = 0 + http_request.pending = {} + + # prepare and register websocket + ws = aiohttp.web_ws.WebSocketResponse() + await ws.prepare(http_request) + http_request.ws = ws + self.clients.append(http_request) + + while not ws.closed: + self.logger.debug('waiting for messages') + raw_msg = await ws.receive() + + if raw_msg.type == aiohttp.WSMsgType.TEXT: + self.logger.debug('raw msg received: %s', raw_msg.data) + self.loop.create_task( + 
self._handle_rpc_msg(http_request, raw_msg) + ) + + elif raw_msg.type == aiohttp.WSMsgType.CLOSING: + break + + self.clients.remove(http_request) + return ws + + async def _handle_rpc_msg(self, http_request, raw_msg): + # This is duplicated code from super but there is no way how to do it + # to be able handle server->client requests + try: + _raw_message = raw_msg.data + msg = decode_msg(_raw_message) + + except RpcError as error: + await self._ws_send_str(http_request, encode_error(error)) + return + + if msg.type in (JsonRpcMsgTyp.RESULT, JsonRpcMsgTyp.ERROR): + request_id = msg.data["id"] + if request_id in http_request.pending_requests: + future = http_request.pending_requests[request_id] + future.set_result(msg.data["result"]) + return + + return await super()._handle_rpc_msg(http_request, raw_msg) + + async def stop(self): + self._stopped = True + for worker in tuple(self._job_queue.workers()): + await worker.close() diff --git a/openpype/modules/default_modules/job_queue/job_workers/__init__.py b/openpype/modules/default_modules/job_queue/job_workers/__init__.py new file mode 100644 index 0000000000..f771797aea --- /dev/null +++ b/openpype/modules/default_modules/job_queue/job_workers/__init__.py @@ -0,0 +1,5 @@ +from .base_worker import WorkerJobsConnection + +__all__ = ( + "WorkerJobsConnection", +) diff --git a/openpype/modules/default_modules/job_queue/job_workers/base_worker.py b/openpype/modules/default_modules/job_queue/job_workers/base_worker.py new file mode 100644 index 0000000000..85506565f4 --- /dev/null +++ b/openpype/modules/default_modules/job_queue/job_workers/base_worker.py @@ -0,0 +1,190 @@ +import sys +import datetime +import asyncio +import traceback + +from aiohttp_json_rpc import JsonRpcClient + + +class WorkerClient(JsonRpcClient): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.add_methods( + ("", self.start_job), + ) + self.current_job = None + self._id = None + + def set_id(self, worker_id): + 
self._id = worker_id + + async def start_job(self, job_data): + if self.current_job is not None: + return False + + print("Got new job {}".format(str(job_data))) + self.current_job = job_data + return True + + def finish_job(self, success, message, data): + asyncio.ensure_future( + self._finish_job(success, message, data), + loop=self._loop + ) + + async def _finish_job(self, success, message, data): + print("Current job", self.current_job) + job_id = self.current_job["job_id"] + self.current_job = None + + return await self.call( + "job_done", [self._id, job_id, success, message, data] + ) + + +class WorkerJobsConnection: + """WS connection to Job server. + + Helper class to create a connection to process jobs from job server. + + To be able receive jobs is needed to create a connection and then register + as worker for specific host. + """ + retry_time_seconds = 5 + + def __init__(self, server_url, host_name, loop=None): + self.client = None + self._loop = loop + + self._host_name = host_name + self._server_url = server_url + + self._is_running = False + self._connecting = False + self._connected = False + self._stopped = False + + def stop(self): + print("Stopping worker") + self._stopped = True + + @property + def is_running(self): + return self._is_running + + @property + def current_job(self): + if self.client is not None: + return self.client.current_job + return None + + def finish_job(self, success=True, message=None, data=None): + """Worker finished job and sets the result which is send to server.""" + if self.client is None: + print(( + "Couldn't sent job status to server because" + " client is not connected." 
+ )) + else: + self.client.finish_job(success, message, data) + + async def main_loop(self, register_worker=True): + """Main loop of connection which keep connection to server alive.""" + self._is_running = True + + while not self._stopped: + start_time = datetime.datetime.now() + await self._connection_loop(register_worker) + delta = datetime.datetime.now() - start_time + print("Connection loop took {}s".format(str(delta))) + # Check if was stopped and stop while loop in that case + if self._stopped: + break + + if delta.seconds < 60: + print(( + "Can't connect to server will try in {} seconds." + ).format(self.retry_time_seconds)) + + await asyncio.sleep(self.retry_time_seconds) + self._is_running = False + + async def _connect(self): + self.client = WorkerClient() + print("Connecting to {}".format(self._server_url)) + try: + await self.client.connect_url(self._server_url) + except KeyboardInterrupt: + raise + except Exception: + traceback.print_exception(*sys.exc_info()) + + async def _connection_loop(self, register_worker): + self._connecting = True + future = asyncio.run_coroutine_threadsafe( + self._connect(), loop=self._loop + ) + + while self._connecting: + if not future.done(): + await asyncio.sleep(0.07) + continue + + session = getattr(self.client, "_session", None) + ws = getattr(self.client, "_ws", None) + if session is not None: + if session.closed: + self._connecting = False + self._connected = False + break + + elif ws is not None: + self._connecting = False + self._connected = True + + if self._stopped: + break + + await asyncio.sleep(0.07) + + if not self._connected: + self.client = None + return + + print("Connected to job queue server") + if register_worker: + self.register_as_worker() + + while self._connected and self._loop.is_running(): + if self._stopped or ws.closed: + break + + await asyncio.sleep(0.3) + + await self._stop_cleanup() + + def register_as_worker(self): + """Register as worker ready to work on server side.""" + 
asyncio.ensure_future(self._register_as_worker(), loop=self._loop) + + async def _register_as_worker(self): + worker_id = await self.client.call( + "register_worker", [self._host_name] + ) + self.client.set_id(worker_id) + print( + "Registered as worker with id {}".format(worker_id) + ) + + async def disconnect(self): + await self._stop_cleanup() + + async def _stop_cleanup(self): + print("Cleanup after stop") + if self.client is not None and hasattr(self.client, "_ws"): + await self.client.disconnect() + + self.client = None + self._connecting = False + self._connected = False diff --git a/openpype/modules/default_modules/job_queue/module.py b/openpype/modules/default_modules/job_queue/module.py new file mode 100644 index 0000000000..719d7c8f38 --- /dev/null +++ b/openpype/modules/default_modules/job_queue/module.py @@ -0,0 +1,241 @@ +"""Job queue OpenPype module was created for remote execution of commands. + +## Why is needed +Primarily created for hosts which are not easilly controlled from command line +or in headless mode and is easier to keep one process of host running listening +for jobs to do. + +### Example +One of examples is TVPaint which does not have headless mode, can run only one +process at one time and it's impossible to know what should be executed inside +TVPaint before we know all data about the file that should be processed. + +## Idea +Idea is that there is a server, workers and workstation/s which need to process +something on a worker. + +Workers and workstation/s must have access to server through adress to it's +running instance. Workers use WebSockets and workstations are using HTTP calls. +Also both of them must have access to job queue root which is set in +settings. Root is used as temp where files needed for job can be stored before +sending the job or where result files are stored when job is done. + +Server's address must be set in settings when is running so workers and +workstations know where to send or receive jobs. 
+ +## Command line commands +### start_server +- start server which is handles jobs +- it is possible to specify port and host address (default is localhost:8079) + +### start_worker +- start worker which will process jobs +- has required possitional argument which is application name from OpenPype + settings e.g. 'tvpaint/11-5' ('tvpaint' is group '11-5' is variant) +- it is possible to specify server url but url from settings is used when not + passed (this is added mainly for developing purposes) +""" + +import sys +import json +import copy +import platform + +import click +from openpype.modules import OpenPypeModule +from openpype.api import get_system_settings + + +class JobQueueModule(OpenPypeModule): + name = "job_queue" + + def initialize(self, modules_settings): + server_url = modules_settings.get("server_url") or "" + + self._server_url = self.url_conversion(server_url) + jobs_root_mapping = self._roots_mapping_conversion( + modules_settings.get("jobs_root") + ) + + self._jobs_root_mapping = jobs_root_mapping + + # Is always enabled + # - the module does nothing until is used + self.enabled = True + + @classmethod + def _root_conversion(cls, root_path): + """Make sure root path does not end with slash.""" + # Return empty string if path is invalid + if not root_path: + return "" + + # Remove all slashes + while root_path.endswith("/") or root_path.endswith("\\"): + root_path = root_path[:-1] + return root_path + + @classmethod + def _roots_mapping_conversion(cls, roots_mapping): + roots_mapping = roots_mapping or {} + for platform_name in ("windows", "linux", "darwin"): + roots_mapping[platform_name] = cls._root_conversion( + roots_mapping.get(platform_name) + ) + return roots_mapping + + @staticmethod + def url_conversion(url, ws=False): + if sys.version_info[0] == 2: + from urlparse import urlsplit, urlunsplit + else: + from urllib.parse import urlsplit, urlunsplit + + if not url: + return url + + url_parts = list(urlsplit(url)) + scheme = url_parts[0] 
+ if not scheme: + if ws: + url = "ws://{}".format(url) + else: + url = "http://{}".format(url) + url_parts = list(urlsplit(url)) + + elif ws: + if scheme not in ("ws", "wss"): + if scheme == "https": + url_parts[0] = "wss" + else: + url_parts[0] = "ws" + + elif scheme not in ("http", "https"): + if scheme == "wss": + url_parts[0] = "https" + else: + url_parts[0] = "http" + + return urlunsplit(url_parts) + + def get_jobs_root_mapping(self): + return copy.deepcopy(self._jobs_root_mapping) + + def get_jobs_root(self): + return self._jobs_root_mapping.get(platform.system().lower()) + + @classmethod + def get_jobs_root_from_settings(cls): + module_settings = get_system_settings()["modules"] + jobs_root_mapping = module_settings.get(cls.name, {}).get("jobs_root") + converted_mapping = cls._roots_mapping_conversion(jobs_root_mapping) + + return converted_mapping[platform.system().lower()] + + @property + def server_url(self): + return self._server_url + + def send_job(self, host_name, job_data): + import requests + + job_data = job_data or {} + job_data["host_name"] = host_name + api_path = "{}/api/jobs".format(self._server_url) + post_request = requests.post(api_path, data=json.dumps(job_data)) + return str(post_request.content.decode()) + + def get_job_status(self, job_id): + import requests + + api_path = "{}/api/jobs/{}".format(self._server_url, job_id) + return requests.get(api_path).json() + + def cli(self, click_group): + click_group.add_command(cli_main) + + @classmethod + def get_server_url_from_settings(cls): + module_settings = get_system_settings()["modules"] + return cls.url_conversion( + module_settings + .get(cls.name, {}) + .get("server_url") + ) + + @classmethod + def start_server(cls, port=None, host=None): + from .job_server import main + + return main(port, host) + + @classmethod + def start_worker(cls, app_name, server_url=None): + import requests + from openpype.lib import ApplicationManager + + if not server_url: + server_url = 
cls.get_server_url_from_settings() + + if not server_url: + raise ValueError("Server url is not set.") + + http_server_url = cls.url_conversion(server_url) + + # Validate url + requests.get(http_server_url) + + ws_server_url = cls.url_conversion(server_url) + "/ws" + + app_manager = ApplicationManager() + app = app_manager.applications.get(app_name) + if app is None: + raise ValueError( + "Didn't find application \"{}\" in settings.".format(app_name) + ) + + if app.host_name == "tvpaint": + return cls._start_tvpaint_worker(app, ws_server_url) + raise ValueError("Unknown host \"{}\"".format(app.host_name)) + + @classmethod + def _start_tvpaint_worker(cls, app, server_url): + from openpype.hosts.tvpaint.worker import main + + executable = app.find_executable() + if not executable: + raise ValueError(( + "Executable for app \"{}\" is not set" + " or accessible on this workstation." + ).format(app.full_name)) + + return main(str(executable), server_url) + + +@click.group( + JobQueueModule.name, + help="Application job server. Can be used as render farm." +) +def cli_main(): + pass + + +@cli_main.command( + "start_server", + help="Start server handling workers and their jobs." +) +@click.option("--port", help="Server port") +@click.option("--host", help="Server host (ip address)") +def cli_start_server(port, host): + JobQueueModule.start_server(port, host) + + +@cli_main.command( + "start_worker", help=( + "Start a worker for a specific application. (e.g. 
\"tvpaint/11.5\")" + ) +) +@click.argument("app_name") +@click.option("--server_url", help="Server url which handle workers and jobs.") +def cli_start_worker(app_name, server_url): + JobQueueModule.start_worker(app_name, server_url) diff --git a/openpype/modules/default_modules/python_console_interpreter/window/widgets.py b/openpype/modules/default_modules/python_console_interpreter/window/widgets.py index 0e8dd2fb9b..ecf41eaf3e 100644 --- a/openpype/modules/default_modules/python_console_interpreter/window/widgets.py +++ b/openpype/modules/default_modules/python_console_interpreter/window/widgets.py @@ -176,6 +176,7 @@ class PythonCodeEditor(QtWidgets.QPlainTextEdit): class PythonTabWidget(QtWidgets.QWidget): + add_tab_requested = QtCore.Signal() before_execute = QtCore.Signal(str) def __init__(self, parent): @@ -185,11 +186,15 @@ class PythonTabWidget(QtWidgets.QWidget): self.setFocusProxy(code_input) + add_tab_btn = QtWidgets.QPushButton("Add tab...", self) + add_tab_btn.setToolTip("Add new tab") + execute_btn = QtWidgets.QPushButton("Execute", self) execute_btn.setToolTip("Execute command (Ctrl + Enter)") btns_layout = QtWidgets.QHBoxLayout() btns_layout.setContentsMargins(0, 0, 0, 0) + btns_layout.addWidget(add_tab_btn) btns_layout.addStretch(1) btns_layout.addWidget(execute_btn) @@ -198,12 +203,16 @@ class PythonTabWidget(QtWidgets.QWidget): layout.addWidget(code_input, 1) layout.addLayout(btns_layout, 0) + add_tab_btn.clicked.connect(self._on_add_tab_clicked) execute_btn.clicked.connect(self._on_execute_clicked) code_input.execute_requested.connect(self.execute) self._code_input = code_input self._interpreter = InteractiveInterpreter() + def _on_add_tab_clicked(self): + self.add_tab_requested.emit() + def _on_execute_clicked(self): self.execute() @@ -352,9 +361,6 @@ class PythonInterpreterWidget(QtWidgets.QWidget): tab_widget.setTabsClosable(False) tab_widget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) - add_tab_btn = QtWidgets.QPushButton("+", 
tab_widget) - tab_widget.setCornerWidget(add_tab_btn, QtCore.Qt.TopLeftCorner) - widgets_splitter = QtWidgets.QSplitter(self) widgets_splitter.setOrientation(QtCore.Qt.Vertical) widgets_splitter.addWidget(output_widget) @@ -371,14 +377,12 @@ class PythonInterpreterWidget(QtWidgets.QWidget): line_check_timer.setInterval(200) line_check_timer.timeout.connect(self._on_timer_timeout) - add_tab_btn.clicked.connect(self._on_add_clicked) tab_bar.right_clicked.connect(self._on_tab_right_click) tab_bar.double_clicked.connect(self._on_tab_double_click) tab_bar.mid_clicked.connect(self._on_tab_mid_click) tab_widget.tabCloseRequested.connect(self._on_tab_close_req) self._widgets_splitter = widgets_splitter - self._add_tab_btn = add_tab_btn self._output_widget = output_widget self._tab_widget = tab_widget self._line_check_timer = line_check_timer @@ -459,14 +463,41 @@ class PythonInterpreterWidget(QtWidgets.QWidget): return menu = QtWidgets.QMenu(self._tab_widget) - menu.addAction("Rename") + + add_tab_action = QtWidgets.QAction("Add tab...", menu) + add_tab_action.setToolTip("Add new tab") + + rename_tab_action = QtWidgets.QAction("Rename...", menu) + rename_tab_action.setToolTip("Rename tab") + + duplicate_tab_action = QtWidgets.QAction("Duplicate...", menu) + duplicate_tab_action.setToolTip("Duplicate code to new tab") + + close_tab_action = QtWidgets.QAction("Close", menu) + close_tab_action.setToolTip("Close tab and lose content") + close_tab_action.setEnabled(self._tab_widget.tabsClosable()) + + menu.addAction(add_tab_action) + menu.addAction(rename_tab_action) + menu.addAction(duplicate_tab_action) + menu.addAction(close_tab_action) + result = menu.exec_(global_point) if result is None: return - if result.text() == "Rename": + if result is rename_tab_action: self._rename_tab_req(tab_idx) + elif result is add_tab_action: + self._on_add_requested() + + elif result is duplicate_tab_action: + self._duplicate_requested(tab_idx) + + elif result is close_tab_action: + 
self._on_tab_close_req(tab_idx) + def _rename_tab_req(self, tab_idx): dialog = TabNameDialog(self) dialog.set_tab_name(self._tab_widget.tabText(tab_idx)) @@ -475,6 +506,16 @@ class PythonInterpreterWidget(QtWidgets.QWidget): if tab_name: self._tab_widget.setTabText(tab_idx, tab_name) + def _duplicate_requested(self, tab_idx=None): + if tab_idx is None: + tab_idx = self._tab_widget.currentIndex() + + src_widget = self._tab_widget.widget(tab_idx) + dst_widget = self._add_tab() + if dst_widget is None: + return + dst_widget.set_code(src_widget.get_code()) + def _on_tab_mid_click(self, global_point): point = self._tab_widget.mapFromGlobal(global_point) tab_bar = self._tab_widget.tabBar() @@ -525,12 +566,17 @@ class PythonInterpreterWidget(QtWidgets.QWidget): lines.append(self.ansi_escape.sub("", line)) self._append_lines(lines) - def _on_add_clicked(self): + def _on_add_requested(self): + self._add_tab() + + def _add_tab(self): dialog = TabNameDialog(self) dialog.exec_() tab_name = dialog.result() if tab_name: - self.add_tab(tab_name) + return self.add_tab(tab_name) + + return None def _on_before_execute(self, code_text): at_max = self._output_widget.vertical_scroll_at_max() @@ -562,6 +608,7 @@ class PythonInterpreterWidget(QtWidgets.QWidget): def add_tab(self, tab_name, index=None): widget = PythonTabWidget(self) widget.before_execute.connect(self._on_before_execute) + widget.add_tab_requested.connect(self._on_add_requested) if index is None: if self._tab_widget.count() > 0: index = self._tab_widget.currentIndex() + 1 diff --git a/openpype/modules/default_modules/royal_render/__init__.py b/openpype/modules/default_modules/royal_render/__init__.py new file mode 100644 index 0000000000..cc92e3b50d --- /dev/null +++ b/openpype/modules/default_modules/royal_render/__init__.py @@ -0,0 +1,6 @@ +from .royal_render_module import RoyalRenderModule + + +__all__ = ( + "RoyalRenderModule", +) diff --git a/openpype/modules/default_modules/royal_render/api.py 
b/openpype/modules/default_modules/royal_render/api.py new file mode 100644 index 0000000000..ed9e71f240 --- /dev/null +++ b/openpype/modules/default_modules/royal_render/api.py @@ -0,0 +1,199 @@ +# -*- coding: utf-8 -*- +"""Wrapper around Royal Render API.""" +import sys +import os + +from openpype.settings import get_project_settings +from openpype.lib.local_settings import OpenPypeSettingsRegistry +from openpype.lib import PypeLogger, run_subprocess +from .rr_job import RRJob, SubmitFile, SubmitterParameter + + +log = PypeLogger.get_logger("RoyalRender") + + +class Api: + + _settings = None + RR_SUBMIT_CONSOLE = 1 + RR_SUBMIT_API = 2 + + def __init__(self, settings, project=None): + self._settings = settings + self._initialize_rr(project) + + def _initialize_rr(self, project=None): + # type: (str) -> None + """Initialize RR Path. + + Args: + project (str, Optional): Project name to set RR api in + context. + + """ + if project: + project_settings = get_project_settings(project) + rr_path = ( + project_settings + ["royalrender"] + ["rr_paths"] + ) + else: + rr_path = ( + self._settings + ["modules"] + ["royalrender"] + ["rr_path"] + ["default"] + ) + os.environ["RR_ROOT"] = rr_path + self._rr_path = rr_path + + def _get_rr_bin_path(self, rr_root=None): + # type: (str) -> str + """Get path to RR bin folder.""" + rr_root = rr_root or self._rr_path + is_64bit_python = sys.maxsize > 2 ** 32 + + rr_bin_path = "" + if sys.platform.lower() == "win32": + rr_bin_path = "/bin/win64" + if not is_64bit_python: + # we are using 64bit python + rr_bin_path = "/bin/win" + rr_bin_path = rr_bin_path.replace( + "/", os.path.sep + ) + + if sys.platform.lower() == "darwin": + rr_bin_path = "/bin/mac64" + if not is_64bit_python: + rr_bin_path = "/bin/mac" + + if sys.platform.lower() == "linux": + rr_bin_path = "/bin/lx64" + + return os.path.join(rr_root, rr_bin_path) + + def _initialize_module_path(self): + # type: () -> None + """Set RR modules for Python.""" + # default for linux + 
rr_bin = self._get_rr_bin_path() + rr_module_path = os.path.join(rr_bin, "lx64/lib") + + if sys.platform.lower() == "win32": + rr_module_path = rr_bin + rr_module_path = rr_module_path.replace( + "/", os.path.sep + ) + + if sys.platform.lower() == "darwin": + rr_module_path = os.path.join(rr_bin, "lib/python/27") + + sys.path.append(os.path.join(self._rr_path, rr_module_path)) + + def create_submission(self, jobs, submitter_attributes, file_name=None): + # type: (list[RRJob], list[SubmitterParameter], str) -> SubmitFile + """Create jobs submission file. + + Args: + jobs (list): List of :class:`RRJob` + submitter_attributes (list): List of submitter attributes + :class:`SubmitterParameter` for whole submission batch. + file_name (str), optional): File path to write data to. + + Returns: + str: XML data of job submission files. + + """ + raise NotImplementedError + + def submit_file(self, file, mode=RR_SUBMIT_CONSOLE): + # type: (SubmitFile, int) -> None + if mode == self.RR_SUBMIT_CONSOLE: + self._submit_using_console(file) + + # RR v7 supports only Python 2.7 so we bail out in fear + # until there is support for Python 3 😰 + raise NotImplementedError( + "Submission via RoyalRender API is not supported yet") + # self._submit_using_api(file) + + def _submit_using_console(self, file): + # type: (SubmitFile) -> bool + rr_console = os.path.join( + self._get_rr_bin_path(), + "rrSubmitterconsole" + ) + + if sys.platform.lower() == "darwin": + if "/bin/mac64" in rr_console: + rr_console = rr_console.replace("/bin/mac64", "/bin/mac") + + if sys.platform.lower() == "win32": + if "/bin/win64" in rr_console: + rr_console = rr_console.replace("/bin/win64", "/bin/win") + rr_console += ".exe" + + args = [rr_console, file] + run_subprocess(" ".join(args), logger=log) + + def _submit_using_api(self, file): + # type: (SubmitFile) -> None + """Use RR API to submit jobs. + + Args: + file (SubmitFile): Submit jobs definition. + + Throws: + RoyalRenderException: When something fails. 
+ + """ + self._initialize_module_path() + import libpyRR2 as rrLib # noqa + from rrJob import getClass_JobBasics # noqa + import libpyRR2 as _RenderAppBasic # noqa + + tcp = rrLib._rrTCP("") # noqa + rr_server = tcp.getRRServer() + + if len(rr_server) == 0: + log.info("Got RR IP address {}".format(rr_server)) + + # TODO: Port is hardcoded in RR? If not, move it to Settings + if not tcp.setServer(rr_server, 7773): + log.error( + "Can not set RR server: {}".format(tcp.errorMessage())) + raise RoyalRenderException(tcp.errorMessage()) + + # TODO: This need UI and better handling of username/password. + # We can't store password in keychain as it is pulled multiple + # times and users on linux must enter keychain password every time. + # Probably best way until we setup our own user management would be + # to encrypt password and save it to json locally. Not bulletproof + # but at least it is not stored in plaintext. + reg = OpenPypeSettingsRegistry() + try: + rr_user = reg.get_item("rr_username") + rr_password = reg.get_item("rr_password") + except ValueError: + # user has no rr credentials set + pass + else: + # login to RR + tcp.setLogin(rr_user, rr_password) + + job = getClass_JobBasics() + renderer = _RenderAppBasic() + + # iterate over SubmitFile, set _JobBasic (job) and renderer + # and feed it to jobSubmitNew() + # not implemented yet + job.renderer = renderer + tcp.jobSubmitNew(job) + + +class RoyalRenderException(Exception): + """Exception used in various error states coming from RR.""" + pass diff --git a/openpype/modules/default_modules/royal_render/plugins/publish/collect_default_rr_path.py b/openpype/modules/default_modules/royal_render/plugins/publish/collect_default_rr_path.py new file mode 100644 index 0000000000..cdca03bef0 --- /dev/null +++ b/openpype/modules/default_modules/royal_render/plugins/publish/collect_default_rr_path.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +"""Collect default Deadline server.""" +import pyblish.api + + +class 
CollectDefaultRRPath(pyblish.api.ContextPlugin): + """Collect default Royal Render path.""" + + order = pyblish.api.CollectorOrder + 0.01 + label = "Default Royal Render Path" + + def process(self, context): + try: + rr_module = context.data.get( + "openPypeModules")["royalrender"] + except AttributeError: + msg = "Cannot get OpenPype Royal Render module." + self.log.error(msg) + raise AssertionError(msg) + + # get default deadline webservice url from deadline module + self.log.debug(rr_module.rr_paths) + context.data["defaultRRPath"] = rr_module.rr_paths["default"] # noqa: E501 diff --git a/openpype/modules/default_modules/royal_render/plugins/publish/collect_rr_path_from_instance.py b/openpype/modules/default_modules/royal_render/plugins/publish/collect_rr_path_from_instance.py new file mode 100644 index 0000000000..fb27a76d11 --- /dev/null +++ b/openpype/modules/default_modules/royal_render/plugins/publish/collect_rr_path_from_instance.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +import pyblish.api + + +class CollectRRPathFromInstance(pyblish.api.InstancePlugin): + """Collect RR Path from instance.""" + + order = pyblish.api.CollectorOrder + label = "Royal Render Path from the Instance" + families = ["rendering"] + + def process(self, instance): + instance.data["rrPath"] = self._collect_rr_path(instance) + self.log.info( + "Using {} for submission.".format(instance.data["rrPath"])) + + @staticmethod + def _collect_rr_path(render_instance): + # type: (pyblish.api.Instance) -> str + """Get Royal Render path from render instance.""" + rr_settings = ( + render_instance.context.data + ["system_settings"] + ["modules"] + ["royalrender"] + ) + try: + default_servers = rr_settings["rr_paths"] + project_servers = ( + render_instance.context.data + ["project_settings"] + ["royalrender"] + ["rr_paths"] + ) + rr_servers = { + k: default_servers[k] + for k in project_servers + if k in default_servers + } + + except AttributeError: + # Handle situation were we had only one 
url for deadline. + return render_instance.context.data["defaultRRPath"] + + return rr_servers[ + list(rr_servers.keys())[ + int(render_instance.data.get("rrPaths")) + ] + ] diff --git a/openpype/modules/default_modules/royal_render/plugins/publish/collect_sequences_from_job.py b/openpype/modules/default_modules/royal_render/plugins/publish/collect_sequences_from_job.py new file mode 100644 index 0000000000..2505d671af --- /dev/null +++ b/openpype/modules/default_modules/royal_render/plugins/publish/collect_sequences_from_job.py @@ -0,0 +1,209 @@ +# -*- coding: utf-8 -*- +"""Collect sequences from Royal Render Job.""" +import os +import re +import copy +import json +from pprint import pformat + +import pyblish.api +from avalon import api + + +def collect(root, + regex=None, + exclude_regex=None, + frame_start=None, + frame_end=None): + """Collect sequence collections in root""" + + import clique + + files = [] + for filename in os.listdir(root): + + # Must have extension + ext = os.path.splitext(filename)[1] + if not ext: + continue + + # Only files + if not os.path.isfile(os.path.join(root, filename)): + continue + + # Include and exclude regex + if regex and not re.search(regex, filename): + continue + if exclude_regex and re.search(exclude_regex, filename): + continue + + files.append(filename) + + # Match collections + # Support filenames like: projectX_shot01_0010.tiff with this regex + pattern = r"(?P(?P0*)\d+)\.\D+\d?$" + collections, remainder = clique.assemble(files, + patterns=[pattern], + minimum_items=1) + + # Ignore any remainders + if remainder: + print("Skipping remainder {}".format(remainder)) + + # Exclude any frames outside start and end frame. 
+ for collection in collections: + for index in list(collection.indexes): + if frame_start is not None and index < frame_start: + collection.indexes.discard(index) + continue + if frame_end is not None and index > frame_end: + collection.indexes.discard(index) + continue + + # Keep only collections that have at least a single frame + collections = [c for c in collections if c.indexes] + + return collections + + +class CollectSequencesFromJob(pyblish.api.ContextPlugin): + """Gather file sequences from job directory. + + When "OPENPYPE_PUBLISH_DATA" environment variable is set these paths + (folders or .json files) are parsed for image sequences. Otherwise the + current working directory is searched for file sequences. + + """ + order = pyblish.api.CollectorOrder + targets = ["rr_control"] + label = "Collect Rendered Frames" + + def process(self, context): + if os.environ.get("OPENPYPE_PUBLISH_DATA"): + self.log.debug(os.environ.get("OPENPYPE_PUBLISH_DATA")) + paths = os.environ["OPENPYPE_PUBLISH_DATA"].split(os.pathsep) + self.log.info("Collecting paths: {}".format(paths)) + else: + cwd = context.get("workspaceDir", os.getcwd()) + paths = [cwd] + + for path in paths: + + self.log.info("Loading: {}".format(path)) + + if path.endswith(".json"): + # Search using .json configuration + with open(path, "r") as f: + try: + data = json.load(f) + except Exception as exc: + self.log.error("Error loading json: " + "{} - Exception: {}".format(path, exc)) + raise + + cwd = os.path.dirname(path) + root_override = data.get("root") + if root_override: + if os.path.isabs(root_override): + root = root_override + else: + root = os.path.join(cwd, root_override) + else: + root = cwd + + metadata = data.get("metadata") + if metadata: + session = metadata.get("session") + if session: + self.log.info("setting session using metadata") + api.Session.update(session) + os.environ.update(session) + + else: + # Search in directory + data = {} + root = path + + self.log.info("Collecting: 
{}".format(root)) + regex = data.get("regex") + if regex: + self.log.info("Using regex: {}".format(regex)) + + collections = collect(root=root, + regex=regex, + exclude_regex=data.get("exclude_regex"), + frame_start=data.get("frameStart"), + frame_end=data.get("frameEnd")) + + self.log.info("Found collections: {}".format(collections)) + + if data.get("subset") and len(collections) > 1: + self.log.error("Forced subset can only work with a single " + "found sequence") + raise RuntimeError("Invalid sequence") + + fps = data.get("fps", 25) + + # Get family from the data + families = data.get("families", ["render"]) + if "render" not in families: + families.append("render") + if "ftrack" not in families: + families.append("ftrack") + if "review" not in families: + families.append("review") + + for collection in collections: + instance = context.create_instance(str(collection)) + self.log.info("Collection: %s" % list(collection)) + + # Ensure each instance gets a unique reference to the data + data = copy.deepcopy(data) + + # If no subset provided, get it from collection's head + subset = data.get("subset", collection.head.rstrip("_. 
")) + + # If no start or end frame provided, get it from collection + indices = list(collection.indexes) + start = data.get("frameStart", indices[0]) + end = data.get("frameEnd", indices[-1]) + + # root = os.path.normpath(root) + # self.log.info("Source: {}}".format(data.get("source", ""))) + + ext = list(collection)[0].split('.')[-1] + + instance.data.update({ + "name": str(collection), + "family": families[0], # backwards compatibility / pyblish + "families": list(families), + "subset": subset, + "asset": data.get("asset", api.Session["AVALON_ASSET"]), + "stagingDir": root, + "frameStart": start, + "frameEnd": end, + "fps": fps, + "source": data.get('source', '') + }) + instance.append(collection) + instance.context.data['fps'] = fps + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': ext, + 'ext': '{}'.format(ext), + 'files': list(collection), + "stagingDir": root, + "anatomy_template": "render", + "fps": fps, + "tags": ['review'] + } + instance.data["representations"].append(representation) + + if data.get('user'): + context.data["user"] = data['user'] + + self.log.debug("Collected instance:\n" + "{}".format(pformat(instance.data))) diff --git a/openpype/modules/default_modules/royal_render/royal_render_module.py b/openpype/modules/default_modules/royal_render/royal_render_module.py new file mode 100644 index 0000000000..4f72860ad6 --- /dev/null +++ b/openpype/modules/default_modules/royal_render/royal_render_module.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +"""Module providing support for Royal Render.""" +import os +import openpype.modules +from openpype.modules import OpenPypeModule +from openpype_interfaces import IPluginPaths + + +class RoyalRenderModule(OpenPypeModule, IPluginPaths): + """Class providing basic Royal Render implementation logic.""" + name = "royalrender" + + @property + def api(self): + if not self._api: + # import royal render modules + from . 
import api as rr_api + self._api = rr_api.Api(self.settings) + + return self._api + + def __init__(self, manager, settings): + # type: (openpype.modules.base.ModulesManager, dict) -> None + self.rr_paths = {} + self._api = None + self.settings = settings + super(RoyalRenderModule, self).__init__(manager, settings) + + def initialize(self, module_settings): + # type: (dict) -> None + rr_settings = module_settings[self.name] + self.enabled = rr_settings["enabled"] + self.rr_paths = rr_settings.get("rr_paths") + + @staticmethod + def get_plugin_paths(): + # type: () -> dict + """Royal Render plugin paths. + + Returns: + dict: Dictionary of plugin paths for RR. + """ + current_dir = os.path.dirname(os.path.abspath(__file__)) + return { + "publish": [os.path.join(current_dir, "plugins", "publish")] + } diff --git a/openpype/modules/default_modules/royal_render/rr_job.py b/openpype/modules/default_modules/royal_render/rr_job.py new file mode 100644 index 0000000000..c660eceac7 --- /dev/null +++ b/openpype/modules/default_modules/royal_render/rr_job.py @@ -0,0 +1,256 @@ +# -*- coding: utf-8 -*- +"""Python wrapper for RoyalRender XML job file.""" +from xml.dom import minidom as md +import attr +from collections import namedtuple, OrderedDict + + +CustomAttribute = namedtuple("CustomAttribute", ["name", "value"]) + + +@attr.s +class RRJob: + """Mapping of Royal Render job file to a data class.""" + + # Required + # -------- + + # Name of your render application. Same as in the render config file. + # (Maya, Softimage) + Software = attr.ib() # type: str + + # The OS the scene was created on, all texture paths are set on + # that OS. Possible values are windows, linux, osx + SceneOS = attr.ib() # type: str + + # Renderer you use. Same as in the render config file + # (VRay, Mental Ray, Arnold) + Renderer = attr.ib() # type: str + + # Version you want to render with. (5.11, 2010, 12) + Version = attr.ib() # type: str + + # Name of the scene file with full path. 
+ SceneName = attr.ib() # type: str + + # Is the job enabled for submission? + # enabled by default + IsActive = attr.ib() # type: str + + # Sequence settings of this job + SeqStart = attr.ib() # type: int + SeqEnd = attr.ib() # type: int + SeqStep = attr.ib() # type: int + SeqFileOffset = attr.ib() # type: int + + # If you specify ImageDir, then ImageFilename has no path. If you do + # NOT specify ImageDir, then ImageFilename has to include the path. + # Same for ImageExtension. + # Important: Do not forget any _ or . in front or after the frame + # numbering. Usually ImageExtension always starts with a . (.tga, .exr) + ImageDir = attr.ib() # type: str + ImageFilename = attr.ib() # type: str + ImageExtension = attr.ib() # type: str + + # Some applications always add a . or _ in front of the frame number. + # Set this variable to that character. The user can then change + # the filename at the rrSubmitter and the submitter keeps + # track of this character. + ImagePreNumberLetter = attr.ib() # type: str + + # If you render a single file, e.g. Quicktime or Avi, then you have to + # set this value. Videos have to be rendered at once on one client. + ImageSingleOutputFile = attr.ib(default="false") # type: str + + # Semi-Required (required for some render applications) + # ----------------------------------------------------- + + # The database of your scene file. In Maya and XSI called "project", + # in Lightwave "content dir" + SceneDatabaseDir = attr.ib(default=None) # type: str + + # Required if you want to split frames on multiple clients + ImageWidth = attr.ib(default=None) # type: int + ImageHeight = attr.ib(default=None) # type: int + Camera = attr.ib(default=None) # type: str + Layer = attr.ib(default=None) # type: str + Channel = attr.ib(default=None) # type: str + + # Optional + # -------- + + # Used for the RR render license function. + # E.g. If you render with mentalRay, then add mentalRay. 
If you render + # with Nuke and you use Furnace plugins in your comp, add Furnace. + # TODO: determine how this work for multiple plugins + RequiredPlugins = attr.ib(default=None) # type: str + + # Frame Padding of the frame number in the rendered filename. + # Some render config files are setting the padding at render time. + ImageFramePadding = attr.ib(default=None) # type: str + + # Some render applications support overriding the image format at + # the render commandline. + OverrideImageFormat = attr.ib(default=None) # type: str + + # rrControl can display the name of additonal channels that are + # rendered. Each channel requires these two values. ChannelFilename + # contains the full path. + ChannelFilename = attr.ib(default=None) # type: str + ChannelExtension = attr.ib(default=None) # type: str + + # A value between 0 and 255. Each job gets the Pre ID attached as small + # letter to the main ID. A new main ID is generated for every machine + # for every 5/1000s. + PreID = attr.ib(default=None) # type: int + + # When the job is received by the server, the server checks for other + # jobs send from this machine. If a job with the PreID was found, then + # this jobs waits for the other job. Note: This flag can be used multiple + # times to wait for multiple jobs. 
+ WaitForPreID = attr.ib(default=None) # type: int + + # List of submitter options per job + # list item must be of `SubmitterParameter` type + SubmitterParameters = attr.ib(factory=list) # type: list + + # List of Custom job attributes + # Royal Render support custom attributes in format or + # + # list item must be of `CustomAttribute` named tuple + CustomAttributes = attr.ib(factory=list) # type: list + + # Additional information for subsequent publish script and + # for better display in rrControl + UserName = attr.ib(default=None) # type: str + CustomSeQName = attr.ib(default=None) # type: str + CustomSHotName = attr.ib(default=None) # type: str + CustomVersionName = attr.ib(default=None) # type: str + CustomUserInfo = attr.ib(default=None) # type: str + SubmitMachine = attr.ib(default=None) # type: str + Color_ID = attr.ib(default=2) # type: int + + RequiredLicenses = attr.ib(default=None) # type: str + + # Additional frame info + Priority = attr.ib(default=50) # type: int + TotalFrames = attr.ib(default=None) # type: int + Tiled = attr.ib(default=None) # type: str + + +class SubmitterParameter: + """Wrapper for Submitter Parameters.""" + def __init__(self, parameter, *args): + # type: (str, list) -> None + self._parameter = parameter + self._values = args + + def serialize(self): + # type: () -> str + """Serialize submitter parameter as a string value. + + This can be later on used as text node in job xml file. + + Returns: + str: concatenated string of parameter values. + + """ + return '"{param}={val}"'.format( + param=self._parameter, val="~".join(self._values)) + + +@attr.s +class SubmitFile: + """Class wrapping Royal Render submission XML file.""" + + # Syntax version of the submission file. 
+ syntax_version = attr.ib(default="6.0") # type: str + + # Delete submission file after processing + DeleteXML = attr.ib(default=1) # type: int + + # List of submitter options per job + # list item must be of `SubmitterParameter` type + SubmitterParameters = attr.ib(factory=list) # type: list + + # List of job is submission batch. + # list item must be of type `RRJob` + Jobs = attr.ib(factory=list) # type: list + + @staticmethod + def _process_submitter_parameters(parameters, dom, append_to): + # type: (list[SubmitterParameter], md.Document, md.Element) -> None + """Take list of :class:`SubmitterParameter` and process it as XML. + + This will take :class:`SubmitterParameter`, create XML element + for them and convert value to Royal Render compatible string + (options and values separated by ~) + + Args: + parameters (list of SubmitterParameter): List of parameters. + dom (xml.dom.minidom.Document): XML Document + append_to (xml.dom.minidom.Element): Element to append to. + + """ + for param in parameters: + if not isinstance(param, SubmitterParameter): + raise AttributeError( + "{} is not of type `SubmitterParameter`".format(param)) + xml_parameter = dom.createElement("SubmitterParameter") + xml_parameter.appendChild(dom.createTextNode(param.serialize())) + append_to.appendChild(xml_parameter) + + def serialize(self): + # type: () -> str + """Return all data serialized as XML. + + Returns: + str: XML data as string. 
+ + """ + def filter_data(a, v): + """Skip private attributes.""" + if a.name.startswith("_"): + return False + if v is None: + return False + return True + + root = md.Document() + # root element: + job_file = root.createElement('RR_Job_File') + job_file.setAttribute("syntax_version", self.syntax_version) + + # handle Submitter Parameters for batch + # foo=bar~baz~goo + self._process_submitter_parameters( + self.SubmitterParameters, root, job_file) + + for job in self.Jobs: # type: RRJob + if not isinstance(job, RRJob): + raise AttributeError( + "{} is not of type `SubmitterParameter`".format(job)) + xml_job = root.createElement("Job") + # handle Submitter Parameters for job + self._process_submitter_parameters( + job.SubmitterParameters, root, xml_job + ) + job_custom_attributes = job.CustomAttributes + + serialized_job = attr.asdict( + job, dict_factory=OrderedDict, filter=filter_data) + serialized_job.pop("CustomAttributes") + serialized_job.pop("SubmitterParameters") + + for custom_attr in job_custom_attributes: # type: CustomAttribute + serialized_job["Custom{}".format( + custom_attr.name)] = custom_attr.value + + for item, value in serialized_job.items(): + xml_attr = root.create(item) + xml_attr.appendChild( + root.createTextNode(value) + ) + xml_job.appendChild(xml_attr) + + return root.toprettyxml(indent="\t") diff --git a/openpype/modules/default_modules/royal_render/rr_root/README.md b/openpype/modules/default_modules/royal_render/rr_root/README.md new file mode 100644 index 0000000000..0a9777833e --- /dev/null +++ b/openpype/modules/default_modules/royal_render/rr_root/README.md @@ -0,0 +1,5 @@ +## OpenPype RoyalRender integration plugins + +### Installation + +Copy content of this folder to your `RR_ROOT` (place where RoyalRender studio wide installation is). 
\ No newline at end of file diff --git a/openpype/modules/default_modules/royal_render/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py b/openpype/modules/default_modules/royal_render/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py new file mode 100644 index 0000000000..290f26a44a --- /dev/null +++ b/openpype/modules/default_modules/royal_render/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py @@ -0,0 +1,185 @@ +# -*- coding: utf-8 -*- +"""This is RR control plugin that runs on the job by user interaction. + +It asks user for context to publish, getting it from OpenPype. In order to +run it needs `OPENPYPE_ROOT` to be set to know where to execute OpenPype. + +""" +import rr # noqa +import rrGlobal # noqa +import subprocess +import os +import glob +import platform +import tempfile +import json + + +class OpenPypeContextSelector: + """Class to handle publishing context determination in RR.""" + + def __init__(self): + self.job = rr.getJob() + self.context = None + + self.openpype_executable = "openpype_gui" + if platform.system().lower() == "windows": + self.openpype_executable = "{}.exe".format( + self.openpype_executable) + + op_path = os.environ.get("OPENPYPE_ROOT") + print("initializing ... 
{}".format(op_path)) + if not op_path: + print("Warning: OpenPype root is not found.") + + if platform.system().lower() == "windows": + print(" * trying to find OpenPype on local computer.") + op_path = os.path.join( + os.environ.get("PROGRAMFILES"), + "OpenPype", "openpype_console.exe" + ) + if os.path.exists(op_path): + print(" - found OpenPype installation {}".format(op_path)) + else: + # try to find in user local context + op_path = os.path.join( + os.environ.get("LOCALAPPDATA"), + "Programs", + "OpenPype", "openpype_console.exe" + ) + if os.path.exists(op_path): + print( + " - found OpenPype installation {}".format( + op_path)) + else: + raise Exception("Error: OpenPype was not found.") + + self.openpype_root = op_path + + # TODO: this should try to find metadata file. Either using + # jobs custom attributes or using environment variable + # or just using plain existence of file. + # self.context = self._process_metadata_file() + + def _process_metadata_file(self): + """Find and process metadata file. + + Try to find metadata json file in job folder to get context from. + + Returns: + dict: Context from metadata json file. + + """ + image_dir = self.job.imageDir + metadata_files = glob.glob( + "{}{}*_metadata.json".format(image_dir, os.path.sep)) + if not metadata_files: + return {} + + raise NotImplementedError( + "Processing existing metadata not implemented yet.") + + def process_job(self): + """Process selected job. + + This should process selected job. If context can be determined + automatically, no UI will be show and publishing will directly + proceed. + """ + if not self.context: + self.show() + + self.context["user"] = self.job.userName + self.run_publish() + + def show(self): + """Show UI for context selection. + + Because of RR UI limitations, this must be done using OpenPype + itself. 
+ + """ + tf = tempfile.TemporaryFile(delete=False) + context_file = tf.name + op_args = [os.path.join(self.openpype_root, self.openpype_executable), + "contextselection", tf.name] + + tf.close() + print(">>> running {}".format(" ".join(op_args))) + + subprocess.call(op_args) + + with open(context_file, "r") as cf: + self.context = json.load(cf) + + os.unlink(context_file) + print(">>> context: {}".format(self.context)) + + if not self.context or \ + not self.context.get("project") or \ + not self.context.get("asset") or \ + not self.context.get("task"): + self._show_rr_warning("Context selection failed.") + return + + # self.context["app_name"] = self.job.renderer.name + self.context["app_name"] = "maya/2020" + + @staticmethod + def _show_rr_warning(text): + warning_dialog = rrGlobal.getGenericUI() + warning_dialog.addItem(rrGlobal.genUIType.label, "infoLabel", "") + warning_dialog.setText("infoLabel", text) + warning_dialog.addItem( + rrGlobal.genUIType.layoutH, "btnLayout", "") + warning_dialog.addItem( + rrGlobal.genUIType.closeButton, "Ok", "btnLayout") + warning_dialog.execute() + del warning_dialog + + def run_publish(self): + """Run publish process.""" + env = {'AVALON_PROJECT': str(self.context.get("project")), + "AVALON_ASSET": str(self.context.get("asset")), + "AVALON_TASK": str(self.context.get("task")), + "AVALON_APP_NAME": str(self.context.get("app_name"))} + + print(">>> setting environment:") + for k, v in env.items(): + print(" {}: {}".format(k, v)) + + publishing_paths = [os.path.join(self.job.imageDir, + os.path.dirname( + self.job.imageFileName))] + + # add additional channels + channel_idx = 0 + channel = self.job.channelFileName(channel_idx) + while channel: + channel_path = os.path.dirname( + os.path.join(self.job.imageDir, channel)) + if channel_path not in publishing_paths: + publishing_paths.append(channel_path) + channel_idx += 1 + channel = self.job.channelFileName(channel_idx) + + args = [os.path.join(self.openpype_root, 
self.openpype_executable), + 'publish', '-t', "rr_control", "--gui" + ] + + args += publishing_paths + + print(">>> running {}".format(" ".join(args))) + orig = os.environ.copy() + orig.update(env) + try: + subprocess.call(args, env=orig) + except subprocess.CalledProcessError as e: + self._show_rr_warning(" Publish failed [ {} ]".format( + e.returncode + )) + + +print("running selector") +selector = OpenPypeContextSelector() +selector.process_job() diff --git a/openpype/modules/default_modules/sync_server/README.md b/openpype/modules/default_modules/sync_server/README.md index d7d7f3718b..e283b3bb66 100644 --- a/openpype/modules/default_modules/sync_server/README.md +++ b/openpype/modules/default_modules/sync_server/README.md @@ -56,6 +56,13 @@ representation.files.sites: `db.getCollection('MY_PROJECT').update({type:"representation"}, {$set:{"files.$[].sites.MY_CONFIGURED_REMOTE_SITE" : {}}}, true, true)` +I want to create new custom provider: +----------------------------------- +- take `providers\abstract_provider.py` as a base class +- create provider class in `providers` with a name according to a provider (eg. 'gdrive.py' for gdrive provider etc.) +- upload provider icon in png format, 24x24, into `providers\resources`, its name must follow name of provider (eg. 
'gdrive.png' for gdrive provider) +- register new provider into `providers.lib.py`, test how many files could be manipulated at same time, check provider's API for limits + Needed configuration: -------------------- `pype/settings/defaults/project_settings/global.json`.`sync_server`: diff --git a/openpype/modules/default_modules/sync_server/providers/dropbox.py b/openpype/modules/default_modules/sync_server/providers/dropbox.py index 2bc7a83a5b..90d7d44bb8 100644 --- a/openpype/modules/default_modules/sync_server/providers/dropbox.py +++ b/openpype/modules/default_modules/sync_server/providers/dropbox.py @@ -24,25 +24,19 @@ class DropboxHandler(AbstractProvider): ) return - provider_presets = self.presets.get(self.CODE) - if not provider_presets: - msg = "Sync Server: No provider presets for {}".format(self.CODE) - log.info(msg) - return - - token = self.presets[self.CODE].get("token", "") + token = self.presets.get("token", "") if not token: msg = "Sync Server: No access token for dropbox provider" log.info(msg) return - team_folder_name = self.presets[self.CODE].get("team_folder_name", "") + team_folder_name = self.presets.get("team_folder_name", "") if not team_folder_name: msg = "Sync Server: No team folder name for dropbox provider" log.info(msg) return - acting_as_member = self.presets[self.CODE].get("acting_as_member", "") + acting_as_member = self.presets.get("acting_as_member", "") if not acting_as_member: msg = ( "Sync Server: No acting member for dropbox provider" @@ -51,13 +45,15 @@ class DropboxHandler(AbstractProvider): return self.dbx = None - try: - self.dbx = self._get_service( - token, acting_as_member, team_folder_name - ) - except Exception as e: - log.info("Could not establish dropbox object: {}".format(e)) - return + + if self.presets["enabled"]: + try: + self.dbx = self._get_service( + token, acting_as_member, team_folder_name + ) + except Exception as e: + log.info("Could not establish dropbox object: {}".format(e)) + return 
super(AbstractProvider, self).__init__() @@ -101,12 +97,12 @@ class DropboxHandler(AbstractProvider): }, # roots could be overriden only on Project level, User cannot { - "key": "roots", + "key": "root", "label": "Roots", "type": "dict-roots", "object_type": { "type": "path", - "multiplatform": True, + "multiplatform": False, "multipath": False } } @@ -169,7 +165,7 @@ class DropboxHandler(AbstractProvider): Returns: (boolean) """ - return self.dbx is not None + return self.presets["enabled"] and self.dbx is not None @classmethod def get_configurable_items(cls): diff --git a/openpype/modules/default_modules/sync_server/providers/gdrive.py b/openpype/modules/default_modules/sync_server/providers/gdrive.py index 8c8447f8f0..d43e2b3d61 100644 --- a/openpype/modules/default_modules/sync_server/providers/gdrive.py +++ b/openpype/modules/default_modules/sync_server/providers/gdrive.py @@ -73,13 +73,7 @@ class GDriveHandler(AbstractProvider): format(site_name)) return - provider_presets = self.presets.get(self.CODE) - if not provider_presets: - msg = "Sync Server: No provider presets for {}".format(self.CODE) - log.info(msg) - return - - cred_path = self.presets[self.CODE].get("credentials_url", {}).\ + cred_path = self.presets.get("credentials_url", {}).\ get(platform.system().lower()) or '' if not os.path.exists(cred_path): msg = "Sync Server: No credentials for gdrive provider " + \ @@ -87,10 +81,12 @@ class GDriveHandler(AbstractProvider): log.info(msg) return - self.service = self._get_gd_service(cred_path) + self.service = None + if self.presets["enabled"]: + self.service = self._get_gd_service(cred_path) - self._tree = tree - self.active = True + self._tree = tree + self.active = True def is_active(self): """ @@ -98,7 +94,7 @@ class GDriveHandler(AbstractProvider): Returns: (boolean) """ - return self.service is not None + return self.presets["enabled"] and self.service is not None @classmethod def get_system_settings_schema(cls): @@ -125,18 +121,20 @@ class 
GDriveHandler(AbstractProvider): editable = [ # credentials could be overriden on Project or User level { - 'key': "credentials_url", - 'label': "Credentials url", - 'type': 'text' + "type": "path", + "key": "credentials_url", + "label": "Credentials url", + "multiplatform": True, + "placeholder": "Credentials url" }, # roots could be overriden only on Project leve, User cannot { - "key": "roots", + "key": "root", "label": "Roots", "type": "dict-roots", "object_type": { "type": "path", - "multiplatform": True, + "multiplatform": False, "multipath": False } } diff --git a/openpype/modules/default_modules/sync_server/providers/local_drive.py b/openpype/modules/default_modules/sync_server/providers/local_drive.py index e6c62f2daa..68f604b39c 100644 --- a/openpype/modules/default_modules/sync_server/providers/local_drive.py +++ b/openpype/modules/default_modules/sync_server/providers/local_drive.py @@ -50,7 +50,7 @@ class LocalDriveHandler(AbstractProvider): # for non 'studio' sites, 'studio' is configured in Anatomy editable = [ { - "key": "roots", + "key": "root", "label": "Roots", "type": "dict-roots", "object_type": { @@ -73,7 +73,7 @@ class LocalDriveHandler(AbstractProvider): """ editable = [ { - 'key': "roots", + 'key': "root", 'label': "Roots", 'type': 'dict' } @@ -89,6 +89,7 @@ class LocalDriveHandler(AbstractProvider): if not os.path.isfile(source_path): raise FileNotFoundError("Source file {} doesn't exist." 
.format(source_path)) + if overwrite: thread = threading.Thread(target=self._copy, args=(source_path, target_path)) @@ -181,7 +182,10 @@ class LocalDriveHandler(AbstractProvider): def _copy(self, source_path, target_path): print("copying {}->{}".format(source_path, target_path)) - shutil.copy(source_path, target_path) + try: + shutil.copy(source_path, target_path) + except shutil.SameFileError: + print("same files, skipping") def _mark_progress(self, collection, file, representation, server, site, source_path, target_path, direction): diff --git a/openpype/modules/default_modules/sync_server/providers/sftp.py b/openpype/modules/default_modules/sync_server/providers/sftp.py index d737849cdc..1585b326bd 100644 --- a/openpype/modules/default_modules/sync_server/providers/sftp.py +++ b/openpype/modules/default_modules/sync_server/providers/sftp.py @@ -1,8 +1,6 @@ import os import os.path import time -import sys -import six import threading import platform @@ -14,6 +12,7 @@ log = Logger().get_logger("SyncServer") pysftp = None try: import pysftp + import paramiko except (ImportError, SyntaxError): pass @@ -37,7 +36,6 @@ class SFTPHandler(AbstractProvider): def __init__(self, project_name, site_name, tree=None, presets=None): self.presets = None - self.active = False self.project_name = project_name self.site_name = site_name self.root = None @@ -49,22 +47,15 @@ class SFTPHandler(AbstractProvider): format(site_name)) return - provider_presets = self.presets.get(self.CODE) - if not provider_presets: - msg = "Sync Server: No provider presets for {}".format(self.CODE) - log.warning(msg) - return - # store to instance for reconnect - self.sftp_host = provider_presets["sftp_host"] - self.sftp_port = provider_presets["sftp_port"] - self.sftp_user = provider_presets["sftp_user"] - self.sftp_pass = provider_presets["sftp_pass"] - self.sftp_key = provider_presets["sftp_key"] - self.sftp_key_pass = provider_presets["sftp_key_pass"] + self.sftp_host = presets["sftp_host"] + 
self.sftp_port = presets["sftp_port"] + self.sftp_user = presets["sftp_user"] + self.sftp_pass = presets["sftp_pass"] + self.sftp_key = presets["sftp_key"] + self.sftp_key_pass = presets["sftp_key_pass"] self._tree = None - self.active = True @property def conn(self): @@ -80,7 +71,7 @@ class SFTPHandler(AbstractProvider): Returns: (boolean) """ - return self.conn is not None + return self.presets["enabled"] and self.conn is not None @classmethod def get_system_settings_schema(cls): @@ -108,7 +99,7 @@ class SFTPHandler(AbstractProvider): editable = [ # credentials could be overriden on Project or User level { - 'key': "sftp_server", + 'key': "sftp_host", 'label': "SFTP host name", 'type': 'text' }, @@ -130,7 +121,8 @@ class SFTPHandler(AbstractProvider): { 'key': "sftp_key", 'label': "SFTP user ssh key", - 'type': 'path' + 'type': 'path', + "multiplatform": True }, { 'key': "sftp_key_pass", @@ -139,12 +131,12 @@ class SFTPHandler(AbstractProvider): }, # roots could be overriden only on Project leve, User cannot { - "key": "roots", + "key": "root", "label": "Roots", "type": "dict-roots", "object_type": { "type": "path", - "multiplatform": True, + "multiplatform": False, "multipath": False } } @@ -176,7 +168,8 @@ class SFTPHandler(AbstractProvider): { 'key': "sftp_key", 'label': "SFTP user ssh key", - 'type': 'path' + 'type': 'path', + "multiplatform": True }, { 'key': "sftp_key_pass", @@ -426,7 +419,11 @@ class SFTPHandler(AbstractProvider): if self.sftp_key_pass: conn_params['private_key_pass'] = self.sftp_key_pass - return pysftp.Connection(**conn_params) + try: + return pysftp.Connection(**conn_params) + except (paramiko.ssh_exception.SSHException, + pysftp.exceptions.ConnectionException): + log.warning("Couldn't connect", exc_info=True) def _mark_progress(self, collection, file, representation, server, site, source_path, target_path, direction): diff --git a/openpype/modules/default_modules/sync_server/sync_server.py 
b/openpype/modules/default_modules/sync_server/sync_server.py index 6aca2460e3..22eed01ef3 100644 --- a/openpype/modules/default_modules/sync_server/sync_server.py +++ b/openpype/modules/default_modules/sync_server/sync_server.py @@ -80,6 +80,10 @@ async def upload(module, collection, file, representation, provider_name, remote_site_name, True ) + + module.handle_alternate_site(collection, representation, remote_site_name, + file["_id"], file_id) + return file_id @@ -131,6 +135,10 @@ async def download(module, collection, file, representation, provider_name, local_site, True ) + + module.handle_alternate_site(collection, representation, local_site, + file["_id"], file_id) + return file_id @@ -246,6 +254,7 @@ class SyncServerThread(threading.Thread): asyncio.ensure_future(self.check_shutdown(), loop=self.loop) asyncio.ensure_future(self.sync_loop(), loop=self.loop) + log.info("Sync Server Started") self.loop.run_forever() except Exception: log.warning( diff --git a/openpype/modules/default_modules/sync_server/sync_server_module.py b/openpype/modules/default_modules/sync_server/sync_server_module.py index d60147a989..500203f3fc 100644 --- a/openpype/modules/default_modules/sync_server/sync_server_module.py +++ b/openpype/modules/default_modules/sync_server/sync_server_module.py @@ -109,6 +109,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): # some parts of code need to run sequentially, not in async self.lock = None + self._sync_system_settings = None # settings for all enabled projects for sync self._sync_project_settings = None self.sync_server_thread = None # asyncio requires new thread @@ -152,9 +153,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): if not site_name: site_name = self.DEFAULT_SITE - self.reset_provider_for_file(collection, - representation_id, - site_name=site_name, force=force) + self.reset_site_on_representation(collection, + representation_id, + site_name=site_name, force=force) # public facing API def remove_site(self, 
collection, representation_id, site_name, @@ -176,10 +177,10 @@ class SyncServerModule(OpenPypeModule, ITrayModule): if not self.get_sync_project_setting(collection): raise ValueError("Project not configured") - self.reset_provider_for_file(collection, - representation_id, - site_name=site_name, - remove=True) + self.reset_site_on_representation(collection, + representation_id, + site_name=site_name, + remove=True) if remove_local_files: self._remove_local_file(collection, representation_id, site_name) @@ -314,8 +315,8 @@ class SyncServerModule(OpenPypeModule, ITrayModule): """ log.info("Pausing SyncServer for {}".format(representation_id)) self._paused_representations.add(representation_id) - self.reset_provider_for_file(collection, representation_id, - site_name=site_name, pause=True) + self.reset_site_on_representation(collection, representation_id, + site_name=site_name, pause=True) def unpause_representation(self, collection, representation_id, site_name): """ @@ -334,8 +335,8 @@ class SyncServerModule(OpenPypeModule, ITrayModule): except KeyError: pass # self.paused_representations is not persistent - self.reset_provider_for_file(collection, representation_id, - site_name=site_name, pause=False) + self.reset_site_on_representation(collection, representation_id, + site_name=site_name, pause=False) def is_representation_paused(self, representation_id, check_parents=False, project_name=None): @@ -769,6 +770,58 @@ class SyncServerModule(OpenPypeModule, ITrayModule): enabled_projects.append(project_name) return enabled_projects + + def handle_alternate_site(self, collection, representation, processed_site, + file_id, synced_file_id): + """ + For special use cases where one site vendors another. + + Current use case is sftp site vendoring (exposing) same data as + regular site (studio). Each site is accessible for different + audience. 'studio' for artists in a studio, 'sftp' for externals. 
+ + Change of file status on one site actually means same change on + 'alternate' site. (eg. artists publish to 'studio', 'sftp' is using + same location >> file is accesible on 'sftp' site right away. + + Args: + collection (str): name of project + representation (dict) + processed_site (str): real site_name of published/uploaded file + file_id (ObjectId): DB id of file handled + synced_file_id (str): id of the created file returned + by provider + """ + sites = self.sync_system_settings.get("sites", {}) + sites[self.DEFAULT_SITE] = {"provider": "local_drive", + "alternative_sites": []} + + alternate_sites = [] + for site_name, site_info in sites.items(): + conf_alternative_sites = site_info.get("alternative_sites", []) + if processed_site in conf_alternative_sites: + alternate_sites.append(site_name) + continue + if processed_site == site_name and conf_alternative_sites: + alternate_sites.extend(conf_alternative_sites) + continue + + alternate_sites = set(alternate_sites) + + for alt_site in alternate_sites: + query = { + "_id": representation["_id"] + } + elem = {"name": alt_site, + "created_dt": datetime.now(), + "id": synced_file_id} + + self.log.debug("Adding alternate {} to {}".format( + alt_site, representation["_id"])) + self._add_site(collection, query, + [representation], elem, + alt_site, file_id=file_id, force=True) + """ End of Public API """ def get_local_file_path(self, collection, site_name, file_path): @@ -799,12 +852,19 @@ class SyncServerModule(OpenPypeModule, ITrayModule): def tray_init(self): """ - Actual initialization of Sync Server. + Actual initialization of Sync Server for Tray. Called when tray is initialized, it checks if module should be enabled. If not, no initialization necessary. 
""" - # import only in tray, because of Python2 hosts + self.server_init() + + from .tray.app import SyncServerWindow + self.widget = SyncServerWindow(self) + + def server_init(self): + """Actual initialization of Sync Server.""" + # import only in tray or Python3, because of Python2 hosts from .sync_server import SyncServerThread if not self.enabled: @@ -816,6 +876,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return self.lock = threading.Lock() + self.sync_server_thread = SyncServerThread(self) def tray_start(self): @@ -829,6 +890,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Returns: None """ + self.server_start() + + def server_start(self): if self.sync_project_settings and self.enabled: self.sync_server_thread.start() else: @@ -841,6 +905,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Called from Module Manager """ + self.server_exit() + + def server_exit(self): if not self.sync_server_thread: return @@ -850,6 +917,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): log.info("Stopping sync server server") self.sync_server_thread.is_running = False self.sync_server_thread.stop() + log.info("Sync server stopped") except Exception: log.warning( "Error has happened during Killing sync server", @@ -892,6 +960,14 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return self._connection + @property + def sync_system_settings(self): + if self._sync_system_settings is None: + self._sync_system_settings = get_system_settings()["modules"].\ + get("sync_server") + + return self._sync_system_settings + @property def sync_project_settings(self): if self._sync_project_settings is None: @@ -977,9 +1053,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): (dict): {'studio': {'provider':'local_drive'...}, 'MY_LOCAL': {'provider':....}} """ - sys_sett = get_system_settings() - sync_sett = sys_sett["modules"].get("sync_server") - + sync_sett = self.sync_system_settings project_enabled = True if project_name: project_enabled = 
project_name in self.get_enabled_projects() @@ -1037,10 +1111,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): if provider: return provider - sys_sett = get_system_settings() - sync_sett = sys_sett["modules"].get("sync_server") - for site, detail in sync_sett.get("sites", {}).items(): - sites[site] = detail.get("provider") + sync_sett = self.sync_system_settings + for conf_site, detail in sync_sett.get("sites", {}).items(): + sites[conf_site] = detail.get("provider") return sites.get(site, 'N/A') @@ -1319,9 +1392,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return -1, None - def reset_provider_for_file(self, collection, representation_id, - side=None, file_id=None, site_name=None, - remove=False, pause=None, force=False): + def reset_site_on_representation(self, collection, representation_id, + side=None, file_id=None, site_name=None, + remove=False, pause=None, force=False): """ Reset information about synchronization for particular 'file_id' and provider. @@ -1407,9 +1480,12 @@ class SyncServerModule(OpenPypeModule, ITrayModule): update = { "$set": {"files.$[f].sites.$[s]": elem} } + if not isinstance(file_id, ObjectId): + file_id = ObjectId(file_id) + arr_filter = [ {'s.name': site_name}, - {'f._id': ObjectId(file_id)} + {'f._id': file_id} ] self._update_site(collection, query, update, arr_filter) @@ -1498,6 +1574,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Use 'force' to remove existing or raises ValueError """ + reseted_existing = False for repre_file in representation.pop().get("files"): if file_id and file_id != repre_file["_id"]: continue @@ -1508,12 +1585,15 @@ class SyncServerModule(OpenPypeModule, ITrayModule): self._reset_site_for_file(collection, query, elem, repre_file["_id"], site_name) - return + reseted_existing = True else: msg = "Site {} already present".format(site_name) log.info(msg) raise ValueError(msg) + if reseted_existing: + return + if not file_id: update = { "$push": {"files.$[].sites": elem} diff 
--git a/openpype/modules/default_modules/sync_server/tray/widgets.py b/openpype/modules/default_modules/sync_server/tray/widgets.py index 87044889b7..18487b3d11 100644 --- a/openpype/modules/default_modules/sync_server/tray/widgets.py +++ b/openpype/modules/default_modules/sync_server/tray/widgets.py @@ -154,7 +154,7 @@ class SyncProjectListWidget(QtWidgets.QWidget): selected_index.isValid() and \ not self._selection_changed: mode = QtCore.QItemSelectionModel.Select | \ - QtCore.QItemSelectionModel.Rows + QtCore.QItemSelectionModel.Rows self.project_list.selectionModel().select(selected_index, mode) if self.current_project: @@ -489,7 +489,7 @@ class _SyncRepresentationWidget(QtWidgets.QWidget): format(check_progress)) continue - self.sync_server.reset_provider_for_file( + self.sync_server.reset_site_on_representation( self.model.project, representation_id, site_name=site_name, @@ -872,7 +872,7 @@ class SyncRepresentationDetailWidget(_SyncRepresentationWidget): format(check_progress)) continue - self.sync_server.reset_provider_for_file( + self.sync_server.reset_site_on_representation( self.model.project, self.representation_id, site_name=site_name, diff --git a/openpype/modules/default_modules/timers_manager/timers_manager.py b/openpype/modules/default_modules/timers_manager/timers_manager.py index 1aeccbb958..0f165ff0ac 100644 --- a/openpype/modules/default_modules/timers_manager/timers_manager.py +++ b/openpype/modules/default_modules/timers_manager/timers_manager.py @@ -95,8 +95,10 @@ class TimersManager(OpenPypeModule, ITrayService): message_time = int(timers_settings["message_time"] * 60) auto_stop = timers_settings["auto_stop"] + platform_name = platform.system().lower() # Turn of auto stop on MacOs because pynput requires root permissions - if platform.system().lower() == "darwin" or full_time <= 0: + # and on linux can cause thread locks on application close + if full_time <= 0 or platform_name in ("darwin", "linux"): auto_stop = False self.auto_stop = 
auto_stop diff --git a/openpype/pipeline/create/__init__.py b/openpype/pipeline/create/__init__.py index 610ef6d8e2..948b719851 100644 --- a/openpype/pipeline/create/__init__.py +++ b/openpype/pipeline/create/__init__.py @@ -1,3 +1,6 @@ +from .constants import ( + SUBSET_NAME_ALLOWED_SYMBOLS +) from .creator_plugins import ( CreatorError, @@ -13,6 +16,8 @@ from .context import ( __all__ = ( + "SUBSET_NAME_ALLOWED_SYMBOLS", + "CreatorError", "BaseCreator", diff --git a/openpype/pipeline/create/constants.py b/openpype/pipeline/create/constants.py new file mode 100644 index 0000000000..bfbbccfd12 --- /dev/null +++ b/openpype/pipeline/create/constants.py @@ -0,0 +1,6 @@ +SUBSET_NAME_ALLOWED_SYMBOLS = "a-zA-Z0-9_." + + +__all__ = ( + "SUBSET_NAME_ALLOWED_SYMBOLS", +) diff --git a/openpype/plugins/load/copy_file.py b/openpype/plugins/load/copy_file.py index 1acacf6b27..eaf5853035 100644 --- a/openpype/plugins/load/copy_file.py +++ b/openpype/plugins/load/copy_file.py @@ -18,7 +18,7 @@ class CopyFile(api.Loader): @staticmethod def copy_file_to_clipboard(path): - from avalon.vendor.Qt import QtCore, QtWidgets + from Qt import QtCore, QtWidgets clipboard = QtWidgets.QApplication.clipboard() assert clipboard, "Must have running QApplication instance" diff --git a/openpype/plugins/load/copy_file_path.py b/openpype/plugins/load/copy_file_path.py index f64f3e76d8..2041c79f6d 100644 --- a/openpype/plugins/load/copy_file_path.py +++ b/openpype/plugins/load/copy_file_path.py @@ -19,7 +19,7 @@ class CopyFilePath(api.Loader): @staticmethod def copy_path_to_clipboard(path): - from avalon.vendor.Qt import QtWidgets + from Qt import QtWidgets clipboard = QtWidgets.QApplication.clipboard() assert clipboard, "Must have running QApplication instance" diff --git a/openpype/plugins/load/delete_old_versions.py b/openpype/plugins/load/delete_old_versions.py index 263c534b64..b2f2c88975 100644 --- a/openpype/plugins/load/delete_old_versions.py +++ b/openpype/plugins/load/delete_old_versions.py 
@@ -5,9 +5,9 @@ import uuid import clique from pymongo import UpdateOne import ftrack_api +from Qt import QtWidgets, QtCore from avalon import api, style -from avalon.vendor.Qt import QtWidgets, QtCore from avalon.vendor import qargparse from avalon.api import AvalonMongoDB import avalon.pipeline diff --git a/openpype/plugins/load/open_djv.py b/openpype/plugins/load/open_djv.py index 5b49bb58d0..4b0e8411c8 100644 --- a/openpype/plugins/load/open_djv.py +++ b/openpype/plugins/load/open_djv.py @@ -32,7 +32,7 @@ class OpenInDJV(api.Loader): def load(self, context, name, namespace, data): directory = os.path.dirname(self.fname) - from avalon.vendor import clique + import clique pattern = clique.PATTERNS["frames"] files = os.listdir(directory) diff --git a/openpype/plugins/load/open_file.py b/openpype/plugins/load/open_file.py index b496311e0c..4133a64eb3 100644 --- a/openpype/plugins/load/open_file.py +++ b/openpype/plugins/load/open_file.py @@ -27,7 +27,7 @@ class Openfile(api.Loader): color = "orange" def load(self, context, name, namespace, data): - from avalon.vendor import clique + import clique directory = os.path.dirname(self.fname) pattern = clique.PATTERNS["frames"] diff --git a/openpype/plugins/publish/collect_anatomy_context_data.py b/openpype/plugins/publish/collect_anatomy_context_data.py index ec88d5669d..6b95979b76 100644 --- a/openpype/plugins/publish/collect_anatomy_context_data.py +++ b/openpype/plugins/publish/collect_anatomy_context_data.py @@ -54,6 +54,12 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin): if hierarchy_items: hierarchy = os.path.join(*hierarchy_items) + asset_tasks = asset_entity["data"]["tasks"] + task_type = asset_tasks.get(task_name, {}).get("type") + + project_task_types = project_entity["config"]["tasks"] + task_code = project_task_types.get(task_type, {}).get("short_name") + context_data = { "project": { "name": project_entity["name"], @@ -61,7 +67,11 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin): 
}, "asset": asset_entity["name"], "hierarchy": hierarchy.replace("\\", "/"), - "task": task_name, + "task": { + "name": task_name, + "type": task_type, + "short": task_code, + }, "username": context.data["user"], "app": context.data["hostName"] } diff --git a/openpype/plugins/publish/collect_anatomy_instance_data.py b/openpype/plugins/publish/collect_anatomy_instance_data.py index 4fd657167c..da6a2195ee 100644 --- a/openpype/plugins/publish/collect_anatomy_instance_data.py +++ b/openpype/plugins/publish/collect_anatomy_instance_data.py @@ -38,6 +38,8 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder + 0.49 label = "Collect Anatomy Instance data" + follow_workfile_version = False + def process(self, context): self.log.info("Collecting anatomy data for all instances.") @@ -212,8 +214,13 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): project_doc = context.data["projectEntity"] context_asset_doc = context.data["assetEntity"] + project_task_types = project_doc["config"]["tasks"] + for instance in context: - version_number = instance.data.get("version") + if self.follow_workfile_version: + version_number = context.data('version') + else: + version_number = instance.data.get("version") # If version is not specified for instance or context if version_number is None: # TODO we should be able to change default version by studio @@ -240,7 +247,18 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): # Task task_name = instance.data.get("task") if task_name: - anatomy_updates["task"] = task_name + asset_tasks = asset_doc["data"]["tasks"] + task_type = asset_tasks.get(task_name, {}).get("type") + task_code = ( + project_task_types + .get(task_type, {}) + .get("short_name") + ) + anatomy_updates["task"] = { + "name": task_name, + "type": task_type, + "short": task_code + } # Additional data resolution_width = instance.data.get("resolutionWidth") diff --git a/openpype/plugins/publish/collect_modules.py 
b/openpype/plugins/publish/collect_modules.py index bec0c2b436..2f6cb1ef0e 100644 --- a/openpype/plugins/publish/collect_modules.py +++ b/openpype/plugins/publish/collect_modules.py @@ -7,7 +7,7 @@ import pyblish.api class CollectModules(pyblish.api.ContextPlugin): """Collect OpenPype modules.""" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.45 label = "OpenPype Modules" def process(self, context): diff --git a/openpype/plugins/publish/collect_scene_loaded_versions.py b/openpype/plugins/publish/collect_scene_loaded_versions.py new file mode 100644 index 0000000000..c26b322df2 --- /dev/null +++ b/openpype/plugins/publish/collect_scene_loaded_versions.py @@ -0,0 +1,55 @@ + +import pyblish.api +from avalon import api, io + + +class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): + + order = pyblish.api.CollectorOrder + 0.0001 + label = "Collect Versions Loaded in Scene" + hosts = [ + "aftereffects", + "blender", + "celaction", + "fusion", + "harmony", + "hiero", + "houdini", + "maya", + "nuke", + "photoshop", + "resolve", + "tvpaint" + ] + + def process(self, context): + host = api.registered_host() + if host is None: + self.log.warn("No registered host.") + return + + if not hasattr(host, "ls"): + host_name = host.__name__ + self.log.warn("Host %r doesn't have ls() implemented." 
% host_name) + return + + loaded_versions = [] + _containers = list(host.ls()) + _repr_ids = [io.ObjectId(c["representation"]) for c in _containers] + version_by_repr = { + str(doc["_id"]): doc["parent"] for doc in + io.find({"_id": {"$in": _repr_ids}}, projection={"parent": 1}) + } + + for con in _containers: + # NOTE: + # may have more then one representation that are same version + version = { + "objectName": con["objectName"], # container node name + "subsetName": con["name"], + "representation": io.ObjectId(con["representation"]), + "version": version_by_repr[con["representation"]], # _id + } + loaded_versions.append(version) + + context.data["loadedVersions"] = loaded_versions diff --git a/openpype/plugins/publish/collect_scene_version.py b/openpype/plugins/publish/collect_scene_version.py index ca12f2900c..8ed6e25e66 100644 --- a/openpype/plugins/publish/collect_scene_version.py +++ b/openpype/plugins/publish/collect_scene_version.py @@ -10,7 +10,7 @@ class CollectSceneVersion(pyblish.api.ContextPlugin): """ order = pyblish.api.CollectorOrder - label = 'Collect Version' + label = 'Collect Scene Version' hosts = [ "aftereffects", "blender", diff --git a/openpype/plugins/publish/extract_burnin.py b/openpype/plugins/publish/extract_burnin.py index 06eb85c593..35d9e4b2f2 100644 --- a/openpype/plugins/publish/extract_burnin.py +++ b/openpype/plugins/publish/extract_burnin.py @@ -110,6 +110,9 @@ class ExtractBurnin(openpype.api.Extractor): ).format(host_name, family, task_name)) return + self.log.debug("profile: {}".format( + profile)) + # Pre-filter burnin definitions by instance families burnin_defs = self.filter_burnins_defs(profile, instance) if not burnin_defs: @@ -126,18 +129,41 @@ class ExtractBurnin(openpype.api.Extractor): anatomy = instance.context.data["anatomy"] scriptpath = self.burnin_script_path() + # Executable args that will execute the script # [pype executable, *pype script, "run"] executable_args = get_pype_execute_args("run", scriptpath) for 
idx, repre in enumerate(tuple(instance.data["representations"])): self.log.debug("repre ({}): `{}`".format(idx + 1, repre["name"])) + + repre_burnin_links = repre.get("burnins", []) + if not self.repres_is_valid(repre): continue + self.log.debug("repre_burnin_links: {}".format( + repre_burnin_links)) + + self.log.debug("burnin_defs.keys(): {}".format( + burnin_defs.keys())) + + # Filter output definition by `burnin` represetation key + repre_linked_burnins = { + name: output for name, output in burnin_defs.items() + if name in repre_burnin_links + } + self.log.debug("repre_linked_burnins: {}".format( + repre_linked_burnins)) + + # if any match then replace burnin defs and follow tag filtering + _burnin_defs = copy.deepcopy(burnin_defs) + if repre_linked_burnins: + _burnin_defs = repre_linked_burnins + # Filter output definition by representation tags (optional) repre_burnin_defs = self.filter_burnins_by_tags( - burnin_defs, repre["tags"] + _burnin_defs, repre["tags"] ) if not repre_burnin_defs: self.log.info(( @@ -184,7 +210,9 @@ class ExtractBurnin(openpype.api.Extractor): for key in self.positions: value = burnin_def.get(key) if value: - burnin_values[key] = value + burnin_values[key] = value.replace( + "{task}", "{task[name]}" + ) # Remove "delete" tag from new representation if "delete" in new_repre["tags"]: @@ -281,6 +309,8 @@ class ExtractBurnin(openpype.api.Extractor): # NOTE we maybe can keep source representation if necessary instance.data["representations"].remove(repre) + self.log.debug("Files to delete: {}".format(files_to_delete)) + # Delete input files for filepath in files_to_delete: if os.path.exists(filepath): diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py index 264b362558..3ab6ffd489 100644 --- a/openpype/plugins/publish/extract_review.py +++ b/openpype/plugins/publish/extract_review.py @@ -180,6 +180,9 @@ class ExtractReview(pyblish.api.InstancePlugin): if "tags" not in output_def: 
output_def["tags"] = [] + if "burnins" not in output_def: + output_def["burnins"] = [] + # Create copy of representation new_repre = copy.deepcopy(repre) @@ -192,8 +195,20 @@ class ExtractReview(pyblish.api.InstancePlugin): if tag not in new_repre["tags"]: new_repre["tags"].append(tag) + # Add burnin link from output definition to representation + for burnin in output_def["burnins"]: + if burnin not in new_repre.get("burnins", []): + if not new_repre.get("burnins"): + new_repre["burnins"] = [] + new_repre["burnins"].append(str(burnin)) + self.log.debug( - "New representation tags: `{}`".format(new_repre["tags"]) + "Linked burnins: `{}`".format(new_repre.get("burnins")) + ) + + self.log.debug( + "New representation tags: `{}`".format( + new_repre.get("tags")) ) temp_data = self.prepare_temp_data( @@ -232,12 +247,16 @@ class ExtractReview(pyblish.api.InstancePlugin): for f in files_to_clean: os.unlink(f) - output_name = output_def["filename_suffix"] + output_name = new_repre.get("outputName", "") + output_ext = new_repre["ext"] + if output_name: + output_name += "_" + output_name += output_def["filename_suffix"] if temp_data["without_handles"]: output_name += "_noHandles" new_repre.update({ - "name": output_def["filename_suffix"], + "name": "{}_{}".format(output_name, output_ext), "outputName": output_name, "outputDef": output_def, "frameStartFtrack": temp_data["output_frame_start"], @@ -991,10 +1010,11 @@ class ExtractReview(pyblish.api.InstancePlugin): streams = ffprobe_streams( full_input_path_single_file, self.log ) - except Exception: + except Exception as exc: raise AssertionError(( - "FFprobe couldn't read information about input file: \"{}\"" - ).format(full_input_path_single_file)) + "FFprobe couldn't read information about input file: \"{}\"." 
+ " Error message: {}" + ).format(full_input_path_single_file, str(exc))) # Try to find first stream with defined 'width' and 'height' # - this is to avoid order of streams where audio can be as first diff --git a/openpype/plugins/publish/integrate_inputlinks.py b/openpype/plugins/publish/integrate_inputlinks.py new file mode 100644 index 0000000000..f973dfc963 --- /dev/null +++ b/openpype/plugins/publish/integrate_inputlinks.py @@ -0,0 +1,131 @@ + +from collections import OrderedDict +from avalon import io +import pyblish.api + + +class IntegrateInputLinks(pyblish.api.ContextPlugin): + """Connecting version level dependency links""" + + order = pyblish.api.IntegratorOrder + 0.2 + label = "Connect Dependency InputLinks" + + def process(self, context): + """Connect dependency links for all instances, globally + + Code steps: + * filter out instances that has "versionEntity" entry in data + * find workfile instance within context + * if workfile found: + - link all `loadedVersions` as input of the workfile + - link workfile as input of all publishing instances + * else: + - show "no workfile" warning + * link instances' inputs if it's data has "inputVersions" entry + * Write into database + + inputVersions: + The "inputVersions" in instance.data should be a list of + version document's Id (str or ObjectId), which are the + dependencies of the publishing instance that should be + extracted from working scene by the DCC specific publish + plugin. + + """ + workfile = None + publishing = [] + + for instance in context: + if not instance.data.get("publish", True): + # Skip inactive instances + continue + + version_doc = instance.data.get("versionEntity") + if not version_doc: + self.log.debug("Instance %s doesn't have version." 
% instance) + continue + + version_data = version_doc.get("data", {}) + families = version_data.get("families", []) + + if "workfile" in families: + workfile = instance + else: + publishing.append(instance) + + if workfile is None: + self.log.warn("No workfile in this publish session.") + else: + workfile_version_doc = workfile.data["versionEntity"] + # link all loaded versions in scene into workfile + for version in context.data.get("loadedVersions", []): + self.add_link( + link_type="reference", + input_id=version["version"], + version_doc=workfile_version_doc, + ) + # link workfile to all publishing versions + for instance in publishing: + self.add_link( + link_type="generative", + input_id=workfile_version_doc["_id"], + version_doc=instance.data["versionEntity"], + ) + + # link versions as dependencies to the instance + for instance in publishing: + for input_version in instance.data.get("inputVersions") or []: + self.add_link( + link_type="generative", + input_id=input_version, + version_doc=instance.data["versionEntity"], + ) + + if workfile is not None: + publishing.append(workfile) + self.write_links_to_database(publishing) + + def add_link(self, link_type, input_id, version_doc): + """Add dependency link data into version document + + Args: + link_type (str): Type of link, one of 'reference' or 'generative' + input_id (str or ObjectId): Document Id of input version + version_doc (dict): The version document that takes the input + + Returns: + None + + """ + # NOTE: + # using OrderedDict() here is just for ensuring field order between + # python versions, if we ever need to use mongodb operation '$addToSet' + # to update and avoid duplicating elements in 'inputLinks' array in the + # future. 
+ link = OrderedDict() + link["type"] = link_type + link["id"] = io.ObjectId(input_id) + link["linkedBy"] = "publish" + + if "inputLinks" not in version_doc["data"]: + version_doc["data"]["inputLinks"] = [] + version_doc["data"]["inputLinks"].append(link) + + def write_links_to_database(self, instances): + """Iter instances in context to update database + + If `versionEntity.data.inputLinks` not None in `instance.data`, doc + in database will be updated. + + """ + for instance in instances: + version_doc = instance.data.get("versionEntity") + if version_doc is None: + continue + + input_links = version_doc["data"].get("inputLinks") + if input_links is None: + continue + + io.update_one({"_id": version_doc["_id"]}, + {"$set": {"data.inputLinks": input_links}}) diff --git a/openpype/plugins/publish/integrate_new.py b/openpype/plugins/publish/integrate_new.py index 753ed78083..1b0b8da2ff 100644 --- a/openpype/plugins/publish/integrate_new.py +++ b/openpype/plugins/publish/integrate_new.py @@ -172,21 +172,26 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): anatomy_data["hierarchy"] = hierarchy # Make sure task name in anatomy data is same as on instance.data - task_name = instance.data.get("task") - if task_name: - anatomy_data["task"] = task_name - else: - # Just set 'task_name' variable to context task - task_name = anatomy_data["task"] - - # Find task type for current task name - # - this should be already prepared on instance asset_tasks = ( asset_entity.get("data", {}).get("tasks") ) or {} - task_info = asset_tasks.get(task_name) or {} - task_type = task_info.get("type") - instance.data["task_type"] = task_type + task_name = instance.data.get("task") + if task_name: + task_info = asset_tasks.get(task_name) or {} + task_type = task_info.get("type") + + project_task_types = project_entity["config"]["tasks"] + task_code = project_task_types.get(task_type, {}).get("short_name") + anatomy_data["task"] = { + "name": task_name, + "type": task_type, + "short": 
task_code + } + + else: + # Just set 'task_name' variable to context task + task_name = anatomy_data["task"]["name"] + task_type = anatomy_data["task"]["type"] # Fill family in anatomy data anatomy_data["family"] = instance.data.get("family") @@ -804,11 +809,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # - is there a chance that task name is not filled in anatomy # data? # - should we use context task in that case? - task_name = ( - instance.data["anatomyData"]["task"] - or io.Session["AVALON_TASK"] - ) - task_type = instance.data["task_type"] + task_name = instance.data["anatomyData"]["task"]["name"] + task_type = instance.data["anatomyData"]["task"]["type"] filtering_criteria = { "families": instance.data["family"], "hosts": instance.context.data["hostName"], @@ -1029,29 +1031,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): """ local_site = 'studio' # default remote_site = None - sync_server_presets = None - - if (instance.context.data["system_settings"] - ["modules"] - ["sync_server"] - ["enabled"]): - sync_server_presets = (instance.context.data["project_settings"] - ["global"] - ["sync_server"]) - - local_site_id = openpype.api.get_local_site_id() - if sync_server_presets["enabled"]: - local_site = sync_server_presets["config"].\ - get("active_site", "studio").strip() - if local_site == 'local': - local_site = local_site_id - - remote_site = sync_server_presets["config"].get("remote_site") - if remote_site == local_site: - remote_site = None - - if remote_site == 'local': - remote_site = local_site_id + always_accesible = [] + sync_project_presets = None rec = { "_id": io.ObjectId(), @@ -1066,12 +1047,93 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if sites: rec["sites"] = sites else: + system_sync_server_presets = ( + instance.context.data["system_settings"] + ["modules"] + ["sync_server"]) + log.debug("system_sett:: {}".format(system_sync_server_presets)) + + if system_sync_server_presets["enabled"]: + sync_project_presets = 
( + instance.context.data["project_settings"] + ["global"] + ["sync_server"]) + + if sync_project_presets and sync_project_presets["enabled"]: + local_site, remote_site = self._get_sites(sync_project_presets) + + always_accesible = sync_project_presets["config"]. \ + get("always_accessible_on", []) + + already_attached_sites = {} meta = {"name": local_site, "created_dt": datetime.now()} rec["sites"] = [meta] + already_attached_sites[meta["name"]] = meta["created_dt"] - if remote_site: - meta = {"name": remote_site.strip()} - rec["sites"].append(meta) + if sync_project_presets and sync_project_presets["enabled"]: + if remote_site and \ + remote_site not in already_attached_sites.keys(): + # add remote + meta = {"name": remote_site.strip()} + rec["sites"].append(meta) + already_attached_sites[meta["name"]] = None + + # add skeleton for site where it should be always synced to + for always_on_site in always_accesible: + if always_on_site not in already_attached_sites.keys(): + meta = {"name": always_on_site.strip()} + rec["sites"].append(meta) + already_attached_sites[meta["name"]] = None + + # add alternative sites + rec = self._add_alternative_sites(system_sync_server_presets, + already_attached_sites, + rec) + + log.debug("final sites:: {}".format(rec["sites"])) + + return rec + + def _get_sites(self, sync_project_presets): + """Returns tuple (local_site, remote_site)""" + local_site_id = openpype.api.get_local_site_id() + local_site = sync_project_presets["config"]. \ + get("active_site", "studio").strip() + + if local_site == 'local': + local_site = local_site_id + + remote_site = sync_project_presets["config"].get("remote_site") + + if remote_site == 'local': + remote_site = local_site_id + + return local_site, remote_site + + def _add_alternative_sites(self, + system_sync_server_presets, + already_attached_sites, + rec): + """Loop through all configured sites and add alternatives. 
+ + See SyncServerModule.handle_alternate_site + """ + conf_sites = system_sync_server_presets.get("sites", {}) + + for site_name, site_info in conf_sites.items(): + alt_sites = set(site_info.get("alternative_sites", [])) + already_attached_keys = list(already_attached_sites.keys()) + for added_site in already_attached_keys: + if added_site in alt_sites: + if site_name in already_attached_keys: + continue + meta = {"name": site_name} + real_created = already_attached_sites[added_site] + # alt site inherits state of 'created_dt' + if real_created: + meta["created_dt"] = real_created + rec["sites"].append(meta) + already_attached_sites[meta["name"]] = real_created return rec diff --git a/openpype/plugins/publish/validate_editorial_asset_name.py b/openpype/plugins/publish/validate_editorial_asset_name.py index eebba61af3..7359ccf360 100644 --- a/openpype/plugins/publish/validate_editorial_asset_name.py +++ b/openpype/plugins/publish/validate_editorial_asset_name.py @@ -12,6 +12,12 @@ class ValidateEditorialAssetName(pyblish.api.ContextPlugin): order = pyblish.api.ValidatorOrder label = "Validate Editorial Asset Name" + hosts = [ + "hiero", + "standalonepublisher", + "resolve", + "flame" + ] def process(self, context): diff --git a/openpype/plugins/publish/validate_version.py b/openpype/plugins/publish/validate_version.py index 927e024476..e48ce6e3c3 100644 --- a/openpype/plugins/publish/validate_version.py +++ b/openpype/plugins/publish/validate_version.py @@ -21,8 +21,9 @@ class ValidateVersion(pyblish.api.InstancePlugin): if latest_version is not None: msg = ( - "Version `{0}` that you are trying to publish, already exists" - " in the database. Version in database: `{1}`. Please version " - "up your workfile to a higher version number than: `{1}`." - ).format(version, latest_version) + "Version `{0}` from instance `{1}` that you are trying to" + " publish, already exists in the database. Version in" + " database: `{2}`. 
Please version up your workfile to a higher" + " version number than: `{2}`." + ).format(version, instance.data["name"], latest_version) assert (int(version) > int(latest_version)), msg diff --git a/openpype/pype_commands.py b/openpype/pype_commands.py index 521d9159d6..519e7c285b 100644 --- a/openpype/pype_commands.py +++ b/openpype/pype_commands.py @@ -80,7 +80,7 @@ class PypeCommands: standalonepublish.main() @staticmethod - def publish(paths, targets=None): + def publish(paths, targets=None, gui=False): """Start headless publishing. Publish use json from passed paths argument. @@ -89,20 +89,35 @@ class PypeCommands: paths (list): Paths to jsons. targets (string): What module should be targeted (to choose validator for example) + gui (bool): Show publish UI. Raises: RuntimeError: When there is no path to process. """ - if not any(paths): - raise RuntimeError("No publish paths specified") - + from openpype.modules import ModulesManager from openpype import install, uninstall from openpype.api import Logger + from openpype.tools.utils.host_tools import show_publish + from openpype.tools.utils.lib import qt_app_context # Register target and host import pyblish.api import pyblish.util + log = Logger.get_logger() + + install() + + manager = ModulesManager() + + publish_paths = manager.collect_plugin_paths()["publish"] + + for path in publish_paths: + pyblish.api.register_plugin_path(path) + + if not any(paths): + raise RuntimeError("No publish paths specified") + env = get_app_environments_for_context( os.environ["AVALON_PROJECT"], os.environ["AVALON_ASSET"], @@ -111,32 +126,39 @@ class PypeCommands: ) os.environ.update(env) - log = Logger.get_logger() - - install() - - pyblish.api.register_target("filesequence") pyblish.api.register_host("shell") if targets: for target in targets: + print(f"setting target: {target}") pyblish.api.register_target(target) + else: + pyblish.api.register_target("filesequence") os.environ["OPENPYPE_PUBLISH_DATA"] = os.pathsep.join(paths) 
log.info("Running publish ...") - # Error exit as soon as any error occurs. - error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}" + plugins = pyblish.api.discover() + print("Using plugins:") + for plugin in plugins: + print(plugin) - for result in pyblish.util.publish_iter(): - if result["error"]: - log.error(error_format.format(**result)) - uninstall() - sys.exit(1) + if gui: + with qt_app_context(): + show_publish() + else: + # Error exit as soon as any error occurs. + error_format = ("Failed {plugin.__name__}: " + "{error} -- {error.traceback}") + + for result in pyblish.util.publish_iter(): + if result["error"]: + log.error(error_format.format(**result)) + # uninstall() + sys.exit(1) log.info("Publish finished.") - uninstall() @staticmethod def remotepublishfromapp(project, batch_dir, host_name, @@ -345,3 +367,35 @@ class PypeCommands: cmd = "pytest {} {} {}".format(folder, mark_str, pyargs_str) print("Running {}".format(cmd)) subprocess.run(cmd) + + def syncserver(self, active_site): + """Start running sync_server in background.""" + import signal + os.environ["OPENPYPE_LOCAL_ID"] = active_site + + def signal_handler(sig, frame): + print("You pressed Ctrl+C. 
Process ended.") + sync_server_module.server_exit() + sys.exit(0) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + from openpype.modules import ModulesManager + + manager = ModulesManager() + sync_server_module = manager.modules_by_name["sync_server"] + + sync_server_module.server_init() + sync_server_module.server_start() + + import time + while True: + time.sleep(1.0) + + def repack_version(self, directory): + """Repacking OpenPype version.""" + from openpype.tools.repack_version import VersionRepacker + + version_packer = VersionRepacker(directory) + version_packer.process() diff --git a/openpype/scripts/otio_burnin.py b/openpype/scripts/otio_burnin.py index 206abfc0b4..68f4728bc7 100644 --- a/openpype/scripts/otio_burnin.py +++ b/openpype/scripts/otio_burnin.py @@ -37,7 +37,7 @@ TIMECODE_KEY = "{timecode}" SOURCE_TIMECODE_KEY = "{source_timecode}" -def _streams(source): +def _get_ffprobe_data(source): """Reimplemented from otio burnins to be able use full path to ffprobe :param str source: source media file :rtype: [{}, ...] 
@@ -47,7 +47,7 @@ def _streams(source): out = proc.communicate()[0] if proc.returncode != 0: raise RuntimeError("Failed to run: %s" % command) - return json.loads(out)['streams'] + return json.loads(out) def get_fps(str_value): @@ -69,10 +69,10 @@ def get_fps(str_value): return str(fps) -def _prores_codec_args(ffprobe_data, source_ffmpeg_cmd): +def _prores_codec_args(stream_data, source_ffmpeg_cmd): output = [] - tags = ffprobe_data.get("tags") or {} + tags = stream_data.get("tags") or {} encoder = tags.get("encoder") or "" if encoder.endswith("prores_ks"): codec_name = "prores_ks" @@ -85,7 +85,7 @@ def _prores_codec_args(ffprobe_data, source_ffmpeg_cmd): output.extend(["-codec:v", codec_name]) - pix_fmt = ffprobe_data.get("pix_fmt") + pix_fmt = stream_data.get("pix_fmt") if pix_fmt: output.extend(["-pix_fmt", pix_fmt]) @@ -99,7 +99,7 @@ def _prores_codec_args(ffprobe_data, source_ffmpeg_cmd): "ap4h": "4444", "ap4x": "4444xq" } - codec_tag_str = ffprobe_data.get("codec_tag_string") + codec_tag_str = stream_data.get("codec_tag_string") if codec_tag_str: profile = codec_tag_to_profile_map.get(codec_tag_str) if profile: @@ -108,7 +108,7 @@ def _prores_codec_args(ffprobe_data, source_ffmpeg_cmd): return output -def _h264_codec_args(ffprobe_data, source_ffmpeg_cmd): +def _h264_codec_args(stream_data, source_ffmpeg_cmd): output = ["-codec:v", "h264"] # Use arguments from source if are available source arguments @@ -125,7 +125,7 @@ def _h264_codec_args(ffprobe_data, source_ffmpeg_cmd): if arg in copy_args: output.extend([arg, args[idx + 1]]) - pix_fmt = ffprobe_data.get("pix_fmt") + pix_fmt = stream_data.get("pix_fmt") if pix_fmt: output.extend(["-pix_fmt", pix_fmt]) @@ -135,11 +135,11 @@ def _h264_codec_args(ffprobe_data, source_ffmpeg_cmd): return output -def _dnxhd_codec_args(ffprobe_data, source_ffmpeg_cmd): +def _dnxhd_codec_args(stream_data, source_ffmpeg_cmd): output = ["-codec:v", "dnxhd"] # Use source profile (profiles in metadata are not usable in args directly) 
- profile = ffprobe_data.get("profile") or "" + profile = stream_data.get("profile") or "" # Lower profile and replace space with underscore cleaned_profile = profile.lower().replace(" ", "_") dnx_profiles = { @@ -153,7 +153,7 @@ def _dnxhd_codec_args(ffprobe_data, source_ffmpeg_cmd): if cleaned_profile in dnx_profiles: output.extend(["-profile:v", cleaned_profile]) - pix_fmt = ffprobe_data.get("pix_fmt") + pix_fmt = stream_data.get("pix_fmt") if pix_fmt: output.extend(["-pix_fmt", pix_fmt]) @@ -162,28 +162,29 @@ def _dnxhd_codec_args(ffprobe_data, source_ffmpeg_cmd): def get_codec_args(ffprobe_data, source_ffmpeg_cmd): - codec_name = ffprobe_data.get("codec_name") + stream_data = ffprobe_data["streams"][0] + codec_name = stream_data.get("codec_name") # Codec "prores" if codec_name == "prores": - return _prores_codec_args(ffprobe_data, source_ffmpeg_cmd) + return _prores_codec_args(stream_data, source_ffmpeg_cmd) # Codec "h264" if codec_name == "h264": - return _h264_codec_args(ffprobe_data, source_ffmpeg_cmd) + return _h264_codec_args(stream_data, source_ffmpeg_cmd) # Coded DNxHD if codec_name == "dnxhd": - return _dnxhd_codec_args(ffprobe_data, source_ffmpeg_cmd) + return _dnxhd_codec_args(stream_data, source_ffmpeg_cmd) output = [] if codec_name: output.extend(["-codec:v", codec_name]) - bit_rate = ffprobe_data.get("bit_rate") + bit_rate = stream_data.get("bit_rate") if bit_rate: output.extend(["-b:v", bit_rate]) - pix_fmt = ffprobe_data.get("pix_fmt") + pix_fmt = stream_data.get("pix_fmt") if pix_fmt: output.extend(["-pix_fmt", pix_fmt]) @@ -244,15 +245,16 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): } def __init__( - self, source, streams=None, options_init=None, first_frame=None + self, source, ffprobe_data=None, options_init=None, first_frame=None ): - if not streams: - streams = _streams(source) + if not ffprobe_data: + ffprobe_data = _get_ffprobe_data(source) + self.ffprobe_data = ffprobe_data self.first_frame = first_frame self.input_args = [] - 
super().__init__(source, streams) + super().__init__(source, ffprobe_data["streams"]) if options_init: self.options_init.update(options_init) @@ -492,8 +494,6 @@ def example(input_path, output_path): 'bg_opacity': 0.5, 'font_size': 52 } - # First frame in burnin - start_frame = 2000 # Options init sets burnin look burnin = ModifiedBurnins(input_path, options_init=options_init) # Static text @@ -564,11 +564,11 @@ def burnins_from_data( "shot": "sh0010" } """ - streams = None + ffprobe_data = None if full_input_path: - streams = _streams(full_input_path) + ffprobe_data = _get_ffprobe_data(full_input_path) - burnin = ModifiedBurnins(input_path, streams, options, first_frame) + burnin = ModifiedBurnins(input_path, ffprobe_data, options, first_frame) frame_start = data.get("frame_start") frame_end = data.get("frame_end") @@ -595,6 +595,14 @@ def burnins_from_data( if source_timecode is None: source_timecode = stream.get("tags", {}).get("timecode") + if source_timecode is None: + # Use "format" key from ffprobe data + # - this is used e.g. 
in mxf extension + input_format = burnin.ffprobe_data.get("format") or {} + source_timecode = input_format.get("timecode") + if source_timecode is None: + source_timecode = input_format.get("tags", {}).get("timecode") + if source_timecode is not None: data[SOURCE_TIMECODE_KEY[1:-1]] = SOURCE_TIMECODE_KEY @@ -684,8 +692,9 @@ def burnins_from_data( ffmpeg_args.append("-g 1") else: - ffprobe_data = burnin._streams[0] - ffmpeg_args.extend(get_codec_args(ffprobe_data, source_ffmpeg_cmd)) + ffmpeg_args.extend( + get_codec_args(burnin.ffprobe_data, source_ffmpeg_cmd) + ) # Use group one (same as `-intra` argument, which is deprecated) ffmpeg_args_str = " ".join(ffmpeg_args) diff --git a/openpype/settings/defaults/project_anatomy/imageio.json b/openpype/settings/defaults/project_anatomy/imageio.json index fc34ef6813..09ab398c37 100644 --- a/openpype/settings/defaults/project_anatomy/imageio.json +++ b/openpype/settings/defaults/project_anatomy/imageio.json @@ -28,6 +28,9 @@ "viewer": { "viewerProcess": "sRGB" }, + "baking": { + "viewerProcess": "rec709" + }, "workfile": { "colorManagement": "Nuke", "OCIO_config": "nuke-default", diff --git a/openpype/settings/defaults/project_anatomy/templates.json b/openpype/settings/defaults/project_anatomy/templates.json index 53abd35ed5..9a03b893bf 100644 --- a/openpype/settings/defaults/project_anatomy/templates.json +++ b/openpype/settings/defaults/project_anatomy/templates.json @@ -6,8 +6,8 @@ "frame": "{frame:0>{@frame_padding}}" }, "work": { - "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/work/{task}", - "file": "{project[code]}_{asset}_{task}_{@version}<_{comment}>.{ext}", + "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/work/{task[name]}", + "file": "{project[code]}_{asset}_{task[name]}_{@version}<_{comment}>.{ext}", "path": "{@folder}/{@file}" }, "render": { diff --git a/openpype/settings/defaults/project_settings/global.json b/openpype/settings/defaults/project_settings/global.json index 
45c1a59d17..55732f80ce 100644 --- a/openpype/settings/defaults/project_settings/global.json +++ b/openpype/settings/defaults/project_settings/global.json @@ -1,5 +1,8 @@ { "publish": { + "CollectAnatomyInstanceData": { + "follow_workfile_version": false + }, "ValidateEditorialAssetName": { "enabled": true, "optional": false @@ -49,6 +52,7 @@ "burnin", "ftrackreview" ], + "burnins": [], "ffmpeg_args": { "video_filters": [], "audio_filters": [], @@ -315,10 +319,11 @@ }, "project_folder_structure": "{\"__project_root__\": {\"prod\": {}, \"resources\": {\"footage\": {\"plates\": {}, \"offline\": {}}, \"audio\": {}, \"art_dept\": {}}, \"editorial\": {}, \"assets[ftrack.Library]\": {\"characters[ftrack]\": {}, \"locations[ftrack]\": {}}, \"shots[ftrack.Sequence]\": {\"scripts\": {}, \"editorial[ftrack.Folder]\": {}}}}", "sync_server": { - "enabled": true, + "enabled": false, "config": { "retry_cnt": "3", "loop_delay": "60", + "always_accessible_on": [], "active_site": "studio", "remote_site": "studio" }, diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json index 689d6418ba..73c75ef3ee 100644 --- a/openpype/settings/defaults/project_settings/maya.json +++ b/openpype/settings/defaults/project_settings/maya.json @@ -42,7 +42,8 @@ "enabled": true, "defaults": [ "Main" - ] + ], + "aov_separator": "underscore" }, "CreateAnimation": { "enabled": true, diff --git a/openpype/settings/defaults/project_settings/nuke.json b/openpype/settings/defaults/project_settings/nuke.json index 069994d0e8..c3e229b8e8 100644 --- a/openpype/settings/defaults/project_settings/nuke.json +++ b/openpype/settings/defaults/project_settings/nuke.json @@ -110,7 +110,20 @@ }, "ExtractReviewDataMov": { "enabled": true, - "viewer_lut_raw": false + "viewer_lut_raw": false, + "outputs": { + "baking": { + "filter": { + "task_types": [], + "families": [] + }, + "extension": "mov", + "viewer_process_override": "", + "bake_viewer_process": 
true, + "bake_viewer_input_process": true, + "add_tags": [] + } + } }, "ExtractSlateFrame": { "viewer_lut_raw": false diff --git a/openpype/settings/defaults/project_settings/standalonepublisher.json b/openpype/settings/defaults/project_settings/standalonepublisher.json index 50c1e34366..6858c4f34d 100644 --- a/openpype/settings/defaults/project_settings/standalonepublisher.json +++ b/openpype/settings/defaults/project_settings/standalonepublisher.json @@ -173,9 +173,9 @@ "workfile_families": [], "texture_families": [], "color_space": [ - "linsRGB", - "raw", - "acesg" + "sRGB", + "Raw", + "ACEScg" ], "input_naming_patterns": { "workfile": [ diff --git a/openpype/settings/defaults/project_settings/webpublisher.json b/openpype/settings/defaults/project_settings/webpublisher.json index f57b79a609..9db98acd5a 100644 --- a/openpype/settings/defaults/project_settings/webpublisher.json +++ b/openpype/settings/defaults/project_settings/webpublisher.json @@ -115,6 +115,9 @@ "default_task_type": "Default task type" } } + }, + "CollectTVPaintInstances": { + "layer_name_regex": "(?PL[0-9]{3}_\\w+)_(?P.+)" } } } \ No newline at end of file diff --git a/openpype/settings/defaults/system_settings/modules.json b/openpype/settings/defaults/system_settings/modules.json index beb1eb4f24..f0caa153de 100644 --- a/openpype/settings/defaults/system_settings/modules.json +++ b/openpype/settings/defaults/system_settings/modules.json @@ -167,6 +167,16 @@ "ffmpeg": 48 } }, + "royalrender": { + "enabled": false, + "rr_paths": { + "default": { + "windows": "", + "darwin": "", + "linux": "" + } + } + }, "log_viewer": { "enabled": true }, @@ -178,5 +188,13 @@ }, "slack": { "enabled": false + }, + "job_queue": { + "server_url": "", + "jobs_root": { + "windows": "", + "darwin": "", + "linux": "" + } } } \ No newline at end of file diff --git a/openpype/settings/entities/dict_conditional.py b/openpype/settings/entities/dict_conditional.py index 0cb8827991..5f1c172f31 100644 --- 
a/openpype/settings/entities/dict_conditional.py +++ b/openpype/settings/entities/dict_conditional.py @@ -762,6 +762,17 @@ class SyncServerProviders(DictConditionalEntity): enum_children = [] for provider_code, configurables in system_settings_schema.items(): + # any site could be exposed or vendorized by different site + # eg studio site content could be mapped on sftp site, single file + # accessible via 2 different protocols (sites) + configurables.append( + { + "type": "list", + "key": "alternative_sites", + "label": "Alternative sites", + "object_type": "text" + } + ) label = provider_code_to_label.get(provider_code) or provider_code enum_children.append({ diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_syncserver.json b/openpype/settings/entities/schemas/projects_schema/schema_project_syncserver.json index 3211babd43..7e1b0114f5 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_syncserver.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_syncserver.json @@ -26,113 +26,33 @@ "key": "loop_delay", "label": "Loop Delay" }, + { + "type": "list", + "key": "always_accessible_on", + "label": "Always accessible on sites", + "object_type": "text" + }, + { + "type": "splitter" + }, { "type": "text", "key": "active_site", - "label": "Active Site" + "label": "User Default Active Site" }, { "type": "text", "key": "remote_site", - "label": "Remote Site" + "label": "User Default Remote Site" } ] }, { - "type": "dict-modifiable", + "type": "sync-server-sites", "collapsible": true, "key": "sites", "label": "Sites", - "collapsible_key": false, - "object_type": { - "type": "dict", - "children": [ - { - "type": "dict", - "key": "gdrive", - "label": "Google Drive", - "collapsible": true, - "children": [ - { - "type": "path", - "key": "credentials_url", - "label": "Credentials url", - "multiplatform": true - } - ] - }, - { - "type": "dict", - "key": "dropbox", - "label": "Dropbox", - 
"collapsible": true, - "children": [ - { - "type": "text", - "key": "token", - "label": "Access Token" - }, - { - "type": "text", - "key": "team_folder_name", - "label": "Team Folder Name" - }, - { - "type": "text", - "key": "acting_as_member", - "label": "Acting As Member" - } - ] - }, - { - "type": "dict", - "key": "sftp", - "label": "SFTP", - "collapsible": true, - "children": [ - { - "type": "text", - "key": "sftp_host", - "label": "SFTP host" - }, - { - "type": "number", - "key": "sftp_port", - "label": "SFTP port" - }, - { - "type": "text", - "key": "sftp_user", - "label": "SFTP user" - }, - { - "type": "text", - "key": "sftp_pass", - "label": "SFTP pass" - }, - { - "type": "path", - "key": "sftp_key", - "label": "SFTP user ssh key", - "multiplatform": true - }, - { - "type": "text", - "key": "sftp_key_pass", - "label": "SFTP user ssh key password" - } - ] - }, - { - "type": "dict-modifiable", - "key": "root", - "label": "Roots", - "collapsable": false, - "collapsable_key": false, - "object_type": "text" - } - ] - } + "collapsible_key": false } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json b/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json index 91337da2b2..78f38f111d 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json @@ -62,8 +62,25 @@ } } ] + }, + { + "type": "dict", + "collapsible": true, + "key": "CollectTVPaintInstances", + "label": "Collect TVPaint Instances", + "children": [ + { + "type": "label", + "label": "Regex helps to extract render layer and pass names from TVPaint layer name.
The regex must contain named groups 'layer' and 'pass' which are used for creation of RenderPass instances.

Example layer name: \"L001_Person_Hand\"
Example regex: \"(?P<layer>L[0-9]{3}_\\w+)_(?P<pass>.+)\"
Extracted layer: \"L001_Person\"
Extracted pass: \"Hand\"" + }, + { + "type": "text", + "key": "layer_name_regex", + "label": "Layer name regex" + } + ] } ] } ] -} \ No newline at end of file +} diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json index 7423d6fd3e..380ea4a83d 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json @@ -131,6 +131,19 @@ } ] }, + { + "key": "baking", + "type": "dict", + "label": "Extract-review baking profile", + "collapsible": false, + "children": [ + { + "type": "text", + "key": "viewerProcess", + "label": "Viewer Process" + } + ] + }, { "key": "workfile", "type": "dict", @@ -363,7 +376,7 @@ "key": "maya", "type": "dict", "label": "Maya", - "children": [ + "children": [ { "key": "colorManagementPreference", "type": "dict", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json index a8534e7e29..e208069e6f 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json @@ -11,6 +11,10 @@ "type": "dict", "key": "defaults", "children": [ + { + "type": "label", + "label": "The list of existing placeholders is available here:
https://openpype.io/docs/admin_settings_project_anatomy/#available-template-keys " + }, { "type": "number", "key": "version_padding", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json index c50f383f02..d146f3cf15 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json @@ -4,6 +4,20 @@ "key": "publish", "label": "Publish plugins", "children": [ + { + "type": "dict", + "collapsible": true, + "key": "CollectAnatomyInstanceData", + "label": "Collect Anatomy Instance Data", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "follow_workfile_version", + "label": "Follow workfile version" + } + ] + }, { "type": "dict", "collapsible": true, @@ -198,6 +212,12 @@ "type": "schema", "name": "schema_representation_tags" }, + { + "key": "burnins", + "label": "Link to a burnin by name", + "type": "list", + "object_type": "text" + }, { "key": "ffmpeg_args", "label": "FFmpeg arguments", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json index 44a35af7c1..e50357cc40 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json @@ -46,6 +46,18 @@ "key": "defaults", "label": "Default Subsets", "object_type": "text" + }, + { + "key": "aov_separator", + "label": "AOV Separator character", + "type": "enum", + "multiselection": false, + "default": "underscore", + "enum_items": [ + {"dash": "- (dash)"}, + {"underscore": "_ (underscore)"}, + {"dot": ". 
(dot)"} + ] } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json index 74b2592d29..39390f355a 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json @@ -24,6 +24,9 @@ { "nukenodes": "nukenodes" }, + { + "model": "model" + }, { "camera": "camera" }, @@ -167,7 +170,67 @@ "type": "boolean", "key": "viewer_lut_raw", "label": "Viewer LUT raw" + }, + { + "key": "outputs", + "label": "Output Definitions", + "type": "dict-modifiable", + "highlight_content": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "dict", + "collapsible": false, + "key": "filter", + "label": "Filtering", + "children": [ + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + } + ] + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "extension", + "label": "File extension" + }, + { + "type": "text", + "key": "viewer_process_override", + "label": "Viewer Process colorspace profile override" + }, + { + "type": "boolean", + "key": "bake_viewer_process", + "label": "Bake Viewer Process" + }, + { + "type": "boolean", + "key": "bake_viewer_input_process", + "label": "Bake Viewer Input Process (LUTs)" + }, + { + "key": "add_tags", + "label": "Add additional tags to representations", + "type": "list", + "object_type": "text" + } + ] + } } + ] }, { diff --git a/openpype/settings/entities/schemas/system_schema/schema_modules.json b/openpype/settings/entities/schemas/system_schema/schema_modules.json index a2b31772e9..52595914ed 100644 --- a/openpype/settings/entities/schemas/system_schema/schema_modules.json +++ b/openpype/settings/entities/schemas/system_schema/schema_modules.json @@ 
-180,6 +180,31 @@ } ] }, + { + "type": "dict", + "key": "royalrender", + "label": "Royal Render", + "require_restart": true, + "collapsible": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "dict-modifiable", + "object_type": { + "type": "path", + "multiplatform": true + }, + "key": "rr_paths", + "required_keys": ["default"], + "label": "Royal Render Root Paths" + } + ] + }, { "type": "dict", "key": "log_viewer", @@ -237,6 +262,38 @@ } ] }, + { + "type": "dict", + "key": "job_queue", + "label": "Job Queue", + "require_restart": true, + "collapsible": true, + "children": [ + { + "type": "label", + "label": "Address of machine where job queue server is running." + }, + { + "type": "text", + "key": "server_url", + "label": "Server Rest URL" + }, + { + "type": "separator" + }, + { + "type": "label", + "label": "Jobs root is used as temporary directory for workers where source is copied and render output can be stored." 
+ }, + { + "key": "jobs_root", + "label": "Jobs root", + "type": "path", + "multipath": false, + "multiplatform": true + } + ] + }, { "type": "dynamic_schema", "name": "system_settings/modules" diff --git a/openpype/settings/lib.py b/openpype/settings/lib.py index 60ed54bd4a..ff75562413 100644 --- a/openpype/settings/lib.py +++ b/openpype/settings/lib.py @@ -856,6 +856,7 @@ def get_anatomy_settings( apply_local_settings_on_anatomy_settings( result, local_settings, project_name, site_name ) + return result diff --git a/openpype/style/__init__.py b/openpype/style/__init__.py index fd39e93b5d..cb0595d522 100644 --- a/openpype/style/__init__.py +++ b/openpype/style/__init__.py @@ -128,9 +128,13 @@ def _load_font(): _FONT_IDS = [] fonts_dirpath = os.path.join(current_dir, "fonts") font_dirs = [] - font_dirs.append(os.path.join(fonts_dirpath, "Montserrat")) - font_dirs.append(os.path.join(fonts_dirpath, "Spartan")) - font_dirs.append(os.path.join(fonts_dirpath, "RobotoMono", "static")) + font_dirs.append(os.path.join(fonts_dirpath, "Noto_Sans")) + font_dirs.append(os.path.join( + fonts_dirpath, + "Noto_Sans_Mono", + "static", + "NotoSansMono" + )) loaded_fonts = [] for font_dir in font_dirs: diff --git a/openpype/style/data.json b/openpype/style/data.json index b92ee61764..026eaf4264 100644 --- a/openpype/style/data.json +++ b/openpype/style/data.json @@ -18,7 +18,6 @@ "green-light": "hsl(155, 80%, 80%)" }, "color": { - "font": "#D3D8DE", "font-hover": "#F0F2F5", "font-disabled": "#99A3B2", @@ -50,8 +49,22 @@ "border": "#373D48", "border-hover": "rgba(168, 175, 189, .3)", - "border-focus": "hsl(200, 60%, 60%)", + "border-focus": "rgb(92, 173, 214)", + "tab-widget": { + "bg": "#21252B", + "bg-selected": "#434a56", + "bg-hover": "#373D48", + "color": "#99A3B2", + "color-selected": "#F0F2F5", + "color-hover": "#F0F2F5" + }, + "nice-checkbox": { + "bg-checked": "#56a06f", + "bg-unchecked": "#434b56", + "bg-checker": "#D3D8DE", + "bg-checker-hover": "#F0F2F5" + }, "loader": { 
"asset-view": { "selected": "rgba(168, 175, 189, 0.6)", @@ -71,6 +84,34 @@ "bg-expander-hover": "#2d6c9f", "bg-expander-selected-hover": "#3784c5" } + }, + "settings": { + "invalid-light": "#C93636", + "invalid-dark": "#AD2E2E", + + "modified-light": "#46b1f3", + "modified-mid": "#189AEA", + "modified-dark": "#106AA2", + + "studio-light": "#73C990", + "studio-dark": "#56a06f", + "studio-label-hover": "#FFFFFF", + + "project-light": "#FFA64D", + "project-mid": "#FF8C1A", + "project-dark": "#E67300", + + "label-fg": "#969b9e", + "label-fg-hover": "#b8c1c5", + + "breadcrumbs-btn-bg": "rgba(127, 127, 127, 60)", + "breadcrumbs-btn-bg-hover": "rgba(127, 127, 127, 90)", + + "content-hightlighted": "rgba(19, 26, 32, 15)", + "focus-border": "#839caf", + "image-btn": "#bfccd6", + "image-btn-hover": "#189aea", + "image-btn-disabled": "#bfccd6" } } } diff --git a/openpype/style/fonts/Montserrat/Montserrat-Black.ttf b/openpype/style/fonts/Montserrat/Montserrat-Black.ttf deleted file mode 100644 index 437b1157cb..0000000000 Binary files a/openpype/style/fonts/Montserrat/Montserrat-Black.ttf and /dev/null differ diff --git a/openpype/style/fonts/Montserrat/Montserrat-BlackItalic.ttf b/openpype/style/fonts/Montserrat/Montserrat-BlackItalic.ttf deleted file mode 100644 index 52348354c2..0000000000 Binary files a/openpype/style/fonts/Montserrat/Montserrat-BlackItalic.ttf and /dev/null differ diff --git a/openpype/style/fonts/Montserrat/Montserrat-Bold.ttf b/openpype/style/fonts/Montserrat/Montserrat-Bold.ttf deleted file mode 100644 index 221819bca0..0000000000 Binary files a/openpype/style/fonts/Montserrat/Montserrat-Bold.ttf and /dev/null differ diff --git a/openpype/style/fonts/Montserrat/Montserrat-BoldItalic.ttf b/openpype/style/fonts/Montserrat/Montserrat-BoldItalic.ttf deleted file mode 100644 index 9ae2bd240f..0000000000 Binary files a/openpype/style/fonts/Montserrat/Montserrat-BoldItalic.ttf and /dev/null differ diff --git 
a/openpype/style/fonts/Montserrat/Montserrat-ExtraBold.ttf b/openpype/style/fonts/Montserrat/Montserrat-ExtraBold.ttf deleted file mode 100644 index 80ea8061b0..0000000000 Binary files a/openpype/style/fonts/Montserrat/Montserrat-ExtraBold.ttf and /dev/null differ diff --git a/openpype/style/fonts/Montserrat/Montserrat-ExtraBoldItalic.ttf b/openpype/style/fonts/Montserrat/Montserrat-ExtraBoldItalic.ttf deleted file mode 100644 index 6c961e1cc9..0000000000 Binary files a/openpype/style/fonts/Montserrat/Montserrat-ExtraBoldItalic.ttf and /dev/null differ diff --git a/openpype/style/fonts/Montserrat/Montserrat-ExtraLight.ttf b/openpype/style/fonts/Montserrat/Montserrat-ExtraLight.ttf deleted file mode 100644 index ca0bbb6569..0000000000 Binary files a/openpype/style/fonts/Montserrat/Montserrat-ExtraLight.ttf and /dev/null differ diff --git a/openpype/style/fonts/Montserrat/Montserrat-ExtraLightItalic.ttf b/openpype/style/fonts/Montserrat/Montserrat-ExtraLightItalic.ttf deleted file mode 100644 index f3c1559ec7..0000000000 Binary files a/openpype/style/fonts/Montserrat/Montserrat-ExtraLightItalic.ttf and /dev/null differ diff --git a/openpype/style/fonts/Montserrat/Montserrat-Italic.ttf b/openpype/style/fonts/Montserrat/Montserrat-Italic.ttf deleted file mode 100644 index eb4232a0c2..0000000000 Binary files a/openpype/style/fonts/Montserrat/Montserrat-Italic.ttf and /dev/null differ diff --git a/openpype/style/fonts/Montserrat/Montserrat-Light.ttf b/openpype/style/fonts/Montserrat/Montserrat-Light.ttf deleted file mode 100644 index 990857de8e..0000000000 Binary files a/openpype/style/fonts/Montserrat/Montserrat-Light.ttf and /dev/null differ diff --git a/openpype/style/fonts/Montserrat/Montserrat-LightItalic.ttf b/openpype/style/fonts/Montserrat/Montserrat-LightItalic.ttf deleted file mode 100644 index 209604046b..0000000000 Binary files a/openpype/style/fonts/Montserrat/Montserrat-LightItalic.ttf and /dev/null differ diff --git 
a/openpype/style/fonts/Montserrat/Montserrat-Medium.ttf b/openpype/style/fonts/Montserrat/Montserrat-Medium.ttf deleted file mode 100644 index 6e079f6984..0000000000 Binary files a/openpype/style/fonts/Montserrat/Montserrat-Medium.ttf and /dev/null differ diff --git a/openpype/style/fonts/Montserrat/Montserrat-MediumItalic.ttf b/openpype/style/fonts/Montserrat/Montserrat-MediumItalic.ttf deleted file mode 100644 index 0dc3ac9c29..0000000000 Binary files a/openpype/style/fonts/Montserrat/Montserrat-MediumItalic.ttf and /dev/null differ diff --git a/openpype/style/fonts/Montserrat/Montserrat-Regular.ttf b/openpype/style/fonts/Montserrat/Montserrat-Regular.ttf deleted file mode 100644 index 8d443d5d56..0000000000 Binary files a/openpype/style/fonts/Montserrat/Montserrat-Regular.ttf and /dev/null differ diff --git a/openpype/style/fonts/Montserrat/Montserrat-SemiBold.ttf b/openpype/style/fonts/Montserrat/Montserrat-SemiBold.ttf deleted file mode 100644 index f8a43f2b20..0000000000 Binary files a/openpype/style/fonts/Montserrat/Montserrat-SemiBold.ttf and /dev/null differ diff --git a/openpype/style/fonts/Montserrat/Montserrat-SemiBoldItalic.ttf b/openpype/style/fonts/Montserrat/Montserrat-SemiBoldItalic.ttf deleted file mode 100644 index 336c56ec0c..0000000000 Binary files a/openpype/style/fonts/Montserrat/Montserrat-SemiBoldItalic.ttf and /dev/null differ diff --git a/openpype/style/fonts/Montserrat/Montserrat-Thin.ttf b/openpype/style/fonts/Montserrat/Montserrat-Thin.ttf deleted file mode 100644 index b9858757eb..0000000000 Binary files a/openpype/style/fonts/Montserrat/Montserrat-Thin.ttf and /dev/null differ diff --git a/openpype/style/fonts/Montserrat/Montserrat-ThinItalic.ttf b/openpype/style/fonts/Montserrat/Montserrat-ThinItalic.ttf deleted file mode 100644 index e488998ec7..0000000000 Binary files a/openpype/style/fonts/Montserrat/Montserrat-ThinItalic.ttf and /dev/null differ diff --git a/openpype/style/fonts/Noto_Sans/NotoSans-Bold.ttf 
b/openpype/style/fonts/Noto_Sans/NotoSans-Bold.ttf new file mode 100644 index 0000000000..54ad879b41 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans/NotoSans-Bold.ttf differ diff --git a/openpype/style/fonts/Noto_Sans/NotoSans-BoldItalic.ttf b/openpype/style/fonts/Noto_Sans/NotoSans-BoldItalic.ttf new file mode 100644 index 0000000000..530a82835d Binary files /dev/null and b/openpype/style/fonts/Noto_Sans/NotoSans-BoldItalic.ttf differ diff --git a/openpype/style/fonts/Noto_Sans/NotoSans-Italic.ttf b/openpype/style/fonts/Noto_Sans/NotoSans-Italic.ttf new file mode 100644 index 0000000000..27ff1ed60a Binary files /dev/null and b/openpype/style/fonts/Noto_Sans/NotoSans-Italic.ttf differ diff --git a/openpype/style/fonts/Noto_Sans/NotoSans-Regular.ttf b/openpype/style/fonts/Noto_Sans/NotoSans-Regular.ttf new file mode 100644 index 0000000000..10589e277e Binary files /dev/null and b/openpype/style/fonts/Noto_Sans/NotoSans-Regular.ttf differ diff --git a/openpype/style/fonts/Spartan/OFL.txt b/openpype/style/fonts/Noto_Sans/OFL.txt similarity index 98% rename from openpype/style/fonts/Spartan/OFL.txt rename to openpype/style/fonts/Noto_Sans/OFL.txt index 808b610ffd..c9857270cc 100644 --- a/openpype/style/fonts/Spartan/OFL.txt +++ b/openpype/style/fonts/Noto_Sans/OFL.txt @@ -1,4 +1,4 @@ -Copyright 2020 The Spartan Project Authors (https://github.com/bghryct/Spartan-MB) +Copyright 2012 Google Inc. All Rights Reserved. This Font Software is licensed under the SIL Open Font License, Version 1.1. 
This license is copied below, and is also available with a FAQ at: diff --git a/openpype/style/fonts/Noto_Sans_Mono/NotoSansMono-VariableFont_wdth,wght.ttf b/openpype/style/fonts/Noto_Sans_Mono/NotoSansMono-VariableFont_wdth,wght.ttf new file mode 100644 index 0000000000..9dabd9e7a4 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/NotoSansMono-VariableFont_wdth,wght.ttf differ diff --git a/openpype/style/fonts/Montserrat/OFL.txt b/openpype/style/fonts/Noto_Sans_Mono/OFL.txt similarity index 97% rename from openpype/style/fonts/Montserrat/OFL.txt rename to openpype/style/fonts/Noto_Sans_Mono/OFL.txt index f435ed8b5e..c9857270cc 100644 --- a/openpype/style/fonts/Montserrat/OFL.txt +++ b/openpype/style/fonts/Noto_Sans_Mono/OFL.txt @@ -1,4 +1,4 @@ -Copyright 2011 The Montserrat Project Authors (https://github.com/JulietaUla/Montserrat) +Copyright 2012 Google Inc. All Rights Reserved. This Font Software is licensed under the SIL Open Font License, Version 1.1. This license is copied below, and is also available with a FAQ at: diff --git a/openpype/style/fonts/Noto_Sans_Mono/README.txt b/openpype/style/fonts/Noto_Sans_Mono/README.txt new file mode 100644 index 0000000000..b8a8fdb965 --- /dev/null +++ b/openpype/style/fonts/Noto_Sans_Mono/README.txt @@ -0,0 +1,99 @@ +Noto Sans Mono Variable Font +============================ + +This download contains Noto Sans Mono as both a variable font and static fonts. + +Noto Sans Mono is a variable font with these axes: + wdth + wght + +This means all the styles are contained in a single file: + NotoSansMono-VariableFont_wdth,wght.ttf + +If your app fully supports variable fonts, you can now pick intermediate styles +that aren’t available as static fonts. 
Not all apps support variable fonts, and +in those cases you can use the static font files for Noto Sans Mono: + static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Thin.ttf + static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-ExtraLight.ttf + static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Light.ttf + static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Regular.ttf + static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Medium.ttf + static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-SemiBold.ttf + static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Bold.ttf + static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-ExtraBold.ttf + static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Black.ttf + static/NotoSansMono_Condensed/NotoSansMono_Condensed-Thin.ttf + static/NotoSansMono_Condensed/NotoSansMono_Condensed-ExtraLight.ttf + static/NotoSansMono_Condensed/NotoSansMono_Condensed-Light.ttf + static/NotoSansMono_Condensed/NotoSansMono_Condensed-Regular.ttf + static/NotoSansMono_Condensed/NotoSansMono_Condensed-Medium.ttf + static/NotoSansMono_Condensed/NotoSansMono_Condensed-SemiBold.ttf + static/NotoSansMono_Condensed/NotoSansMono_Condensed-Bold.ttf + static/NotoSansMono_Condensed/NotoSansMono_Condensed-ExtraBold.ttf + static/NotoSansMono_Condensed/NotoSansMono_Condensed-Black.ttf + static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Thin.ttf + static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-ExtraLight.ttf + static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Light.ttf + static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Regular.ttf + static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Medium.ttf + static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-SemiBold.ttf + static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Bold.ttf + static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-ExtraBold.ttf + 
static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Black.ttf + static/NotoSansMono/NotoSansMono-Thin.ttf + static/NotoSansMono/NotoSansMono-ExtraLight.ttf + static/NotoSansMono/NotoSansMono-Light.ttf + static/NotoSansMono/NotoSansMono-Regular.ttf + static/NotoSansMono/NotoSansMono-Medium.ttf + static/NotoSansMono/NotoSansMono-SemiBold.ttf + static/NotoSansMono/NotoSansMono-Bold.ttf + static/NotoSansMono/NotoSansMono-ExtraBold.ttf + static/NotoSansMono/NotoSansMono-Black.ttf + +Get started +----------- + +1. Install the font files you want to use + +2. Use your app's font picker to view the font family and all the +available styles + +Learn more about variable fonts +------------------------------- + + https://developers.google.com/web/fundamentals/design-and-ux/typography/variable-fonts + https://variablefonts.typenetwork.com + https://medium.com/variable-fonts + +In desktop apps + + https://theblog.adobe.com/can-variable-fonts-illustrator-cc + https://helpx.adobe.com/nz/photoshop/using/fonts.html#variable_fonts + +Online + + https://developers.google.com/fonts/docs/getting_started + https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Fonts/Variable_Fonts_Guide + https://developer.microsoft.com/en-us/microsoft-edge/testdrive/demos/variable-fonts + +Installing fonts + + MacOS: https://support.apple.com/en-us/HT201749 + Linux: https://www.google.com/search?q=how+to+install+a+font+on+gnu%2Blinux + Windows: https://support.microsoft.com/en-us/help/314960/how-to-install-or-remove-a-font-in-windows + +Android Apps + + https://developers.google.com/fonts/docs/android + https://developer.android.com/guide/topics/ui/look-and-feel/downloadable-fonts + +License +------- +Please read the full license text (OFL.txt) to understand the permissions, +restrictions and requirements for usage, redistribution, and modification. + +You can use them freely in your products & projects - print or digital, +commercial or otherwise. 
+ +This isn't legal advice, please consider consulting a lawyer and see the full +license for all details. diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Black.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Black.ttf new file mode 100644 index 0000000000..75fe4b4fe9 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Black.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Bold.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Bold.ttf new file mode 100644 index 0000000000..9cefe497da Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Bold.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-ExtraBold.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-ExtraBold.ttf new file mode 100644 index 0000000000..9961afc716 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-ExtraBold.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-ExtraLight.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-ExtraLight.ttf new file mode 100644 index 0000000000..03ab3f87f2 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-ExtraLight.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Light.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Light.ttf new file mode 100644 index 0000000000..19a5af2422 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Light.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Medium.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Medium.ttf new 
file mode 100644 index 0000000000..62231544b0 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Medium.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Regular.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Regular.ttf new file mode 100644 index 0000000000..a850b21ca3 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Regular.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-SemiBold.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-SemiBold.ttf new file mode 100644 index 0000000000..0f4dffc421 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-SemiBold.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Thin.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Thin.ttf new file mode 100644 index 0000000000..0ecd83c350 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Thin.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Black.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Black.ttf new file mode 100644 index 0000000000..77ef132a1c Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Black.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Bold.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Bold.ttf new file mode 100644 index 0000000000..41dbc9e543 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Bold.ttf differ diff --git 
a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-ExtraBold.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-ExtraBold.ttf new file mode 100644 index 0000000000..640ae09cec Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-ExtraBold.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-ExtraLight.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-ExtraLight.ttf new file mode 100644 index 0000000000..02fe86abbb Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-ExtraLight.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Light.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Light.ttf new file mode 100644 index 0000000000..a0dfac1f80 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Light.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Medium.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Medium.ttf new file mode 100644 index 0000000000..72a1fa5a87 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Medium.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Regular.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Regular.ttf new file mode 100644 index 0000000000..8e8591cd89 Binary files /dev/null and 
b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Regular.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-SemiBold.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-SemiBold.ttf new file mode 100644 index 0000000000..b7843ceb04 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-SemiBold.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Thin.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Thin.ttf new file mode 100644 index 0000000000..42f4493555 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Thin.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Black.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Black.ttf new file mode 100644 index 0000000000..6ad6ad9188 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Black.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Bold.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Bold.ttf new file mode 100644 index 0000000000..4cdda1512c Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Bold.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-ExtraBold.ttf 
b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-ExtraBold.ttf new file mode 100644 index 0000000000..0d428829a9 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-ExtraBold.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-ExtraLight.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-ExtraLight.ttf new file mode 100644 index 0000000000..c3b01f97c4 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-ExtraLight.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Light.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Light.ttf new file mode 100644 index 0000000000..be5b1209e8 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Light.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Medium.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Medium.ttf new file mode 100644 index 0000000000..5fbb4d9a55 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Medium.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Regular.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Regular.ttf new file mode 100644 index 0000000000..eac82bf3b4 Binary files /dev/null and 
b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Regular.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-SemiBold.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-SemiBold.ttf new file mode 100644 index 0000000000..9a75e32feb Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-SemiBold.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Thin.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Thin.ttf new file mode 100644 index 0000000000..b710820d7e Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Thin.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Black.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Black.ttf new file mode 100644 index 0000000000..ef0f93add8 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Black.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Bold.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Bold.ttf new file mode 100644 index 0000000000..bb7091a355 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Bold.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-ExtraBold.ttf 
b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-ExtraBold.ttf new file mode 100644 index 0000000000..a737a65a72 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-ExtraBold.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-ExtraLight.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-ExtraLight.ttf new file mode 100644 index 0000000000..2a95000602 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-ExtraLight.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Light.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Light.ttf new file mode 100644 index 0000000000..07906bdabe Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Light.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Medium.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Medium.ttf new file mode 100644 index 0000000000..89d75e39f8 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Medium.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Regular.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Regular.ttf new file mode 100644 index 0000000000..0c654e79b1 Binary files /dev/null and 
b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Regular.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-SemiBold.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-SemiBold.ttf new file mode 100644 index 0000000000..e93689fefd Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-SemiBold.ttf differ diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Thin.ttf b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Thin.ttf new file mode 100644 index 0000000000..b4f1804a74 Binary files /dev/null and b/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Thin.ttf differ diff --git a/openpype/style/fonts/RobotoMono/LICENSE.txt b/openpype/style/fonts/RobotoMono/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/openpype/style/fonts/RobotoMono/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/openpype/style/fonts/RobotoMono/README.txt b/openpype/style/fonts/RobotoMono/README.txt deleted file mode 100644 index 1bc1b1cfa2..0000000000 --- a/openpype/style/fonts/RobotoMono/README.txt +++ /dev/null @@ -1,77 +0,0 @@ -Roboto Mono Variable Font -========================= - -This download contains Roboto Mono as both variable fonts and static fonts. - -Roboto Mono is a variable font with this axis: - wght - -This means all the styles are contained in these files: - RobotoMono-VariableFont_wght.ttf - RobotoMono-Italic-VariableFont_wght.ttf - -If your app fully supports variable fonts, you can now pick intermediate styles -that aren’t available as static fonts. Not all apps support variable fonts, and -in those cases you can use the static font files for Roboto Mono: - static/RobotoMono-Thin.ttf - static/RobotoMono-ExtraLight.ttf - static/RobotoMono-Light.ttf - static/RobotoMono-Regular.ttf - static/RobotoMono-Medium.ttf - static/RobotoMono-SemiBold.ttf - static/RobotoMono-Bold.ttf - static/RobotoMono-ThinItalic.ttf - static/RobotoMono-ExtraLightItalic.ttf - static/RobotoMono-LightItalic.ttf - static/RobotoMono-Italic.ttf - static/RobotoMono-MediumItalic.ttf - static/RobotoMono-SemiBoldItalic.ttf - static/RobotoMono-BoldItalic.ttf - -Get started ------------ - -1. Install the font files you want to use - -2. 
Use your app's font picker to view the font family and all the -available styles - -Learn more about variable fonts -------------------------------- - - https://developers.google.com/web/fundamentals/design-and-ux/typography/variable-fonts - https://variablefonts.typenetwork.com - https://medium.com/variable-fonts - -In desktop apps - - https://theblog.adobe.com/can-variable-fonts-illustrator-cc - https://helpx.adobe.com/nz/photoshop/using/fonts.html#variable_fonts - -Online - - https://developers.google.com/fonts/docs/getting_started - https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Fonts/Variable_Fonts_Guide - https://developer.microsoft.com/en-us/microsoft-edge/testdrive/demos/variable-fonts - -Installing fonts - - MacOS: https://support.apple.com/en-us/HT201749 - Linux: https://www.google.com/search?q=how+to+install+a+font+on+gnu%2Blinux - Windows: https://support.microsoft.com/en-us/help/314960/how-to-install-or-remove-a-font-in-windows - -Android Apps - - https://developers.google.com/fonts/docs/android - https://developer.android.com/guide/topics/ui/look-and-feel/downloadable-fonts - -License -------- -Please read the full license text (LICENSE.txt) to understand the permissions, -restrictions and requirements for usage, redistribution, and modification. - -You can use them freely in your products & projects - print or digital, -commercial or otherwise. - -This isn't legal advice, please consider consulting a lawyer and see the full -license for all details. 
diff --git a/openpype/style/fonts/RobotoMono/RobotoMono-Italic-VariableFont_wght.ttf b/openpype/style/fonts/RobotoMono/RobotoMono-Italic-VariableFont_wght.ttf deleted file mode 100644 index d30055a9e8..0000000000 Binary files a/openpype/style/fonts/RobotoMono/RobotoMono-Italic-VariableFont_wght.ttf and /dev/null differ diff --git a/openpype/style/fonts/RobotoMono/RobotoMono-VariableFont_wght.ttf b/openpype/style/fonts/RobotoMono/RobotoMono-VariableFont_wght.ttf deleted file mode 100644 index d2b4746196..0000000000 Binary files a/openpype/style/fonts/RobotoMono/RobotoMono-VariableFont_wght.ttf and /dev/null differ diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-Bold.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-Bold.ttf deleted file mode 100644 index 900fce6848..0000000000 Binary files a/openpype/style/fonts/RobotoMono/static/RobotoMono-Bold.ttf and /dev/null differ diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-BoldItalic.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-BoldItalic.ttf deleted file mode 100644 index 4bfe29ae89..0000000000 Binary files a/openpype/style/fonts/RobotoMono/static/RobotoMono-BoldItalic.ttf and /dev/null differ diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-ExtraLight.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-ExtraLight.ttf deleted file mode 100644 index d535884553..0000000000 Binary files a/openpype/style/fonts/RobotoMono/static/RobotoMono-ExtraLight.ttf and /dev/null differ diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-ExtraLightItalic.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-ExtraLightItalic.ttf deleted file mode 100644 index b28960a0ee..0000000000 Binary files a/openpype/style/fonts/RobotoMono/static/RobotoMono-ExtraLightItalic.ttf and /dev/null differ diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-Italic.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-Italic.ttf deleted file mode 100644 index 
4ee4dc49b4..0000000000 Binary files a/openpype/style/fonts/RobotoMono/static/RobotoMono-Italic.ttf and /dev/null differ diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-Light.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-Light.ttf deleted file mode 100644 index 276af4c55a..0000000000 Binary files a/openpype/style/fonts/RobotoMono/static/RobotoMono-Light.ttf and /dev/null differ diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-LightItalic.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-LightItalic.ttf deleted file mode 100644 index a2801c2168..0000000000 Binary files a/openpype/style/fonts/RobotoMono/static/RobotoMono-LightItalic.ttf and /dev/null differ diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-Medium.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-Medium.ttf deleted file mode 100644 index 8461be77a3..0000000000 Binary files a/openpype/style/fonts/RobotoMono/static/RobotoMono-Medium.ttf and /dev/null differ diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-MediumItalic.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-MediumItalic.ttf deleted file mode 100644 index a3bfaa115a..0000000000 Binary files a/openpype/style/fonts/RobotoMono/static/RobotoMono-MediumItalic.ttf and /dev/null differ diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-Regular.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-Regular.ttf deleted file mode 100644 index 7c4ce36a44..0000000000 Binary files a/openpype/style/fonts/RobotoMono/static/RobotoMono-Regular.ttf and /dev/null differ diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-SemiBold.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-SemiBold.ttf deleted file mode 100644 index 15ee6c6e40..0000000000 Binary files a/openpype/style/fonts/RobotoMono/static/RobotoMono-SemiBold.ttf and /dev/null differ diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-SemiBoldItalic.ttf 
b/openpype/style/fonts/RobotoMono/static/RobotoMono-SemiBoldItalic.ttf deleted file mode 100644 index 8e21497793..0000000000 Binary files a/openpype/style/fonts/RobotoMono/static/RobotoMono-SemiBoldItalic.ttf and /dev/null differ diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-Thin.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-Thin.ttf deleted file mode 100644 index ee8a3fd41a..0000000000 Binary files a/openpype/style/fonts/RobotoMono/static/RobotoMono-Thin.ttf and /dev/null differ diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-ThinItalic.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-ThinItalic.ttf deleted file mode 100644 index 40b01e40de..0000000000 Binary files a/openpype/style/fonts/RobotoMono/static/RobotoMono-ThinItalic.ttf and /dev/null differ diff --git a/openpype/style/fonts/Spartan/README.txt b/openpype/style/fonts/Spartan/README.txt deleted file mode 100644 index 9db64aff0b..0000000000 --- a/openpype/style/fonts/Spartan/README.txt +++ /dev/null @@ -1,71 +0,0 @@ -Spartan Variable Font -===================== - -This download contains Spartan as both a variable font and static fonts. - -Spartan is a variable font with this axis: - wght - -This means all the styles are contained in a single file: - Spartan-VariableFont_wght.ttf - -If your app fully supports variable fonts, you can now pick intermediate styles -that aren’t available as static fonts. Not all apps support variable fonts, and -in those cases you can use the static font files for Spartan: - static/Spartan-Thin.ttf - static/Spartan-ExtraLight.ttf - static/Spartan-Light.ttf - static/Spartan-Regular.ttf - static/Spartan-Medium.ttf - static/Spartan-SemiBold.ttf - static/Spartan-Bold.ttf - static/Spartan-ExtraBold.ttf - static/Spartan-Black.ttf - -Get started ------------ - -1. Install the font files you want to use - -2. 
Use your app's font picker to view the font family and all the -available styles - -Learn more about variable fonts -------------------------------- - - https://developers.google.com/web/fundamentals/design-and-ux/typography/variable-fonts - https://variablefonts.typenetwork.com - https://medium.com/variable-fonts - -In desktop apps - - https://theblog.adobe.com/can-variable-fonts-illustrator-cc - https://helpx.adobe.com/nz/photoshop/using/fonts.html#variable_fonts - -Online - - https://developers.google.com/fonts/docs/getting_started - https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Fonts/Variable_Fonts_Guide - https://developer.microsoft.com/en-us/microsoft-edge/testdrive/demos/variable-fonts - -Installing fonts - - MacOS: https://support.apple.com/en-us/HT201749 - Linux: https://www.google.com/search?q=how+to+install+a+font+on+gnu%2Blinux - Windows: https://support.microsoft.com/en-us/help/314960/how-to-install-or-remove-a-font-in-windows - -Android Apps - - https://developers.google.com/fonts/docs/android - https://developer.android.com/guide/topics/ui/look-and-feel/downloadable-fonts - -License -------- -Please read the full license text (OFL.txt) to understand the permissions, -restrictions and requirements for usage, redistribution, and modification. - -You can use them freely in your products & projects - print or digital, -commercial or otherwise. However, you can't sell the fonts on their own. - -This isn't legal advice, please consider consulting a lawyer and see the full -license for all details. 
diff --git a/openpype/style/fonts/Spartan/Spartan-Black.ttf b/openpype/style/fonts/Spartan/Spartan-Black.ttf deleted file mode 100644 index 5d3147011e..0000000000 Binary files a/openpype/style/fonts/Spartan/Spartan-Black.ttf and /dev/null differ diff --git a/openpype/style/fonts/Spartan/Spartan-Bold.ttf b/openpype/style/fonts/Spartan/Spartan-Bold.ttf deleted file mode 100644 index 5fe4b702b2..0000000000 Binary files a/openpype/style/fonts/Spartan/Spartan-Bold.ttf and /dev/null differ diff --git a/openpype/style/fonts/Spartan/Spartan-ExtraBold.ttf b/openpype/style/fonts/Spartan/Spartan-ExtraBold.ttf deleted file mode 100644 index 1030b6dec0..0000000000 Binary files a/openpype/style/fonts/Spartan/Spartan-ExtraBold.ttf and /dev/null differ diff --git a/openpype/style/fonts/Spartan/Spartan-ExtraLight.ttf b/openpype/style/fonts/Spartan/Spartan-ExtraLight.ttf deleted file mode 100644 index aced3a9e94..0000000000 Binary files a/openpype/style/fonts/Spartan/Spartan-ExtraLight.ttf and /dev/null differ diff --git a/openpype/style/fonts/Spartan/Spartan-Light.ttf b/openpype/style/fonts/Spartan/Spartan-Light.ttf deleted file mode 100644 index 3bb6efa40e..0000000000 Binary files a/openpype/style/fonts/Spartan/Spartan-Light.ttf and /dev/null differ diff --git a/openpype/style/fonts/Spartan/Spartan-Medium.ttf b/openpype/style/fonts/Spartan/Spartan-Medium.ttf deleted file mode 100644 index 94b22ecc08..0000000000 Binary files a/openpype/style/fonts/Spartan/Spartan-Medium.ttf and /dev/null differ diff --git a/openpype/style/fonts/Spartan/Spartan-Regular.ttf b/openpype/style/fonts/Spartan/Spartan-Regular.ttf deleted file mode 100644 index 7560322e3f..0000000000 Binary files a/openpype/style/fonts/Spartan/Spartan-Regular.ttf and /dev/null differ diff --git a/openpype/style/fonts/Spartan/Spartan-SemiBold.ttf b/openpype/style/fonts/Spartan/Spartan-SemiBold.ttf deleted file mode 100644 index 7a5f74adb3..0000000000 Binary files a/openpype/style/fonts/Spartan/Spartan-SemiBold.ttf and 
/dev/null differ diff --git a/openpype/style/fonts/Spartan/Spartan-Thin.ttf b/openpype/style/fonts/Spartan/Spartan-Thin.ttf deleted file mode 100644 index 4caa0b2be9..0000000000 Binary files a/openpype/style/fonts/Spartan/Spartan-Thin.ttf and /dev/null differ diff --git a/openpype/style/fonts/Spartan/Spartan-VariableFont_wght.ttf b/openpype/style/fonts/Spartan/Spartan-VariableFont_wght.ttf deleted file mode 100644 index b2dd7c3076..0000000000 Binary files a/openpype/style/fonts/Spartan/Spartan-VariableFont_wght.ttf and /dev/null differ diff --git a/openpype/style/style.css b/openpype/style/style.css index 1e457f97f6..a60c3592d7 100644 --- a/openpype/style/style.css +++ b/openpype/style/style.css @@ -19,8 +19,8 @@ Enabled vs Disabled logic in most of stylesheets */ * { - font-size: 9pt; - font-family: "Spartan"; + font-size: 10pt; + font-family: "Noto Sans"; font-weight: 450; outline: none; } @@ -325,47 +325,38 @@ QTabWidget::pane { /* move to the right to not mess with borders of widget underneath */ QTabWidget::tab-bar { - left: 2px; + alignment: left; } QTabBar::tab { - padding: 5px; - border-left: 3px solid transparent; border-top: 1px solid {color:border}; + border-left: 1px solid {color:border}; border-right: 1px solid {color:border}; - /* must be single like because of Nuke*/ - background: qlineargradient(x1: 0, y1: 1, x2: 0, y2: 0,stop: 0.5 {color:bg}, stop: 1.0 {color:bg-inputs}); + padding: 5px; + background: {color:tab-widget:bg}; + color: {color:tab-widget:color}; } QTabBar::tab:selected { - background: {color:grey-lighter}; - border-left: 3px solid {color:border-focus}; - /* must be single like because of Nuke*/ - background: qlineargradient(x1: 0, y1: 1, x2: 0, y2: 0,stop: 0.5 {color:bg}, stop: 1.0 {color:border}); -} - -QTabBar::tab:!selected { - background: {color:grey-light}; + border-left-color: {color:tab-widget:bg-selected}; + border-right-color: {color:tab-widget:bg-selected}; + border-top-color: {color:border-focus}; + background: 
{color:tab-widget:bg-selected}; + color: {color:tab-widget:color-selected}; } +QTabBar::tab:!selected {} QTabBar::tab:!selected:hover { - background: {color:grey-lighter}; + background: {color:tab-widget:bg-hover}; + color: {color:tab-widget:color-hover}; } -QTabBar::tab:first { - border-left: 1px solid {color:border}; -} -QTabBar::tab:first:selected { - margin-left: 0; - border-left: 3px solid {color:border-focus}; -} - -QTabBar::tab:last:selected { - margin-right: 0; -} - -QTabBar::tab:only-one { - margin: 0; +QTabBar::tab:first {} +QTabBar::tab:first:selected {} +QTabBar::tab:last:!selected { + border-right: 1px solid {color:border}; } +QTabBar::tab:last:selected {} +QTabBar::tab:only-one {} QHeaderView { border: 0px solid {color:border}; @@ -669,15 +660,6 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { background: none; } -/* Globally used names */ -#Separator { - background: {color:bg-menu-separator}; -} - -#IconButton { - padding: 4px 4px 4px 4px; -} - /* Password dialog*/ #PasswordBtn { border: none; @@ -722,20 +704,19 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { } #CompleterView::item { - padding: 2px 4px 2px 4px; - border-left: 3px solid {color:bg-view}; + background: {color:bg-view-hover}; + color: {color:font}; + padding-left: 0px; } #CompleterView::item:hover { - border-left-color: {palette:blue-base}; - background: {color:bg-view-selection}; - color: {color:font}; + background: {color:bg-view-hover}; } /* Launcher specific stylesheets */ #IconView[mode="icon"] { /* font size can't be set on items */ - font-size: 8pt; + font-size: 9pt; border: 0px; padding: 0px; margin: 0px; @@ -766,9 +747,45 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { border-radius: 0.1em; } +/* Subset Manager */ +#SubsetManagerDetailsText {} +#SubsetManagerDetailsText[state="invalid"] { + border: 1px solid #ff0000; +} + +/* Creator */ +#CreatorsView::item { + padding: 1px 5px; +} + +#CreatorFamilyLabel { + font-size: 10pt; + 
font-weight: bold; +} + +/* Scene Inventory */ +#ButtonWithMenu { + padding-right: 16px; + border: 1px solid #4A4949; + border-radius: 2px; +} +#ButtonWithMenu::menu-button { + border: 1px solid #4A4949; + width: 12px; + border-top-left-radius: 0px; + border-top-right-radius: 2px; + border-bottom-right-radius: 2px; + border-bottom-left-radius: 0px; +} + +#ButtonWithMenu[state="1"], #ButtonWithMenu[state="1"]::menu-button, #ButtonWithMenu[state="1"]::menu-button:hover { + border-color: green; +} + /* Python console interpreter */ #PythonInterpreterOutput, #PythonCodeEditor { - font-family: "Roboto Mono"; + font-family: "Noto Sans Mono"; + border-radius: 0px; } #SubsetView::item, #RepresentationView:item { @@ -786,7 +803,7 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { /* New Create/Publish UI */ #PublishLogConsole { - font-family: "Roboto Mono"; + font-family: "Noto Sans Mono"; } #VariantInput[state="new"], #VariantInput[state="new"]:focus, #VariantInput[state="new"]:hover { @@ -945,7 +962,235 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { background: transparent; } +/* Settings - NOT USED YET +- we need to define font family for settings UI */ + +#SettingsMainWidget { + background: #141a1f; +} +/* Change focus borders. 
*/ +#SettingsMainWidget QAbstractSpinBox:focus, #SettingsMainWidget QLineEdit:focus, #SettingsMainWidget QPlainTextEdit:focus, #SettingsMainWidget QTextEdit:focus { + border-color: {color:settings:focus-border}; +} +/* Modify tab widget for settings */ +#SettingsMainWidget QTabWidget::pane { + border-top-style: none; +} + +#SettingsMainWidget QTabBar { + background: transparent; +} + +#SettingsMainWidget QTabBar::tab { + border: none; + border-top-left-radius: 4px; + border-top-right-radius: 4px; + padding: 5px; +} + +#SettingsMainWidget QTabBar::tab:selected { + background: {color:bg}; + border-color: #9B9B9B; + border-bottom-color: #C2C7CB; +} + +#SettingsMainWidget QTabBar::tab:!selected { + margin-top: 2px; + background: #21252B; +} + +#SettingsMainWidget QTabBar::tab:!selected:hover { + background: #333840; +} + +#SettingsMainWidget QTabBar::tab:first:selected { + margin-left: 0; +} + +#SettingsMainWidget QTabBar::tab:last:selected { + margin-right: 0; +} + +#SettingsMainWidget QTabBar::tab:only-one { + margin: 0; +} + +#SettingsToolIconBtn { + border: 0px solid #bfccd6; + background-color: transparent; +} + +#SettingsToolBtn { + border: 1px solid #bfccd6; + border-radius: 10px; + background-color: transparent; +} + +#SettingsToolBtn:hover { + border-color: #189aea; + color: {color:settings:modified-light}; + background-color: transparent; +} +#SettingsToolBtn:disabled { + background-color: #464b54; +} + +#ExpandToggleBtn { + background: transparent; +} + +#SettingsLabel { + background: transparent; + color: {color:settings:label-fg}; +} +#SettingsLabel:hover {color: {color:settings:label-fg-hover};} +#SettingsLabel[state="studio"] {color: {color:settings:studio-light};} +#SettingsLabel[state="studio"]:hover {color: {color:settings:studio-label-hover};} +#SettingsLabel[state="modified"] {color: {color:settings:modified-mid};} +#SettingsLabel[state="modified"]:hover {color: {color:settings:modified-light};} +#SettingsLabel[state="overriden-modified"] {color: 
{color:settings:modified-mid};} +#SettingsLabel[state="overriden-modified"]:hover {color: {color:settings:modified-light};} +#SettingsLabel[state="overriden"] {color: {color:settings:project-mid};} +#SettingsLabel[state="overriden"]:hover {color: {color:settings:project-light};} +#SettingsLabel[state="invalid"] {color:{color:settings:invalid-dark};} +#SettingsLabel[state="invalid"]:hover {color: {color:settings:invalid-dark};} + +/* TODO Replace these with explicit widget types if possible */ +#SettingsMainWidget QWidget[input-state="modified"] { + border-color: {color:settings:modified-mid}; +} +#SettingsMainWidget QWidget[input-state="overriden-modified"] { + border-color: {color:settings:modified-mid}; +} +#SettingsMainWidget QWidget[input-state="overriden"] { + border-color: {color:settings:project-mid}; +} +#SettingsMainWidget QWidget[input-state="invalid"] { + border-color: {color:settings:invalid-dark}; +} + +#GroupWidget { + border-bottom: 1px solid #21252B; +} + +#ProjectListWidget QLabel { + background: transparent; + font-weight: bold; +} + +#MultiSelectionComboBox { + font-size: 12px; +} + +#DictKey[state="modified"] {border-color: {color:settings:modified-mid};} +#DictKey[state="invalid"] {border-color: {color:settings:invalid-dark};} + +#ExpandLabel { + font-weight: bold; + color: {color:settings:label-fg}; +} +#ExpandLabel:hover { + color: {color:settings:label-fg-hover}; +} + +#ContentWidget { + background-color: transparent; +} +#ContentWidget[content_state="hightlighted"] { + background-color: {color:settings:content-hightlighted}; +} + +#SideLineWidget { + background-color: #333942; + border-style: solid; + border-color: #4e5254; + border-left-width: 3px; + border-bottom-width: 0px; + border-right-width: 0px; + border-top-width: 0px; +} + +#SideLineWidget:hover { + border-color: #7d8386; +} + +#SideLineWidget[state="child-studio"] {border-color: {color:settings:studio-dark};} +#SideLineWidget[state="child-studio"]:hover {border-color: 
{color:settings:studio-light};} + +#SideLineWidget[state="child-modified"] {border-color: {color:settings:modified-dark};} +#SideLineWidget[state="child-modified"]:hover {border-color: {color:settings:modified-mid};} + +#SideLineWidget[state="child-invalid"] {border-color: {color:settings:invalid-dark};} +#SideLineWidget[state="child-invalid"]:hover {border-color: {color:settings:invalid-light};} + +#SideLineWidget[state="child-overriden"] {border-color: {color:settings:project-dark};} +#SideLineWidget[state="child-overriden"]:hover {border-color: {color:settings:project-mid};} + +#SideLineWidget[state="child-overriden-modified"] {border-color: {color:settings:modified-dark};} +#SideLineWidget[state="child-overriden-modified"]:hover {border-color: {color:settings:modified-mid};} + +#DictAsWidgetBody { + background: transparent; +} +#DictAsWidgetBody[show_borders="1"] { + border: 1px solid #4e5254; + border-radius: 5px; +} + +#ShadowWidget { + font-size: 36pt; +} + +#BreadcrumbsPathInput { + padding: 2px; + font-size: 9pt; +} + +#BreadcrumbsButton { + padding-right: 12px; + font-size: 9pt; + background: transparent; +} + +#BreadcrumbsButton[empty="1"] { + padding-right: 0px; +} + +#BreadcrumbsButton::menu-button { + border: none; + width: 12px; + background: {color:settings:breadcrumbs-btn-bg}; +} +#BreadcrumbsButton::menu-button:hover { + background: {color:settings:breadcrumbs-btn-bg-hover}; +} + +#BreadcrumbsPanel { + border: 1px solid #4e5254; + border-radius: 5px; + background: #21252B; +} + +/* Globally used names */ +#Separator { + background: {color:bg-menu-separator}; +} + +#IconButton { + padding: 4px 4px 4px 4px; +} + #NiceCheckbox { /* Default size hint of NiceCheckbox is defined by font size. 
*/ font-size: 7pt; } + +#ImageButton { + padding: 0; + background: transparent; + font-size: 11pt; +} + +#ImageButton:disabled { + background: {color:bg-buttons-disabled}; +} diff --git a/openpype/tests/mongo_performance.py b/openpype/tests/mongo_performance.py index 9220c6c730..2df3363f4b 100644 --- a/openpype/tests/mongo_performance.py +++ b/openpype/tests/mongo_performance.py @@ -104,8 +104,8 @@ class TestPerformance(): "name": "mb", "parent": {"oid": '{}'.format(id)}, "data": { - "path": "C:\\projects\\test_performance\\Assets\\Cylinder\\publish\\workfile\\workfileLookdev\\{}\\{}".format(version_str, file_name), # noqa - "template": "{root[work]}\\{project[name]}\\{hierarchy}\\{asset}\\publish\\{family}\\{subset}\\v{version:0>3}\\{project[code]}_{asset}_{subset}_v{version:0>3}<_{output}><.{frame:0>4}>.{representation}" # noqa + "path": "C:\\projects\\test_performance\\Assets\\Cylinder\\publish\\workfile\\workfileLookdev\\{}\\{}".format(version_str, file_name), # noqa: E501 + "template": "{root[work]}\\{project[name]}\\{hierarchy}\\{asset}\\publish\\{family}\\{subset}\\v{version:0>3}\\{project[code]}_{asset}_{subset}_v{version:0>3}<_{output}><.{frame:0>4}>.{representation}" # noqa: E501 }, "type": "representation", "schema": "openpype:representation-2.0" @@ -188,21 +188,21 @@ class TestPerformance(): create_files=False): ret = [ { - "path": "{root[work]}" + "{root[work]}/test_performance/Assets/Cylinder/publish/workfile/workfileLookdev/v{:03d}/test_Cylinder_A_workfileLookdev_v{:03d}.dat".format(i, i), #noqa + "path": "{root[work]}" + "{root[work]}/test_performance/Assets/Cylinder/publish/workfile/workfileLookdev/v{:03d}/test_Cylinder_A_workfileLookdev_v{:03d}.dat".format(i, i), # noqa: E501 "_id": '{}'.format(file_id), "hash": "temphash", "sites": self.get_sites(self.MAX_NUMBER_OF_SITES), "size": random.randint(0, self.MAX_FILE_SIZE_B) }, { - "path": "{root[work]}" + 
"/test_performance/Assets/Cylinder/publish/workfile/workfileLookdev/v{:03d}/test_Cylinder_B_workfileLookdev_v{:03d}.dat".format(i, i), #noqa + "path": "{root[work]}" + "/test_performance/Assets/Cylinder/publish/workfile/workfileLookdev/v{:03d}/test_Cylinder_B_workfileLookdev_v{:03d}.dat".format(i, i), # noqa: E501 "_id": '{}'.format(file_id2), "hash": "temphash", "sites": self.get_sites(self.MAX_NUMBER_OF_SITES), "size": random.randint(0, self.MAX_FILE_SIZE_B) }, { - "path": "{root[work]}" + "/test_performance/Assets/Cylinder/publish/workfile/workfileLookdev/v{:03d}/test_Cylinder_C_workfileLookdev_v{:03d}.dat".format(i, i), #noqa + "path": "{root[work]}" + "/test_performance/Assets/Cylinder/publish/workfile/workfileLookdev/v{:03d}/test_Cylinder_C_workfileLookdev_v{:03d}.dat".format(i, i), # noqa: E501 "_id": '{}'.format(file_id3), "hash": "temphash", "sites": self.get_sites(self.MAX_NUMBER_OF_SITES), @@ -223,8 +223,8 @@ class TestPerformance(): ret = {} ret['{}'.format(file_id)] = { "path": "{root[work]}" + - "/test_performance/Assets/Cylinder/publish/workfile/workfileLookdev/" #noqa - "v{:03d}/test_CylinderA_workfileLookdev_v{:03d}.mb".format(i, i), # noqa + "/test_performance/Assets/Cylinder/publish/workfile/workfileLookdev/" # noqa: E501 + "v{:03d}/test_CylinderA_workfileLookdev_v{:03d}.mb".format(i, i), # noqa: E501 "hash": "temphash", "sites": ["studio"], "size": 87236 @@ -232,16 +232,16 @@ class TestPerformance(): ret['{}'.format(file_id2)] = { "path": "{root[work]}" + - "/test_performance/Assets/Cylinder/publish/workfile/workfileLookdev/" #noqa - "v{:03d}/test_CylinderB_workfileLookdev_v{:03d}.mb".format(i, i), # noqa + "/test_performance/Assets/Cylinder/publish/workfile/workfileLookdev/" # noqa: E501 + "v{:03d}/test_CylinderB_workfileLookdev_v{:03d}.mb".format(i, i), # noqa: E501 "hash": "temphash", "sites": ["studio"], "size": 87236 } ret['{}'.format(file_id3)] = { "path": "{root[work]}" + - 
"/test_performance/Assets/Cylinder/publish/workfile/workfileLookdev/" #noqa - "v{:03d}/test_CylinderC_workfileLookdev_v{:03d}.mb".format(i, i), # noqa + "/test_performance/Assets/Cylinder/publish/workfile/workfileLookdev/" # noqa: E501 + "v{:03d}/test_CylinderC_workfileLookdev_v{:03d}.mb".format(i, i), # noqa: E501 "hash": "temphash", "sites": ["studio"], "size": 87236 diff --git a/openpype/tools/assetcreator/app.py b/openpype/tools/assetcreator/app.py index 5c2553e81e..58697e8aa3 100644 --- a/openpype/tools/assetcreator/app.py +++ b/openpype/tools/assetcreator/app.py @@ -1,16 +1,12 @@ import os import sys -import json from subprocess import Popen -try: - import ftrack_api_old as ftrack_api -except Exception: - import ftrack_api + +import ftrack_api +from Qt import QtWidgets, QtCore from openpype.api import get_current_project_settings -from openpype import lib as pypelib -from avalon.vendor.Qt import QtWidgets, QtCore +from openpype.tools.utils.lib import qt_app_context from avalon import io, api, style, schema -from avalon.tools import lib as parentlib from . 
import widget, model module = sys.modules[__name__] @@ -630,7 +626,7 @@ def show(parent=None, debug=False, context=None): if debug is True: io.install() - with parentlib.application(): + with qt_app_context(): window = Window(parent, context) window.setStyleSheet(style.load_stylesheet()) window.show() diff --git a/openpype/tools/assetcreator/model.py b/openpype/tools/assetcreator/model.py index 3af1d77127..f84541ca2a 100644 --- a/openpype/tools/assetcreator/model.py +++ b/openpype/tools/assetcreator/model.py @@ -1,8 +1,7 @@ import re import logging -import collections -from avalon.vendor.Qt import QtCore, QtWidgets +from Qt import QtCore, QtWidgets from avalon.vendor import qtawesome from avalon import io from avalon import style diff --git a/openpype/tools/assetlinks/__init__.py b/openpype/tools/assetlinks/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/tools/assetlinks/widgets.py b/openpype/tools/assetlinks/widgets.py new file mode 100644 index 0000000000..22e8848a60 --- /dev/null +++ b/openpype/tools/assetlinks/widgets.py @@ -0,0 +1,90 @@ + +from Qt import QtWidgets + + +class SimpleLinkView(QtWidgets.QWidget): + + def __init__(self, dbcon, parent=None): + super(SimpleLinkView, self).__init__(parent=parent) + self.dbcon = dbcon + + # TODO: display selected target + + in_text = QtWidgets.QLabel("Inputs") + in_view = QtWidgets.QListWidget(parent=self) + out_text = QtWidgets.QLabel("Outputs") + out_view = QtWidgets.QListWidget(parent=self) + + layout = QtWidgets.QGridLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(in_text, 0, 0) + layout.addWidget(in_view, 1, 0) + layout.addWidget(out_text, 0, 1) + layout.addWidget(out_view, 1, 1) + + self._in_view = in_view + self._out_view = out_view + + def clear(self): + self._in_view.clear() + self._out_view.clear() + + def set_version(self, version_doc): + self.clear() + if not version_doc or not self.isVisible(): + return + + # inputs + # + for link in 
version_doc["data"].get("inputLinks", []): + # Backwards compatibility for "input" key used as "id" + if "id" not in link: + link_id = link["input"] + else: + link_id = link["id"] + version = self.dbcon.find_one( + {"_id": link_id, "type": "version"}, + projection={"name": 1, "parent": 1} + ) + if not version: + continue + subset = self.dbcon.find_one( + {"_id": version["parent"], "type": "subset"}, + projection={"name": 1, "parent": 1} + ) + if not subset: + continue + asset = self.dbcon.find_one( + {"_id": subset["parent"], "type": "asset"}, + projection={"name": 1} + ) + + self._in_view.addItem("{asset} {subset} v{version:0>3}".format( + asset=asset["name"], + subset=subset["name"], + version=version["name"], + )) + + # outputs + # + outputs = self.dbcon.find( + {"type": "version", "data.inputLinks.input": version_doc["_id"]}, + projection={"name": 1, "parent": 1} + ) + for version in outputs or []: + subset = self.dbcon.find_one( + {"_id": version["parent"], "type": "subset"}, + projection={"name": 1, "parent": 1} + ) + if not subset: + continue + asset = self.dbcon.find_one( + {"_id": subset["parent"], "type": "asset"}, + projection={"name": 1} + ) + + self._out_view.addItem("{asset} {subset} v{version:0>3}".format( + asset=asset["name"], + subset=subset["name"], + version=version["name"], + )) diff --git a/openpype/tools/context_dialog/window.py b/openpype/tools/context_dialog/window.py index 124a1beda3..5d8a2ad62e 100644 --- a/openpype/tools/context_dialog/window.py +++ b/openpype/tools/context_dialog/window.py @@ -6,16 +6,14 @@ from avalon.api import AvalonMongoDB from openpype import style from openpype.tools.utils.lib import center_window -from openpype.tools.utils.widgets import AssetWidget +from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget from openpype.tools.utils.constants import ( - TASK_NAME_ROLE, PROJECT_NAME_ROLE ) +from openpype.tools.utils.tasks_widget import TasksWidget from openpype.tools.utils.models import ( 
ProjectModel, - ProjectSortFilterProxy, - TasksModel, - TasksProxyModel + ProjectSortFilterProxy ) @@ -67,8 +65,8 @@ class ContextDialog(QtWidgets.QDialog): project_combobox.setModel(project_proxy) # Assets widget - assets_widget = AssetWidget( - dbcon, multiselection=False, parent=left_side_widget + assets_widget = SingleSelectAssetsWidget( + dbcon, parent=left_side_widget ) left_side_layout = QtWidgets.QVBoxLayout(left_side_widget) @@ -77,15 +75,11 @@ class ContextDialog(QtWidgets.QDialog): left_side_layout.addWidget(assets_widget) # Right side of window contains only tasks - task_view = QtWidgets.QListView(main_splitter) - task_model = TasksModel(dbcon) - task_proxy = TasksProxyModel() - task_proxy.setSourceModel(task_model) - task_view.setModel(task_proxy) + tasks_widget = TasksWidget(dbcon, main_splitter) # Add widgets to main splitter main_splitter.addWidget(left_side_widget) - main_splitter.addWidget(task_view) + main_splitter.addWidget(tasks_widget) # Set stretch of both sides main_splitter.setStretchFactor(0, 7) @@ -119,9 +113,7 @@ class ContextDialog(QtWidgets.QDialog): assets_widget.selection_changed.connect(self._on_asset_change) assets_widget.refresh_triggered.connect(self._on_asset_refresh_trigger) assets_widget.refreshed.connect(self._on_asset_widget_refresh_finished) - task_view.selectionModel().selectionChanged.connect( - self._on_task_change - ) + tasks_widget.task_changed.connect(self._on_task_change) ok_btn.clicked.connect(self._on_ok_click) self._dbcon = dbcon @@ -133,9 +125,7 @@ class ContextDialog(QtWidgets.QDialog): self._assets_widget = assets_widget - self._task_view = task_view - self._task_model = task_model - self._task_proxy = task_proxy + self._tasks_widget = tasks_widget self._ok_btn = ok_btn @@ -279,15 +269,13 @@ class ContextDialog(QtWidgets.QDialog): self._dbcon.Session["AVALON_ASSET"] = self._set_context_asset self._assets_widget.setEnabled(False) self._assets_widget.select_assets(self._set_context_asset) - 
self._set_asset_to_task_model() + self._set_asset_to_tasks_widget() else: self._assets_widget.setEnabled(True) self._assets_widget.set_current_asset_btn_visibility(False) # Refresh tasks - self._task_model.refresh() - # Sort tasks - self._task_proxy.sort(0, QtCore.Qt.AscendingOrder) + self._tasks_widget.refresh() self._ignore_value_changes = False @@ -314,20 +302,16 @@ class ContextDialog(QtWidgets.QDialog): """Selected assets have changed""" if self._ignore_value_changes: return - self._set_asset_to_task_model() + self._set_asset_to_tasks_widget() def _on_task_change(self): self._validate_strict() - def _set_asset_to_task_model(self): + def _set_asset_to_tasks_widget(self): # filter None docs they are silo - asset_docs = self._assets_widget.get_selected_assets() - asset_ids = [asset_doc["_id"] for asset_doc in asset_docs] - asset_id = None - if asset_ids: - asset_id = asset_ids[0] - self._task_model.set_asset_id(asset_id) - self._task_proxy.sort(0, QtCore.Qt.AscendingOrder) + asset_id = self._assets_widget.get_selected_asset_id() + + self._tasks_widget.set_asset_id(asset_id) def _confirm_values(self): """Store values to output.""" @@ -347,19 +331,11 @@ class ContextDialog(QtWidgets.QDialog): def get_selected_asset(self): """Currently selected asset in asset widget.""" - asset_name = None - for asset_doc in self._assets_widget.get_selected_assets(): - asset_name = asset_doc["name"] - break - return asset_name + return self._assets_widget.get_selected_asset_name() def get_selected_task(self): """Currently selected task.""" - task_name = None - index = self._task_view.selectionModel().currentIndex() - if index.isValid(): - task_name = index.data(TASK_NAME_ROLE) - return task_name + return self._tasks_widget.get_selected_task_name() def _validate_strict(self): if not self._strict: diff --git a/openpype/tools/creator/__init__.py b/openpype/tools/creator/__init__.py new file mode 100644 index 0000000000..585b8bdf80 --- /dev/null +++ b/openpype/tools/creator/__init__.py 
@@ -0,0 +1,9 @@ +from .window import ( + show, + CreatorWindow +) + +__all__ = ( + "show", + "CreatorWindow" +) diff --git a/openpype/tools/creator/constants.py b/openpype/tools/creator/constants.py new file mode 100644 index 0000000000..26a25dc010 --- /dev/null +++ b/openpype/tools/creator/constants.py @@ -0,0 +1,8 @@ +from Qt import QtCore + + +FAMILY_ROLE = QtCore.Qt.UserRole + 1 +ITEM_ID_ROLE = QtCore.Qt.UserRole + 2 + +SEPARATOR = "---" +SEPARATORS = {"---", "---separator---"} diff --git a/openpype/tools/creator/model.py b/openpype/tools/creator/model.py new file mode 100644 index 0000000000..6907e8f0aa --- /dev/null +++ b/openpype/tools/creator/model.py @@ -0,0 +1,55 @@ +import uuid +from Qt import QtGui, QtCore + +from avalon import api + +from . constants import ( + FAMILY_ROLE, + ITEM_ID_ROLE +) + + +class CreatorsModel(QtGui.QStandardItemModel): + def __init__(self, *args, **kwargs): + super(CreatorsModel, self).__init__(*args, **kwargs) + + self._creators_by_id = {} + + def reset(self): + # TODO change to refresh when clearing is not needed + self.clear() + self._creators_by_id = {} + + items = [] + creators = api.discover(api.Creator) + for creator in creators: + item_id = str(uuid.uuid4()) + self._creators_by_id[item_id] = creator + + label = creator.label or creator.family + item = QtGui.QStandardItem(label) + item.setEditable(False) + item.setData(item_id, ITEM_ID_ROLE) + item.setData(creator.family, FAMILY_ROLE) + items.append(item) + + if not items: + item = QtGui.QStandardItem("No registered families") + item.setEnabled(False) + item.setData(QtCore.Qt.ItemIsEnabled, False) + items.append(item) + + self.invisibleRootItem().appendRows(items) + + def get_creator_by_id(self, item_id): + return self._creators_by_id.get(item_id) + + def get_indexes_by_family(self, family): + indexes = [] + for row in range(self.rowCount()): + index = self.index(row, 0) + item_id = index.data(ITEM_ID_ROLE) + creator_plugin = self._creators_by_id.get(item_id) + if 
creator_plugin and creator_plugin.family == family: + indexes.append(index) + return indexes diff --git a/openpype/tools/creator/widgets.py b/openpype/tools/creator/widgets.py new file mode 100644 index 0000000000..89c90cc048 --- /dev/null +++ b/openpype/tools/creator/widgets.py @@ -0,0 +1,266 @@ +import re +import inspect + +from Qt import QtWidgets, QtCore, QtGui + +from avalon.vendor import qtawesome + +from openpype import style +from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS + + +class CreateErrorMessageBox(QtWidgets.QDialog): + def __init__( + self, + family, + subset_name, + asset_name, + exc_msg, + formatted_traceback, + parent=None + ): + super(CreateErrorMessageBox, self).__init__(parent) + self.setWindowTitle("Creation failed") + self.setFocusPolicy(QtCore.Qt.StrongFocus) + self.setWindowFlags( + self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint + ) + + body_layout = QtWidgets.QVBoxLayout(self) + + main_label = ( + "Failed to create" + ) + main_label_widget = QtWidgets.QLabel(main_label, self) + body_layout.addWidget(main_label_widget) + + item_name_template = ( + "Family: {}
" + "Subset: {}
" + "Asset: {}
" + ) + exc_msg_template = "{}" + + line = self._create_line() + body_layout.addWidget(line) + + item_name = item_name_template.format(family, subset_name, asset_name) + item_name_widget = QtWidgets.QLabel( + item_name.replace("\n", "
"), self + ) + body_layout.addWidget(item_name_widget) + + exc_msg = exc_msg_template.format(exc_msg.replace("\n", "
")) + message_label_widget = QtWidgets.QLabel(exc_msg, self) + body_layout.addWidget(message_label_widget) + + if formatted_traceback: + tb_widget = QtWidgets.QLabel( + formatted_traceback.replace("\n", "
"), self + ) + tb_widget.setTextInteractionFlags( + QtCore.Qt.TextBrowserInteraction + ) + body_layout.addWidget(tb_widget) + + footer_widget = QtWidgets.QWidget(self) + footer_layout = QtWidgets.QHBoxLayout(footer_widget) + button_box = QtWidgets.QDialogButtonBox(QtCore.Qt.Vertical) + button_box.setStandardButtons( + QtWidgets.QDialogButtonBox.StandardButton.Ok + ) + button_box.accepted.connect(self._on_accept) + footer_layout.addWidget(button_box, alignment=QtCore.Qt.AlignRight) + body_layout.addWidget(footer_widget) + + def showEvent(self, event): + self.setStyleSheet(style.load_stylesheet()) + super(CreateErrorMessageBox, self).showEvent(event) + + def _on_accept(self): + self.close() + + def _create_line(self): + line = QtWidgets.QFrame(self) + line.setFixedHeight(2) + line.setFrameShape(QtWidgets.QFrame.HLine) + line.setFrameShadow(QtWidgets.QFrame.Sunken) + return line + + +class SubsetNameValidator(QtGui.QRegExpValidator): + invalid = QtCore.Signal(set) + pattern = "^[{}]*$".format(SUBSET_NAME_ALLOWED_SYMBOLS) + + def __init__(self): + reg = QtCore.QRegExp(self.pattern) + super(SubsetNameValidator, self).__init__(reg) + + def validate(self, text, pos): + results = super(SubsetNameValidator, self).validate(text, pos) + if results[0] == self.Invalid: + self.invalid.emit(self.invalid_chars(text)) + return results + + def invalid_chars(self, text): + invalid = set() + re_valid = re.compile(self.pattern) + for char in text: + if char == " ": + invalid.add("' '") + continue + if not re_valid.match(char): + invalid.add(char) + return invalid + + +class VariantLineEdit(QtWidgets.QLineEdit): + report = QtCore.Signal(str) + colors = { + "empty": (QtGui.QColor("#78879b"), ""), + "exists": (QtGui.QColor("#4E76BB"), "border-color: #4E76BB;"), + "new": (QtGui.QColor("#7AAB8F"), "border-color: #7AAB8F;"), + } + + def __init__(self, *args, **kwargs): + super(VariantLineEdit, self).__init__(*args, **kwargs) + + validator = SubsetNameValidator() + 
self.setValidator(validator) + self.setToolTip("Only alphanumeric characters (A-Z a-z 0-9), " + "'_' and '.' are allowed.") + + self._status_color = self.colors["empty"][0] + + anim = QtCore.QPropertyAnimation() + anim.setTargetObject(self) + anim.setPropertyName(b"status_color") + anim.setEasingCurve(QtCore.QEasingCurve.InCubic) + anim.setDuration(300) + anim.setStartValue(QtGui.QColor("#C84747")) # `Invalid` status color + self.animation = anim + + validator.invalid.connect(self.on_invalid) + + def on_invalid(self, invalid): + message = "Invalid character: %s" % ", ".join(invalid) + self.report.emit(message) + self.animation.stop() + self.animation.start() + + def as_empty(self): + self._set_border("empty") + self.report.emit("Empty subset name ..") + + def as_exists(self): + self._set_border("exists") + self.report.emit("Existing subset, appending next version.") + + def as_new(self): + self._set_border("new") + self.report.emit("New subset, creating first version.") + + def _set_border(self, status): + qcolor, style = self.colors[status] + self.animation.setEndValue(qcolor) + self.setStyleSheet(style) + + def _get_status_color(self): + return self._status_color + + def _set_status_color(self, color): + self._status_color = color + self.setStyleSheet("border-color: %s;" % color.name()) + + status_color = QtCore.Property( + QtGui.QColor, _get_status_color, _set_status_color + ) + + +class FamilyDescriptionWidget(QtWidgets.QWidget): + """A family description widget. + + Shows a family icon, family name and a help description. + Used in creator header. 
+ + _________________ + | ____ | + | |icon| FAMILY | + | |____| help | + |_________________| + + """ + + SIZE = 35 + + def __init__(self, parent=None): + super(FamilyDescriptionWidget, self).__init__(parent=parent) + + icon_label = QtWidgets.QLabel(self) + icon_label.setSizePolicy( + QtWidgets.QSizePolicy.Maximum, + QtWidgets.QSizePolicy.Maximum + ) + + # Add 4 pixel padding to avoid icon being cut off + icon_label.setFixedWidth(self.SIZE + 4) + icon_label.setFixedHeight(self.SIZE + 4) + + label_layout = QtWidgets.QVBoxLayout() + label_layout.setSpacing(0) + + family_label = QtWidgets.QLabel(self) + family_label.setObjectName("CreatorFamilyLabel") + family_label.setAlignment(QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft) + + help_label = QtWidgets.QLabel(self) + help_label.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft) + + label_layout.addWidget(family_label) + label_layout.addWidget(help_label) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(5) + layout.addWidget(icon_label) + layout.addLayout(label_layout) + + self._help_label = help_label + self._family_label = family_label + self._icon_label = icon_label + + def set_item(self, creator_plugin): + """Update elements to display information of a family item. 
+ + Args: + item (dict): A family item as registered with name, help and icon + + Returns: + None + + """ + if not creator_plugin: + self._icon_label.setPixmap(None) + self._family_label.setText("") + self._help_label.setText("") + return + + # Support a font-awesome icon + icon_name = getattr(creator_plugin, "icon", None) or "info-circle" + try: + icon = qtawesome.icon("fa.{}".format(icon_name), color="white") + pixmap = icon.pixmap(self.SIZE, self.SIZE) + except Exception: + print("BUG: Couldn't load icon \"fa.{}\"".format(str(icon_name))) + # Create transparent pixmap + pixmap = QtGui.QPixmap() + pixmap.fill(QtCore.Qt.transparent) + pixmap = pixmap.scaled(self.SIZE, self.SIZE) + + # Parse a clean line from the Creator's docstring + docstring = inspect.getdoc(creator_plugin) + creator_help = docstring.splitlines()[0] if docstring else "" + + self._icon_label.setPixmap(pixmap) + self._family_label.setText(creator_plugin.family) + self._help_label.setText(creator_help) diff --git a/openpype/tools/creator/window.py b/openpype/tools/creator/window.py new file mode 100644 index 0000000000..dca1735121 --- /dev/null +++ b/openpype/tools/creator/window.py @@ -0,0 +1,509 @@ +import sys +import traceback +import re + +from Qt import QtWidgets, QtCore + +from avalon import api, io + +from openpype import style +from openpype.api import get_current_project_settings +from openpype.tools.utils.lib import qt_app_context +from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS + +from .model import CreatorsModel +from .widgets import ( + CreateErrorMessageBox, + VariantLineEdit, + FamilyDescriptionWidget +) +from .constants import ( + ITEM_ID_ROLE, + SEPARATOR, + SEPARATORS +) + +module = sys.modules[__name__] +module.window = None + + +class CreatorWindow(QtWidgets.QDialog): + def __init__(self, parent=None): + super(CreatorWindow, self).__init__(parent) + self.setWindowTitle("Instance Creator") + self.setFocusPolicy(QtCore.Qt.StrongFocus) + if not parent: + 
self.setWindowFlags( + self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint + ) + + creator_info = FamilyDescriptionWidget(self) + + creators_model = CreatorsModel() + + creators_proxy = QtCore.QSortFilterProxyModel() + creators_proxy.setSourceModel(creators_model) + + creators_view = QtWidgets.QListView(self) + creators_view.setObjectName("CreatorsView") + creators_view.setModel(creators_proxy) + + asset_name_input = QtWidgets.QLineEdit(self) + variant_input = VariantLineEdit(self) + subset_name_input = QtWidgets.QLineEdit(self) + subset_name_input.setEnabled(False) + + subset_button = QtWidgets.QPushButton() + subset_button.setFixedWidth(18) + subset_menu = QtWidgets.QMenu(subset_button) + subset_button.setMenu(subset_menu) + + name_layout = QtWidgets.QHBoxLayout() + name_layout.addWidget(variant_input) + name_layout.addWidget(subset_button) + name_layout.setSpacing(3) + name_layout.setContentsMargins(0, 0, 0, 0) + + body_layout = QtWidgets.QVBoxLayout() + body_layout.setContentsMargins(0, 0, 0, 0) + + body_layout.addWidget(creator_info, 0) + body_layout.addWidget(QtWidgets.QLabel("Family", self), 0) + body_layout.addWidget(creators_view, 1) + body_layout.addWidget(QtWidgets.QLabel("Asset", self), 0) + body_layout.addWidget(asset_name_input, 0) + body_layout.addWidget(QtWidgets.QLabel("Subset", self), 0) + body_layout.addLayout(name_layout, 0) + body_layout.addWidget(subset_name_input, 0) + + useselection_chk = QtWidgets.QCheckBox("Use selection", self) + useselection_chk.setCheckState(QtCore.Qt.Checked) + + create_btn = QtWidgets.QPushButton("Create", self) + # Need to store error_msg to prevent garbage collection + msg_label = QtWidgets.QLabel(self) + + footer_layout = QtWidgets.QVBoxLayout() + footer_layout.addWidget(create_btn, 0) + footer_layout.addWidget(msg_label, 0) + footer_layout.setContentsMargins(0, 0, 0, 0) + + layout = QtWidgets.QVBoxLayout(self) + layout.addLayout(body_layout, 1) + layout.addWidget(useselection_chk, 0, QtCore.Qt.AlignLeft) + 
layout.addLayout(footer_layout, 0) + + msg_timer = QtCore.QTimer() + msg_timer.setSingleShot(True) + msg_timer.setInterval(5000) + + validation_timer = QtCore.QTimer() + validation_timer.setSingleShot(True) + validation_timer.setInterval(300) + + msg_timer.timeout.connect(self._on_msg_timer) + validation_timer.timeout.connect(self._on_validation_timer) + + create_btn.clicked.connect(self._on_create) + variant_input.returnPressed.connect(self._on_create) + variant_input.textChanged.connect(self._on_data_changed) + variant_input.report.connect(self.echo) + asset_name_input.textChanged.connect(self._on_data_changed) + creators_view.selectionModel().currentChanged.connect( + self._on_selection_changed + ) + + # Store valid states and + self._is_valid = False + create_btn.setEnabled(self._is_valid) + + self._first_show = True + + # Message dialog when something goes wrong during creation + self._message_dialog = None + + self._creator_info = creator_info + self._create_btn = create_btn + self._useselection_chk = useselection_chk + self._variant_input = variant_input + self._subset_name_input = subset_name_input + self._asset_name_input = asset_name_input + + self._creators_model = creators_model + self._creators_proxy = creators_proxy + self._creators_view = creators_view + + self._subset_btn = subset_button + self._subset_menu = subset_menu + + self._msg_label = msg_label + + self._validation_timer = validation_timer + self._msg_timer = msg_timer + + # Defaults + self.resize(300, 500) + variant_input.setFocus() + + def _set_valid_state(self, valid): + if self._is_valid == valid: + return + self._is_valid = valid + self._create_btn.setEnabled(valid) + + def _build_menu(self, default_names=None): + """Create optional predefined subset names + + Args: + default_names(list): all predefined names + + Returns: + None + """ + if not default_names: + default_names = [] + + menu = self._subset_menu + button = self._subset_btn + + # Get and destroy the action group + group = 
button.findChild(QtWidgets.QActionGroup) + if group: + group.deleteLater() + + state = any(default_names) + button.setEnabled(state) + if state is False: + return + + # Build new action group + group = QtWidgets.QActionGroup(button) + for name in default_names: + if name in SEPARATORS: + menu.addSeparator() + continue + action = group.addAction(name) + menu.addAction(action) + + group.triggered.connect(self._on_action_clicked) + + def _on_action_clicked(self, action): + self._variant_input.setText(action.text()) + + def _on_data_changed(self, *args): + # Set invalid state until it's reconfirmed to be valid by the + # scheduled callback so any form of creation is held back until + # valid again + self._set_valid_state(False) + + self._validation_timer.start() + + def _on_validation_timer(self): + index = self._creators_view.currentIndex() + item_id = index.data(ITEM_ID_ROLE) + creator_plugin = self._creators_model.get_creator_by_id(item_id) + user_input_text = self._variant_input.text() + asset_name = self._asset_name_input.text() + + # Early exit if no asset name + if not asset_name.strip(): + self._build_menu() + self.echo("Asset name is required ..") + self._set_valid_state(False) + return + + asset_doc = None + if creator_plugin: + # Get the asset from the database which match with the name + asset_doc = io.find_one( + {"name": asset_name, "type": "asset"}, + projection={"_id": 1} + ) + + # Get plugin + if not asset_doc or not creator_plugin: + subset_name = user_input_text + self._build_menu() + + if not creator_plugin: + self.echo("No registered families ..") + else: + self.echo("Asset '%s' not found .." 
% asset_name) + self._set_valid_state(False) + return + + project_name = io.Session["AVALON_PROJECT"] + asset_id = asset_doc["_id"] + task_name = io.Session["AVALON_TASK"] + + # Calculate subset name with Creator plugin + subset_name = creator_plugin.get_subset_name( + user_input_text, task_name, asset_id, project_name + ) + # Force replacement of prohibited symbols + # QUESTION should Creator care about this and here should be only + # validated with schema regex? + + # Allow curly brackets in subset name for dynamic keys + curly_left = "__cbl__" + curly_right = "__cbr__" + tmp_subset_name = ( + subset_name + .replace("{", curly_left) + .replace("}", curly_right) + ) + # Replace prohibited symbols + tmp_subset_name = re.sub( + "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), + "", + tmp_subset_name + ) + subset_name = ( + tmp_subset_name + .replace(curly_left, "{") + .replace(curly_right, "}") + ) + self._subset_name_input.setText(subset_name) + + # Get all subsets of the current asset + subset_docs = io.find( + { + "type": "subset", + "parent": asset_id + }, + {"name": 1} + ) + existing_subset_names = set(subset_docs.distinct("name")) + existing_subset_names_low = set( + _name.lower() + for _name in existing_subset_names + ) + + # Defaults to dropdown + defaults = [] + # Check if Creator plugin has set defaults + if ( + creator_plugin.defaults + and isinstance(creator_plugin.defaults, (list, tuple, set)) + ): + defaults = list(creator_plugin.defaults) + + # Replace + compare_regex = re.compile(re.sub( + user_input_text, "(.+)", subset_name, flags=re.IGNORECASE + )) + subset_hints = set() + if user_input_text: + for _name in existing_subset_names: + _result = compare_regex.search(_name) + if _result: + subset_hints |= set(_result.groups()) + + if subset_hints: + if defaults: + defaults.append(SEPARATOR) + defaults.extend(subset_hints) + self._build_menu(defaults) + + # Indicate subset existence + if not user_input_text: + self._variant_input.as_empty() + elif 
subset_name.lower() in existing_subset_names_low: + # validate existence of subset name with lowered text + # - "renderMain" vs. "rensermain" mean same path item for + # windows + self._variant_input.as_exists() + else: + self._variant_input.as_new() + + # Update the valid state + valid = subset_name.strip() != "" + + self._set_valid_state(valid) + + def _on_selection_changed(self, old_idx, new_idx): + index = self._creators_view.currentIndex() + item_id = index.data(ITEM_ID_ROLE) + + creator_plugin = self._creators_model.get_creator_by_id(item_id) + + self._creator_info.set_item(creator_plugin) + + if creator_plugin is None: + return + + default = None + if hasattr(creator_plugin, "get_default_variant"): + default = creator_plugin.get_default_variant() + + if not default: + if ( + creator_plugin.defaults + and isinstance(creator_plugin.defaults, list) + ): + default = creator_plugin.defaults[0] + else: + default = "Default" + + self._variant_input.setText(default) + + self._on_data_changed() + + def keyPressEvent(self, event): + """Custom keyPressEvent. + + Override keyPressEvent to do nothing so that Maya's panels won't + take focus when pressing "SHIFT" whilst mouse is over viewport or + outliner. This way users don't accidently perform Maya commands + whilst trying to name an instance. 
+ + """ + pass + + def showEvent(self, event): + super(CreatorWindow, self).showEvent(event) + if self._first_show: + self._first_show = False + self.setStyleSheet(style.load_stylesheet()) + + def refresh(self): + self._asset_name_input.setText(io.Session["AVALON_ASSET"]) + + self._creators_model.reset() + + pype_project_setting = ( + get_current_project_settings() + ["global"] + ["tools"] + ["creator"] + ["families_smart_select"] + ) + current_index = None + family = None + task_name = io.Session.get("AVALON_TASK", None) + lowered_task_name = task_name.lower() + if task_name: + for _family, _task_names in pype_project_setting.items(): + _low_task_names = {name.lower() for name in _task_names} + for _task_name in _low_task_names: + if _task_name in lowered_task_name: + family = _family + break + if family: + break + + if family: + indexes = self._creators_model.get_indexes_by_family(family) + if indexes: + index = indexes[0] + current_index = self._creators_proxy.mapFromSource(index) + + if current_index is None or not current_index.isValid(): + current_index = self._creators_proxy.index(0, 0) + + self._creators_view.setCurrentIndex(current_index) + + def _on_create(self): + # Do not allow creation in an invalid state + if not self._is_valid: + return + + index = self._creators_view.currentIndex() + item_id = index.data(ITEM_ID_ROLE) + creator_plugin = self._creators_model.get_creator_by_id(item_id) + if creator_plugin is None: + return + + subset_name = self._subset_name_input.text() + asset_name = self._asset_name_input.text() + use_selection = self._useselection_chk.isChecked() + + variant = self._variant_input.text() + + error_info = None + try: + api.create( + creator_plugin, + subset_name, + asset_name, + options={"useSelection": use_selection}, + data={"variant": variant} + ) + + except api.CreatorError as exc: + self.echo("Creator error: {}".format(str(exc))) + error_info = (str(exc), None) + + except Exception as exc: + self.echo("Program error: %s" % 
str(exc)) + + exc_type, exc_value, exc_traceback = sys.exc_info() + formatted_traceback = "".join(traceback.format_exception( + exc_type, exc_value, exc_traceback + )) + error_info = (str(exc), formatted_traceback) + + if error_info: + box = CreateErrorMessageBox( + creator_plugin.family, subset_name, asset_name, *error_info + ) + box.show() + # Store dialog so is not garbage collected before is shown + self._message_dialog = box + + else: + self.echo("Created %s .." % subset_name) + + def _on_msg_timer(self): + self._msg_label.setText("") + + def echo(self, message): + self._msg_label.setText(str(message)) + self._msg_timer.start() + + +def show(debug=False, parent=None): + """Display asset creator GUI + + Arguments: + debug (bool, optional): Run loader in debug-mode, + defaults to False + parent (QtCore.QObject, optional): When provided parent the interface + to this QObject. + + """ + + try: + module.window.close() + del(module.window) + except (AttributeError, RuntimeError): + pass + + if debug: + from avalon import mock + for creator in mock.creators: + api.register_plugin(api.Creator, creator) + + import traceback + sys.excepthook = lambda typ, val, tb: traceback.print_last() + + io.install() + + any_project = next( + project for project in io.projects() + if project.get("active", True) is not False + ) + + api.Session["AVALON_PROJECT"] = any_project["name"] + module.project = any_project["name"] + + with qt_app_context(): + window = CreatorWindow(parent) + window.refresh() + window.show() + + module.window = window + + # Pull window to the front. 
+ module.window.raise_() + module.window.activateWindow() diff --git a/openpype/tools/launcher/models.py b/openpype/tools/launcher/models.py index f87871409e..427475cb4b 100644 --- a/openpype/tools/launcher/models.py +++ b/openpype/tools/launcher/models.py @@ -19,102 +19,6 @@ from openpype.lib import ApplicationManager log = logging.getLogger(__name__) -class TaskModel(QtGui.QStandardItemModel): - """A model listing the tasks combined for a list of assets""" - - def __init__(self, dbcon, parent=None): - super(TaskModel, self).__init__(parent=parent) - self.dbcon = dbcon - - self._num_assets = 0 - - self.default_icon = qtawesome.icon( - "fa.male", color=style.colors.default - ) - self.no_task_icon = qtawesome.icon( - "fa.exclamation-circle", color=style.colors.mid - ) - - self._icons = {} - - self._get_task_icons() - - def _get_task_icons(self): - if not self.dbcon.Session.get("AVALON_PROJECT"): - return - - # Get the project configured icons from database - project = self.dbcon.find_one({"type": "project"}) - for task in project["config"].get("tasks") or []: - icon_name = task.get("icon") - if icon_name: - self._icons[task["name"]] = qtawesome.icon( - "fa.{}".format(icon_name), color=style.colors.default - ) - - def set_assets(self, asset_ids=None, asset_docs=None): - """Set assets to track by their database id - - Arguments: - asset_ids (list): List of asset ids. - asset_docs (list): List of asset entities from MongoDB. 
- - """ - - if asset_docs is None and asset_ids is not None: - # find assets in db by query - asset_docs = list(self.dbcon.find({ - "type": "asset", - "_id": {"$in": asset_ids} - })) - db_assets_ids = tuple(asset_doc["_id"] for asset_doc in asset_docs) - - # check if all assets were found - not_found = tuple( - str(asset_id) - for asset_id in asset_ids - if asset_id not in db_assets_ids - ) - - assert not not_found, "Assets not found by id: {0}".format( - ", ".join(not_found) - ) - - self.clear() - - if not asset_docs: - return - - task_names = set() - for asset_doc in asset_docs: - asset_tasks = asset_doc.get("data", {}).get("tasks") or set() - task_names.update(asset_tasks) - - self.beginResetModel() - - if not task_names: - item = QtGui.QStandardItem(self.no_task_icon, "No task") - item.setEnabled(False) - self.appendRow(item) - - else: - for task_name in sorted(task_names): - icon = self._icons.get(task_name, self.default_icon) - item = QtGui.QStandardItem(icon, task_name) - self.appendRow(item) - - self.endResetModel() - - def headerData(self, section, orientation, role): - if ( - role == QtCore.Qt.DisplayRole - and orientation == QtCore.Qt.Horizontal - and section == 0 - ): - return "Tasks" - return super(TaskModel, self).headerData(section, orientation, role) - - class ActionModel(QtGui.QStandardItemModel): def __init__(self, dbcon, parent=None): super(ActionModel, self).__init__(parent=parent) diff --git a/openpype/tools/launcher/widgets.py b/openpype/tools/launcher/widgets.py index 5e01488ae6..edda8d08b5 100644 --- a/openpype/tools/launcher/widgets.py +++ b/openpype/tools/launcher/widgets.py @@ -6,7 +6,7 @@ from avalon.vendor import qtawesome from .delegates import ActionDelegate from . 
import lib -from .models import TaskModel, ActionModel +from .models import ActionModel from openpype.tools.flickcharm import FlickCharm from .constants import ( ACTION_ROLE, @@ -90,9 +90,6 @@ class ActionBar(QtWidgets.QWidget): self.project_handler = project_handler self.dbcon = dbcon - layout = QtWidgets.QHBoxLayout(self) - layout.setContentsMargins(8, 0, 8, 0) - view = QtWidgets.QListView(self) view.setProperty("mode", "icon") view.setObjectName("IconView") @@ -116,6 +113,8 @@ class ActionBar(QtWidgets.QWidget): ) view.setItemDelegate(delegate) + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) layout.addWidget(view) self.model = model @@ -261,92 +260,6 @@ class ActionBar(QtWidgets.QWidget): self.action_clicked.emit(action) -class TasksWidget(QtWidgets.QWidget): - """Widget showing active Tasks""" - - task_changed = QtCore.Signal() - selection_mode = ( - QtCore.QItemSelectionModel.Select | QtCore.QItemSelectionModel.Rows - ) - - def __init__(self, dbcon, parent=None): - super(TasksWidget, self).__init__(parent) - - self.dbcon = dbcon - - view = QtWidgets.QTreeView(self) - view.setIndentation(0) - view.setEditTriggers(QtWidgets.QTreeView.NoEditTriggers) - model = TaskModel(self.dbcon) - view.setModel(model) - - layout = QtWidgets.QVBoxLayout(self) - layout.addWidget(view) - - view.selectionModel().selectionChanged.connect(self.task_changed) - - self.model = model - self.view = view - - self._last_selected_task = None - - def set_asset(self, asset_id): - if asset_id is None: - # Asset deselected - self.model.set_assets() - return - - # Try and preserve the last selected task and reselect it - # after switching assets. If there's no currently selected - # asset keep whatever the "last selected" was prior to it. 
- current = self.get_current_task() - if current: - self._last_selected_task = current - - self.model.set_assets([asset_id]) - - if self._last_selected_task: - self.select_task(self._last_selected_task) - - # Force a task changed emit. - self.task_changed.emit() - - def select_task(self, task_name): - """Select a task by name. - - If the task does not exist in the current model then selection is only - cleared. - - Args: - task (str): Name of the task to select. - - """ - - # Clear selection - self.view.selectionModel().clearSelection() - - # Select the task - for row in range(self.model.rowCount()): - index = self.model.index(row, 0) - _task_name = index.data(QtCore.Qt.DisplayRole) - if _task_name == task_name: - self.view.selectionModel().select(index, self.selection_mode) - # Set the currently active index - self.view.setCurrentIndex(index) - break - - def get_current_task(self): - """Return name of task at current index (selected) - - Returns: - str: Name of the current task. - - """ - index = self.view.currentIndex() - if self.view.selectionModel().isSelected(index): - return index.data(QtCore.Qt.DisplayRole) - - class ActionHistory(QtWidgets.QPushButton): trigger_history = QtCore.Signal(tuple) diff --git a/openpype/tools/launcher/window.py b/openpype/tools/launcher/window.py index 454445824e..c8acbe77c2 100644 --- a/openpype/tools/launcher/window.py +++ b/openpype/tools/launcher/window.py @@ -8,14 +8,15 @@ from avalon.api import AvalonMongoDB from openpype import style from openpype.api import resources -from openpype.tools.utils.widgets import AssetWidget +from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget +from openpype.tools.utils.tasks_widget import TasksWidget + from avalon.vendor import qtawesome from .models import ProjectModel from .lib import get_action_label, ProjectHandler from .widgets import ( ProjectBar, ActionBar, - TasksWidget, ActionHistory, SlidePageWidget ) @@ -91,8 +92,6 @@ class ProjectsPanel(QtWidgets.QWidget): def 
__init__(self, project_handler, parent=None): super(ProjectsPanel, self).__init__(parent=parent) - layout = QtWidgets.QVBoxLayout(self) - view = ProjectIconView(parent=self) view.setSelectionMode(QtWidgets.QListView.NoSelection) flick = FlickCharm(parent=self) @@ -100,6 +99,8 @@ class ProjectsPanel(QtWidgets.QWidget): view.setModel(project_handler.model) + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) layout.addWidget(view) view.clicked.connect(self.on_clicked) @@ -123,47 +124,36 @@ class AssetsPanel(QtWidgets.QWidget): self.dbcon = dbcon - # project bar - project_bar_widget = QtWidgets.QWidget(self) - - layout = QtWidgets.QHBoxLayout(project_bar_widget) - layout.setSpacing(4) - + # Project bar btn_back_icon = qtawesome.icon("fa.angle-left", color="white") - btn_back = QtWidgets.QPushButton(project_bar_widget) + btn_back = QtWidgets.QPushButton(self) btn_back.setIcon(btn_back_icon) - project_bar = ProjectBar(project_handler, project_bar_widget) + project_bar = ProjectBar(project_handler, self) - layout.addWidget(btn_back) - layout.addWidget(project_bar) - - # assets - assets_proxy_widgets = QtWidgets.QWidget(self) - assets_proxy_widgets.setContentsMargins(0, 0, 0, 0) - assets_layout = QtWidgets.QVBoxLayout(assets_proxy_widgets) - assets_widget = AssetWidget( - dbcon=self.dbcon, parent=assets_proxy_widgets - ) + project_bar_layout = QtWidgets.QHBoxLayout() + project_bar_layout.setContentsMargins(0, 0, 0, 0) + project_bar_layout.setSpacing(4) + project_bar_layout.addWidget(btn_back) + project_bar_layout.addWidget(project_bar) + # Assets widget + assets_widget = SingleSelectAssetsWidget(dbcon=self.dbcon, parent=self) # Make assets view flickable - flick = FlickCharm(parent=self) - flick.activateOn(assets_widget.view) - assets_widget.view.setVerticalScrollMode( - assets_widget.view.ScrollPerPixel - ) - assets_layout.addWidget(assets_widget) + assets_widget.activate_flick_charm() - # tasks + # Tasks widget tasks_widget = 
TasksWidget(self.dbcon, self) - body = QtWidgets.QSplitter() + + # Body + body = QtWidgets.QSplitter(self) body.setContentsMargins(0, 0, 0, 0) body.setSizePolicy( QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding ) body.setOrientation(QtCore.Qt.Horizontal) - body.addWidget(assets_proxy_widgets) + body.addWidget(assets_widget) body.addWidget(tasks_widget) body.setStretchFactor(0, 100) body.setStretchFactor(1, 65) @@ -171,24 +161,26 @@ class AssetsPanel(QtWidgets.QWidget): # main layout layout = QtWidgets.QVBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) - layout.setSpacing(0) - layout.addWidget(project_bar_widget) + layout.addLayout(project_bar_layout) layout.addWidget(body) # signals - project_handler.project_changed.connect(self.on_project_changed) - assets_widget.selection_changed.connect(self.on_asset_changed) - assets_widget.refreshed.connect(self.on_asset_changed) - tasks_widget.task_changed.connect(self.on_task_change) + project_handler.project_changed.connect(self._on_project_changed) + assets_widget.selection_changed.connect(self._on_asset_changed) + assets_widget.refreshed.connect(self._on_asset_changed) + tasks_widget.task_changed.connect(self._on_task_change) btn_back.clicked.connect(self.back_clicked) self.project_handler = project_handler self.project_bar = project_bar self.assets_widget = assets_widget - self.tasks_widget = tasks_widget + self._tasks_widget = tasks_widget self._btn_back = btn_back + def select_asset(self, asset_name): + self.assets_widget.select_asset_by_name(asset_name) + def showEvent(self, event): super(AssetsPanel, self).showEvent(event) @@ -197,56 +189,41 @@ class AssetsPanel(QtWidgets.QWidget): btn_size = self.project_bar.height() self._btn_back.setFixedSize(QtCore.QSize(btn_size, btn_size)) - def on_project_changed(self): + def select_task_name(self, task_name): + self._on_asset_changed() + self._tasks_widget.select_task_name(task_name) + + def _on_project_changed(self): self.session_changed.emit() 
self.assets_widget.refresh() - def on_asset_changed(self): + def _on_asset_changed(self): """Callback on asset selection changed This updates the task view. """ - asset_name = None - asset_silo = None - # Check asset on current index and selected assets - asset_doc = self.assets_widget.get_active_asset_document() - selected_asset_docs = self.assets_widget.get_selected_assets() - # If there are not asset selected docs then active asset is not - # selected - if not selected_asset_docs: - asset_doc = None - elif asset_doc: - # If selected asset doc and current asset are not same than - # something bad happened - if selected_asset_docs[0]["_id"] != asset_doc["_id"]: - asset_doc = None - - if asset_doc: - asset_name = asset_doc["name"] - asset_silo = asset_doc.get("silo") + asset_id = self.assets_widget.get_selected_asset_id() + asset_name = self.assets_widget.get_selected_asset_name() self.dbcon.Session["AVALON_TASK"] = None self.dbcon.Session["AVALON_ASSET"] = asset_name - self.dbcon.Session["AVALON_SILO"] = asset_silo self.session_changed.emit() - asset_id = None - if asset_doc: - asset_id = asset_doc["_id"] - self.tasks_widget.set_asset(asset_id) + self._tasks_widget.set_asset_id(asset_id) - def on_task_change(self): - task_name = self.tasks_widget.get_current_task() + def _on_task_change(self): + task_name = self._tasks_widget.get_selected_task_name() self.dbcon.Session["AVALON_TASK"] = task_name self.session_changed.emit() class LauncherWindow(QtWidgets.QDialog): """Launcher interface""" + message_timeout = 5000 def __init__(self, parent=None): super(LauncherWindow, self).__init__(parent) @@ -283,20 +260,17 @@ class LauncherWindow(QtWidgets.QDialog): actions_bar = ActionBar(project_handler, self.dbcon, self) # statusbar - statusbar = QtWidgets.QWidget() - layout = QtWidgets.QHBoxLayout(statusbar) + message_label = QtWidgets.QLabel(self) - message_label = QtWidgets.QLabel() - message_label.setFixedHeight(15) - - action_history = ActionHistory() + action_history = 
ActionHistory(self) action_history.setStatusTip("Show Action History") - layout.addWidget(message_label) - layout.addWidget(action_history) + status_layout = QtWidgets.QHBoxLayout() + status_layout.addWidget(message_label, 1) + status_layout.addWidget(action_history, 0) # Vertically split Pages and Actions - body = QtWidgets.QSplitter() + body = QtWidgets.QSplitter(self) body.setContentsMargins(0, 0, 0, 0) body.setSizePolicy( QtWidgets.QSizePolicy.Expanding, @@ -314,19 +288,13 @@ class LauncherWindow(QtWidgets.QDialog): layout = QtWidgets.QVBoxLayout(self) layout.addWidget(body) - layout.addWidget(statusbar) - layout.setSpacing(0) - layout.setContentsMargins(0, 0, 0, 0) + layout.addLayout(status_layout) - self.project_handler = project_handler + message_timer = QtCore.QTimer() + message_timer.setInterval(self.message_timeout) + message_timer.setSingleShot(True) - self.message_label = message_label - self.project_panel = project_panel - self.asset_panel = asset_panel - self.actions_bar = actions_bar - self.action_history = action_history - self.page_slider = page_slider - self._page = 0 + message_timer.timeout.connect(self._on_message_timeout) # signals actions_bar.action_clicked.connect(self.on_action_clicked) @@ -338,6 +306,19 @@ class LauncherWindow(QtWidgets.QDialog): self.resize(520, 740) + self._page = 0 + + self._message_timer = message_timer + + self.project_handler = project_handler + + self._message_label = message_label + self.project_panel = project_panel + self.asset_panel = asset_panel + self.actions_bar = actions_bar + self.action_history = action_history + self.page_slider = page_slider + def showEvent(self, event): self.project_handler.set_active(True) self.project_handler.start_timer(True) @@ -363,9 +344,12 @@ class LauncherWindow(QtWidgets.QDialog): self._page = page self.page_slider.slide_view(page, direction=direction) + def _on_message_timeout(self): + self._message_label.setText("") + def echo(self, message): - 
self.message_label.setText(str(message)) - QtCore.QTimer.singleShot(5000, lambda: self.message_label.setText("")) + self._message_label.setText(str(message)) + self._message_timer.start() self.log.debug(message) def on_session_changed(self): @@ -425,7 +409,6 @@ class LauncherWindow(QtWidgets.QDialog): def set_session(self, session): project_name = session.get("AVALON_PROJECT") - silo = session.get("AVALON_SILO") asset_name = session.get("AVALON_ASSET") task_name = session.get("AVALON_TASK") @@ -440,13 +423,9 @@ class LauncherWindow(QtWidgets.QDialog): index ) - if silo: - self.asset_panel.assets_widget.set_silo(silo) - if asset_name: - self.asset_panel.assets_widget.select_assets([asset_name]) + self.asset_panel.select_asset(asset_name) if task_name: # requires a forced refresh first - self.asset_panel.on_asset_changed() - self.asset_panel.tasks_widget.select_task(task_name) + self.asset_panel.select_task_name(task_name) diff --git a/openpype/tools/libraryloader/app.py b/openpype/tools/libraryloader/app.py index 710e25bd76..d030aa903d 100644 --- a/openpype/tools/libraryloader/app.py +++ b/openpype/tools/libraryloader/app.py @@ -11,7 +11,7 @@ from openpype.tools.loader.widgets import ( FamilyListView, RepresentationWidget ) -from openpype.tools.utils.widgets import AssetWidget +from openpype.tools.utils.assets_widget import MultiSelectAssetsWidget from openpype.modules import ModulesManager @@ -31,7 +31,7 @@ class LibraryLoaderWindow(QtWidgets.QDialog): message_timeout = 5000 def __init__( - self, parent=None, icon=None, show_projects=False, show_libraries=True + self, parent=None, show_projects=False, show_libraries=True ): super(LibraryLoaderWindow, self).__init__(parent) @@ -76,8 +76,8 @@ class LibraryLoaderWindow(QtWidgets.QDialog): projects_combobox.setItemDelegate(combobox_delegate) # Assets widget - assets_widget = AssetWidget( - dbcon, multiselection=True, parent=left_side_splitter + assets_widget = MultiSelectAssetsWidget( + dbcon, parent=left_side_splitter 
) # Families widget @@ -165,7 +165,6 @@ class LibraryLoaderWindow(QtWidgets.QDialog): ) assets_widget.selection_changed.connect(self.on_assetschanged) assets_widget.refresh_triggered.connect(self.on_assetschanged) - assets_widget.view.clicked.connect(self.on_assetview_click) subsets_widget.active_changed.connect(self.on_subsetschanged) subsets_widget.version_changed.connect(self.on_versionschanged) subsets_widget.refreshed.connect(self._on_subset_refresh) @@ -204,11 +203,6 @@ class LibraryLoaderWindow(QtWidgets.QDialog): self._initial_refresh = True self.refresh() - def on_assetview_click(self, *args): - selection_model = self._subsets_widget.view.selectionModel() - if selection_model.selectedIndexes(): - selection_model.clearSelection() - def _set_projects(self): # Store current project old_project_name = self.current_project @@ -348,25 +342,14 @@ class LibraryLoaderWindow(QtWidgets.QDialog): self._families_filter_view.set_enabled_families(set()) self._families_filter_view.refresh() - self._assets_widget.model.stop_fetch_thread() + self._assets_widget.stop_refresh() self._assets_widget.refresh() self._assets_widget.setFocus() def clear_assets_underlines(self): last_asset_ids = self.data["state"]["assetIds"] - if not last_asset_ids: - return - - assets_model = self._assets_widget.model - id_role = assets_model.ObjectIdRole - - for index in tools_lib.iter_model_rows(assets_model, 0): - if index.data(id_role) not in last_asset_ids: - continue - - assets_model.setData( - index, [], assets_model.subsetColorsRole - ) + if last_asset_ids: + self._assets_widget.clear_underlines() def _assetschanged(self): """Selected assets have changed""" @@ -382,12 +365,8 @@ class LibraryLoaderWindow(QtWidgets.QDialog): ) return - # filter None docs they are silo - asset_docs = self._assets_widget.get_selected_assets() - if len(asset_docs) == 0: - return + asset_ids = self._assets_widget.get_selected_asset_ids() - asset_ids = [asset_doc["_id"] for asset_doc in asset_docs] # Start 
loading self._subsets_widget.set_loading_state( loading=bool(asset_ids), @@ -402,7 +381,7 @@ class LibraryLoaderWindow(QtWidgets.QDialog): # Clear the version information on asset change self._version_info_widget.set_version(None) - self._thumbnail_widget.set_thumbnail(asset_docs) + self._thumbnail_widget.set_thumbnail(asset_ids) self.data["state"]["assetIds"] = asset_ids @@ -421,7 +400,7 @@ class LibraryLoaderWindow(QtWidgets.QDialog): _merged=True, _other=False ) - asset_models = {} + asset_colors = {} asset_ids = [] for subset_node in selected_subsets: asset_ids.extend(subset_node.get("assetIds", [])) @@ -429,30 +408,17 @@ class LibraryLoaderWindow(QtWidgets.QDialog): for subset_node in selected_subsets: for asset_id in asset_ids: - if asset_id not in asset_models: - asset_models[asset_id] = [] + if asset_id not in asset_colors: + asset_colors[asset_id] = [] color = None if asset_id in subset_node.get("assetIds", []): color = subset_node["subsetColor"] - asset_models[asset_id].append(color) + asset_colors[asset_id].append(color) - self.clear_assets_underlines() + self._assets_widget.set_underline_colors(asset_colors) - indexes = self._assets_widget.view.selectionModel().selectedRows() - - assets_model = self._assets_widget.model - for index in indexes: - id = index.data(assets_model.ObjectIdRole) - if id not in asset_models: - continue - - assets_model.setData( - index, asset_models[id], assets_model.subsetColorsRole - ) - # Trigger repaint - self._assets_widget.view.updateGeometries() # Set version in Version Widget self._versionschanged() @@ -489,13 +455,14 @@ class LibraryLoaderWindow(QtWidgets.QDialog): self._version_info_widget.set_version(version_doc) - thumbnail_docs = version_docs - if not thumbnail_docs: - asset_docs = self._assets_widget.get_selected_assets() - if len(asset_docs) > 0: - thumbnail_docs = asset_docs + thumbnail_src_ids = [ + version_doc["_id"] + for version_doc in version_docs + ] + if not thumbnail_src_ids: + thumbnail_src_ids = 
self._assets_widget.get_selected_asset_ids() - self._thumbnail_widget.set_thumbnail(thumbnail_docs) + self._thumbnail_widget.set_thumbnail(thumbnail_src_ids) version_ids = [doc["_id"] for doc in version_docs or []] if self._repres_widget: @@ -514,8 +481,8 @@ class LibraryLoaderWindow(QtWidgets.QDialog): None """ - asset = context.get("asset", None) - if asset is None: + asset_name = context.get("asset", None) + if asset_name is None: return if refresh: @@ -527,7 +494,7 @@ class LibraryLoaderWindow(QtWidgets.QDialog): # scheduled refresh and the silo tabs are not shown. self._refresh_assets() - self._assets_widget.select_assets(asset) + self._assets_widget.select_asset_by_name(asset_name) def _on_message_timeout(self): self._message_label.setText("") @@ -550,10 +517,7 @@ class LibraryLoaderWindow(QtWidgets.QDialog): return super(LibraryLoaderWindow, self).closeEvent(event) -def show( - debug=False, parent=None, icon=None, - show_projects=False, show_libraries=True -): +def show(debug=False, parent=None, show_projects=False, show_libraries=True): """Display Loader GUI Arguments: @@ -588,9 +552,9 @@ def show( import traceback sys.excepthook = lambda typ, val, tb: traceback.print_last() - with tools_lib.application(): + with tools_lib.qt_app_context(): window = LibraryLoaderWindow( - parent, icon, show_projects, show_libraries + parent, show_projects, show_libraries ) window.show() diff --git a/openpype/tools/loader/app.py b/openpype/tools/loader/app.py index 9a4f2f1984..b6becc3e9f 100644 --- a/openpype/tools/loader/app.py +++ b/openpype/tools/loader/app.py @@ -4,8 +4,8 @@ from Qt import QtWidgets, QtCore from avalon import api, io, pipeline from openpype import style -from openpype.tools.utils.widgets import AssetWidget from openpype.tools.utils import lib +from openpype.tools.utils.assets_widget import MultiSelectAssetsWidget from .widgets import ( SubsetWidget, @@ -65,8 +65,8 @@ class LoaderWindow(QtWidgets.QDialog): 
left_side_splitter.setOrientation(QtCore.Qt.Vertical) # Assets widget - assets_widget = AssetWidget( - io, multiselection=True, parent=left_side_splitter + assets_widget = MultiSelectAssetsWidget( + io, parent=left_side_splitter ) assets_widget.set_current_asset_btn_visibility(True) @@ -156,8 +156,6 @@ class LoaderWindow(QtWidgets.QDialog): ) assets_widget.selection_changed.connect(self.on_assetschanged) assets_widget.refresh_triggered.connect(self.on_assetschanged) - # TODO do not touch view in asset widget - assets_widget.view.clicked.connect(self.on_assetview_click) subsets_widget.active_changed.connect(self.on_subsetschanged) subsets_widget.version_changed.connect(self.on_versionschanged) subsets_widget.refreshed.connect(self._on_subset_refresh) @@ -216,12 +214,6 @@ class LoaderWindow(QtWidgets.QDialog): # Delay calling blocking methods # ------------------------------- - def on_assetview_click(self, *args): - # TODO do not touch inner attributes of subset widget - selection_model = self._subsets_widget.view.selectionModel() - if selection_model.selectedIndexes(): - selection_model.clearSelection() - def refresh(self): self.echo("Fetching results..") lib.schedule(self._refresh, 50, channel="mongo") @@ -271,7 +263,7 @@ class LoaderWindow(QtWidgets.QDialog): # Refresh families config self._families_filter_view.refresh() # Change to context asset on context change - self._assets_widget.select_assets(io.Session["AVALON_ASSET"]) + self._assets_widget.select_asset_by_name(io.Session["AVALON_ASSET"]) def _refresh(self): """Load assets from database""" @@ -292,20 +284,9 @@ class LoaderWindow(QtWidgets.QDialog): on selection change so they match current selection. 
""" # TODO do not touch inner attributes of asset widget - last_asset_ids = self.data["state"]["assetIds"] or [] - if not last_asset_ids: - return - - assets_widget = self._assets_widget - id_role = assets_widget.model.ObjectIdRole - - for index in lib.iter_model_rows(assets_widget.model, 0): - if index.data(id_role) not in last_asset_ids: - continue - - assets_widget.model.setData( - index, [], assets_widget.model.subsetColorsRole - ) + last_asset_ids = self.data["state"]["assetIds"] + if last_asset_ids: + self._assets_widget.clear_underlines() def _assetschanged(self): """Selected assets have changed""" @@ -317,9 +298,7 @@ class LoaderWindow(QtWidgets.QDialog): self.clear_assets_underlines() # filter None docs they are silo - asset_docs = self._assets_widget.get_selected_assets() - - asset_ids = [asset_doc["_id"] for asset_doc in asset_docs] + asset_ids = self._assets_widget.get_selected_asset_ids() # Start loading subsets_widget.set_loading_state( loading=bool(asset_ids), @@ -333,7 +312,7 @@ class LoaderWindow(QtWidgets.QDialog): ) # Clear the version information on asset change - self._thumbnail_widget.set_thumbnail(asset_docs) + self._thumbnail_widget.set_thumbnail(asset_ids) self._version_info_widget.set_version(None) self.data["state"]["assetIds"] = asset_ids @@ -353,7 +332,7 @@ class LoaderWindow(QtWidgets.QDialog): _merged=True, _other=False ) - asset_models = {} + asset_colors = {} asset_ids = [] for subset_node in selected_subsets: asset_ids.extend(subset_node.get("assetIds", [])) @@ -361,31 +340,17 @@ class LoaderWindow(QtWidgets.QDialog): for subset_node in selected_subsets: for asset_id in asset_ids: - if asset_id not in asset_models: - asset_models[asset_id] = [] + if asset_id not in asset_colors: + asset_colors[asset_id] = [] color = None if asset_id in subset_node.get("assetIds", []): color = subset_node["subsetColor"] - asset_models[asset_id].append(color) + asset_colors[asset_id].append(color) - self.clear_assets_underlines() + 
self._assets_widget.set_underline_colors(asset_colors) - # TODO do not use inner attributes of asset widget - assets_widget = self._assets_widget - indexes = assets_widget.view.selectionModel().selectedRows() - - for index in indexes: - id = index.data(assets_widget.model.ObjectIdRole) - if id not in asset_models: - continue - - assets_widget.model.setData( - index, asset_models[id], assets_widget.model.subsetColorsRole - ) - # Trigger repaint - assets_widget.view.updateGeometries() # Set version in Version Widget self._versionschanged() @@ -424,13 +389,14 @@ class LoaderWindow(QtWidgets.QDialog): self._version_info_widget.set_version(version_doc) - thumbnail_docs = version_docs - asset_docs = self._assets_widget.get_selected_assets() - if not thumbnail_docs: - if len(asset_docs) > 0: - thumbnail_docs = asset_docs + thumbnail_src_ids = [ + version_doc["_id"] + for version_doc in version_docs + ] + if not thumbnail_src_ids: + thumbnail_src_ids = self._assets_widget.get_selected_asset_ids() - self._thumbnail_widget.set_thumbnail(thumbnail_docs) + self._thumbnail_widget.set_thumbnail(thumbnail_src_ids) if self._repres_widget is not None: version_ids = [doc["_id"] for doc in version_docs or []] @@ -472,7 +438,7 @@ class LoaderWindow(QtWidgets.QDialog): # scheduled refresh and the silo tabs are not shown. 
self._refresh() - self._assets_widget.select_assets(asset) + self._assets_widget.select_asset_by_name(asset) def _on_message_timeout(self): self._message_label.setText("") @@ -665,7 +631,7 @@ def show(debug=False, parent=None, use_context=False): api.Session["AVALON_PROJECT"] = any_project["name"] module.project = any_project["name"] - with lib.application(): + with lib.qt_app_context(): window = LoaderWindow(parent) window.show() diff --git a/openpype/tools/loader/model.py b/openpype/tools/loader/model.py index d81fc11cf2..96a52fce97 100644 --- a/openpype/tools/loader/model.py +++ b/openpype/tools/loader/model.py @@ -243,9 +243,9 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): # update availability on active site when version changes if self.sync_server.enabled and version: - site = self.active_site query = self._repre_per_version_pipeline([version["_id"]], - site) + self.active_site, + self.remote_site) docs = list(self.dbcon.aggregate(query)) if docs: repre = docs.pop() @@ -801,47 +801,63 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): {"$unwind": "$files"}, {'$addFields': { 'order_local': { - '$filter': {'input': '$files.sites', 'as': 'p', - 'cond': {'$eq': ['$$p.name', active_site]} - }} + '$filter': { + 'input': '$files.sites', 'as': 'p', + 'cond': {'$eq': ['$$p.name', active_site]} + } + } }}, {'$addFields': { 'order_remote': { - '$filter': {'input': '$files.sites', 'as': 'p', - 'cond': {'$eq': ['$$p.name', remote_site]} - }} + '$filter': { + 'input': '$files.sites', 'as': 'p', + 'cond': {'$eq': ['$$p.name', remote_site]} + } + } }}, {'$addFields': { 'progress_local': {"$arrayElemAt": [{ - '$cond': [{'$size': "$order_local.progress"}, - "$order_local.progress", - # if exists created_dt count is as available - {'$cond': [ - {'$size': "$order_local.created_dt"}, - [1], - [0] - ]} - ]}, 0]} + '$cond': [ + {'$size': "$order_local.progress"}, + "$order_local.progress", + # if exists created_dt count is as available + {'$cond': [ + {'$size': 
"$order_local.created_dt"}, + [1], + [0] + ]} + ]}, + 0 + ]} }}, {'$addFields': { 'progress_remote': {"$arrayElemAt": [{ - '$cond': [{'$size': "$order_remote.progress"}, - "$order_remote.progress", - # if exists created_dt count is as available - {'$cond': [ - {'$size': "$order_remote.created_dt"}, - [1], - [0] - ]} - ]}, 0]} + '$cond': [ + {'$size': "$order_remote.progress"}, + "$order_remote.progress", + # if exists created_dt count is as available + {'$cond': [ + {'$size': "$order_remote.created_dt"}, + [1], + [0] + ]} + ]}, + 0 + ]} }}, {'$group': { # first group by repre '_id': '$_id', 'parent': {'$first': '$parent'}, - 'avail_ratio_local': {'$first': { - '$divide': [{'$sum': "$progress_local"}, {'$sum': 1}]}}, - 'avail_ratio_remote': {'$first': { - '$divide': [{'$sum': "$progress_remote"}, {'$sum': 1}]}} + 'avail_ratio_local': { + '$first': { + '$divide': [{'$sum': "$progress_local"}, {'$sum': 1}] + } + }, + 'avail_ratio_remote': { + '$first': { + '$divide': [{'$sum': "$progress_remote"}, {'$sum': 1}] + } + } }}, {'$group': { # second group by parent, eg version_id '_id': '$parent', diff --git a/openpype/tools/loader/widgets.py b/openpype/tools/loader/widgets.py index 08b58eebbe..ea45fd4364 100644 --- a/openpype/tools/loader/widgets.py +++ b/openpype/tools/loader/widgets.py @@ -16,11 +16,15 @@ from openpype.tools.utils.delegates import ( VersionDelegate, PrettyTimeDelegate ) -from openpype.tools.utils.widgets import OptionalMenu +from openpype.tools.utils.widgets import ( + OptionalMenu, + PlaceholderLineEdit +) from openpype.tools.utils.views import ( TreeViewSpinner, DeselectableTreeView ) +from openpype.tools.assetlinks.widgets import SimpleLinkView from .model import ( SubsetsModel, @@ -174,7 +178,7 @@ class SubsetWidget(QtWidgets.QWidget): family_proxy = FamiliesFilterProxyModel() family_proxy.setSourceModel(proxy) - subset_filter = QtWidgets.QLineEdit(self) + subset_filter = PlaceholderLineEdit(self) subset_filter.setPlaceholderText("Filter subsets..") 
group_checkbox = QtWidgets.QCheckBox("Enable Grouping", self) @@ -794,19 +798,24 @@ class ThumbnailWidget(QtWidgets.QLabel): QtCore.Qt.SmoothTransformation ) - def set_thumbnail(self, entity=None): - if not entity: + def set_thumbnail(self, doc_id=None): + if not doc_id: self.set_pixmap() return - if isinstance(entity, (list, tuple)): - if len(entity) == 1: - entity = entity[0] - else: + if isinstance(doc_id, (list, tuple)): + if len(doc_id) < 1: self.set_pixmap() return + doc_id = doc_id[0] - thumbnail_id = entity.get("data", {}).get("thumbnail_id") + doc = self.dbcon.find_one( + {"_id": doc_id}, + {"data.thumbnail_id"} + ) + thumbnail_id = None + if doc: + thumbnail_id = doc.get("data", {}).get("thumbnail_id") if thumbnail_id == self.current_thumb_id: if self.current_thumbnail is None: self.set_pixmap() @@ -841,19 +850,25 @@ class VersionWidget(QtWidgets.QWidget): def __init__(self, dbcon, parent=None): super(VersionWidget, self).__init__(parent=parent) - layout = QtWidgets.QVBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - label = QtWidgets.QLabel("Version", self) data = VersionTextEdit(dbcon, self) data.setReadOnly(True) - layout.addWidget(label) - layout.addWidget(data) + depend_widget = SimpleLinkView(dbcon, self) + + tab = QtWidgets.QTabWidget() + tab.addTab(data, "Version Info") + tab.addTab(depend_widget, "Dependency") + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(tab) self.data = data + self.depend_widget = depend_widget def set_version(self, version_doc): self.data.set_version(version_doc) + self.depend_widget.set_version(version_doc) class FamilyModel(QtGui.QStandardItemModel): diff --git a/openpype/tools/mayalookassigner/app.py b/openpype/tools/mayalookassigner/app.py index d723387f2d..fb99333f87 100644 --- a/openpype/tools/mayalookassigner/app.py +++ b/openpype/tools/mayalookassigner/app.py @@ -38,6 +38,7 @@ class App(QtWidgets.QWidget): # Store callback references self._callbacks = [] + 
self._connections_set_up = False filename = get_workfile() @@ -46,17 +47,10 @@ class App(QtWidgets.QWidget): self.setWindowFlags(QtCore.Qt.Window) self.setParent(parent) - # Force to delete the window on close so it triggers - # closeEvent only once. Otherwise it's retriggered when - # the widget gets garbage collected. - self.setAttribute(QtCore.Qt.WA_DeleteOnClose) - self.resize(750, 500) self.setup_ui() - self.setup_connections() - # Force refresh check on initialization self._on_renderlayer_switch() @@ -111,6 +105,16 @@ class App(QtWidgets.QWidget): asset_outliner.view.setColumnWidth(0, 200) look_outliner.view.setColumnWidth(0, 150) + asset_outliner.selection_changed.connect( + self.on_asset_selection_changed) + + asset_outliner.refreshed.connect( + lambda: self.echo("Loaded assets..") + ) + + look_outliner.menu_apply_action.connect(self.on_process_selected) + remove_unused_btn.clicked.connect(remove_unused_looks) + # Open widgets self.asset_outliner = asset_outliner self.look_outliner = look_outliner @@ -123,15 +127,8 @@ class App(QtWidgets.QWidget): def setup_connections(self): """Connect interactive widgets with actions""" - - self.asset_outliner.selection_changed.connect( - self.on_asset_selection_changed) - - self.asset_outliner.refreshed.connect( - lambda: self.echo("Loaded assets..")) - - self.look_outliner.menu_apply_action.connect(self.on_process_selected) - self.remove_unused.clicked.connect(remove_unused_looks) + if self._connections_set_up: + return # Maya renderlayer switch callback callback = om.MEventMessage.addEventCallback( @@ -139,14 +136,23 @@ class App(QtWidgets.QWidget): self._on_renderlayer_switch ) self._callbacks.append(callback) + self._connections_set_up = True - def closeEvent(self, event): - + def remove_connection(self): # Delete callbacks for callback in self._callbacks: om.MMessage.removeCallback(callback) - return super(App, self).closeEvent(event) + self._callbacks = [] + self._connections_set_up = False + + def showEvent(self, 
event): + self.setup_connections() + super(App, self).showEvent(event) + + def closeEvent(self, event): + self.remove_connection() + super(App, self).closeEvent(event) def _on_renderlayer_switch(self, *args): """Callback that updates on Maya renderlayer switch""" diff --git a/openpype/tools/project_manager/project_manager/images/bin.png b/openpype/tools/project_manager/project_manager/images/bin.png new file mode 100644 index 0000000000..9e5bc7a943 Binary files /dev/null and b/openpype/tools/project_manager/project_manager/images/bin.png differ diff --git a/openpype/tools/project_manager/project_manager/model.py b/openpype/tools/project_manager/project_manager/model.py index b7ab9e40d0..0c02872b4c 100644 --- a/openpype/tools/project_manager/project_manager/model.py +++ b/openpype/tools/project_manager/project_manager/model.py @@ -3,6 +3,10 @@ import copy import json from uuid import uuid4 +from pymongo import UpdateOne, DeleteOne + +from Qt import QtCore, QtGui + from .constants import ( IDENTIFIER_ROLE, ITEM_TYPE_ROLE, @@ -15,9 +19,6 @@ from .constants import ( from .style import ResourceCache from openpype.lib import CURRENT_DOC_SCHEMAS -from pymongo import UpdateOne, DeleteOne -from avalon.vendor import qtawesome -from Qt import QtCore, QtGui class ProjectModel(QtGui.QStandardItemModel): diff --git a/openpype/tools/project_manager/project_manager/style.py b/openpype/tools/project_manager/project_manager/style.py index 17e269c1f6..d3d6857a63 100644 --- a/openpype/tools/project_manager/project_manager/style.py +++ b/openpype/tools/project_manager/project_manager/style.py @@ -1,9 +1,14 @@ +import os +from Qt import QtCore, QtGui + from avalon.vendor import qtawesome class ResourceCache: + # TODO use colors from OpenPype style colors = { "standard": "#bfccd6", + "disabled": "#969696", "new": "#2d9a4c", "warning": "#c83232" } @@ -58,11 +63,62 @@ class ResourceCache: }, "refresh": qtawesome.icon( "fa.refresh", - color=cls.colors["standard"] - ) + 
color=cls.colors["standard"], + color_disabled=cls.colors["disabled"] + ), + "remove": cls.get_remove_icon() } return cls.icons @classmethod def get_color(cls, color_name): return cls.colors[color_name] + + @classmethod + def get_remove_icon(cls): + src_image = get_remove_image() + normal_pix = paint_image_with_color( + src_image, + QtGui.QColor(cls.colors["standard"]) + ) + disabled_pix = paint_image_with_color( + src_image, + QtGui.QColor(cls.colors["disabled"]) + ) + icon = QtGui.QIcon(normal_pix) + icon.addPixmap(disabled_pix, QtGui.QIcon.Disabled, QtGui.QIcon.On) + icon.addPixmap(disabled_pix, QtGui.QIcon.Disabled, QtGui.QIcon.Off) + return icon + + +def get_remove_image(): + image_path = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "images", + "bin.png" + ) + return QtGui.QImage(image_path) + + +def paint_image_with_color(image, color): + """TODO: This function should be imported from utils. + + At the moment of creation is not available yet. + """ + width = image.width() + height = image.height() + + alpha_mask = image.createAlphaMask() + alpha_region = QtGui.QRegion(QtGui.QBitmap.fromImage(alpha_mask)) + + pixmap = QtGui.QPixmap(width, height) + pixmap.fill(QtCore.Qt.transparent) + + painter = QtGui.QPainter(pixmap) + painter.setClipRegion(alpha_region) + painter.setPen(QtCore.Qt.NoPen) + painter.setBrush(color) + painter.drawRect(QtCore.QRect(0, 0, width, height)) + painter.end() + + return pixmap diff --git a/openpype/tools/project_manager/project_manager/widgets.py b/openpype/tools/project_manager/project_manager/widgets.py index 8c2f693f11..b4d791b6d5 100644 --- a/openpype/tools/project_manager/project_manager/widgets.py +++ b/openpype/tools/project_manager/project_manager/widgets.py @@ -288,3 +288,127 @@ class CreateProjectDialog(QtWidgets.QDialog): project_codes.add(project_code) return project_names, project_codes + + +class _SameSizeBtns(QtWidgets.QPushButton): + """Button that keep width of all button added as related. 
+ + This happens without changing min/max/fix size of button. Which is + welcomed for multidisplay desktops with different resolution. + """ + def __init__(self, *args, **kwargs): + super(_SameSizeBtns, self).__init__(*args, **kwargs) + self._related_btns = [] + + def add_related_btn(self, btn): + """Add related button which should be checked for width. + + Args: + btn (_SameSizeBtns): Other object of _SameSizeBtns. + """ + self._related_btns.append(btn) + + def hint_width(self): + """Get size hint of button not related to others.""" + return super(_SameSizeBtns, self).sizeHint().width() + + def sizeHint(self): + """Calculate size hint based on size hint of this button and related. + + If width is lower than any other button it is changed to higher. + """ + result = super(_SameSizeBtns, self).sizeHint() + width = result.width() + for btn in self._related_btns: + btn_width = btn.hint_width() + if btn_width > width: + width = btn_width + + result.setWidth(width) + return result + + +class ConfirmProjectDeletion(QtWidgets.QDialog): + """Dialog which confirms deletion of a project.""" + def __init__(self, project_name, parent): + super(ConfirmProjectDeletion, self).__init__(parent) + + self.setWindowTitle("Delete project?") + + message = ( + "Project \"{}\" with all related data will be" + " permanently removed from the database (This actions won't remove" + " any files on disk)." 
+ ).format(project_name) + message_label = QtWidgets.QLabel(message, self) + message_label.setWordWrap(True) + + question_label = QtWidgets.QLabel("Are you sure?", self) + + confirm_input = QtWidgets.QLineEdit(self) + confirm_input.setPlaceholderText("Type \"Delete\" to confirm...") + + cancel_btn = _SameSizeBtns("Cancel", self) + cancel_btn.setToolTip("Cancel deletion of the project") + confirm_btn = _SameSizeBtns("Delete", self) + confirm_btn.setEnabled(False) + confirm_btn.setToolTip("Confirm deletion") + + cancel_btn.add_related_btn(confirm_btn) + confirm_btn.add_related_btn(cancel_btn) + + btns_layout = QtWidgets.QHBoxLayout() + btns_layout.addStretch(1) + btns_layout.addWidget(cancel_btn, 0) + btns_layout.addWidget(confirm_btn, 0) + + layout = QtWidgets.QVBoxLayout(self) + layout.addWidget(message_label, 0) + layout.addStretch(1) + layout.addWidget(question_label, 0) + layout.addWidget(confirm_input, 0) + layout.addLayout(btns_layout) + + cancel_btn.clicked.connect(self._on_cancel_click) + confirm_btn.clicked.connect(self._on_confirm_click) + confirm_input.textChanged.connect(self._on_confirm_text_change) + confirm_input.returnPressed.connect(self._on_enter_clicked) + + self._cancel_btn = cancel_btn + self._confirm_btn = confirm_btn + self._confirm_input = confirm_input + self._result = 0 + + self.setMinimumWidth(480) + self.setMaximumWidth(650) + self.setMaximumHeight(250) + + def exec_(self, *args, **kwargs): + super(ConfirmProjectDeletion, self).exec_(*args, **kwargs) + return self._result + + def showEvent(self, event): + """Reset result on show.""" + super(ConfirmProjectDeletion, self).showEvent(event) + self._result = 0 + minimum_size_hint = self.minimumSizeHint() + self.resize(self.width(), minimum_size_hint.height() + 30) + + def result(self): + """Get result of dialog 1 for confirm 0 for cancel.""" + return self._result + + def _on_cancel_click(self): + self.close() + + def _on_confirm_click(self): + self._result = 1 + self.close() + + def 
_on_enter_clicked(self): + if self._confirm_btn.isEnabled(): + self._on_confirm_click() + + def _on_confirm_text_change(self): + enabled = self._confirm_input.text().lower() == "delete" + self._confirm_btn.setEnabled(enabled) diff --git a/openpype/tools/project_manager/project_manager/window.py b/openpype/tools/project_manager/project_manager/window.py index a19031ceda..a05811e813 100644 --- a/openpype/tools/project_manager/project_manager/window.py +++ b/openpype/tools/project_manager/project_manager/window.py @@ -11,6 +11,7 @@ from . import ( CreateProjectDialog, PROJECT_NAME_ROLE ) +from .widgets import ConfirmProjectDeletion from .style import ResourceCache from openpype.style import load_stylesheet from openpype.lib import is_admin_password_required @@ -77,6 +78,10 @@ class ProjectManagerWindow(QtWidgets.QWidget): ) create_folders_btn.setEnabled(False) + remove_projects_btn = QtWidgets.QPushButton(project_widget) + remove_projects_btn.setIcon(ResourceCache.get_icon("remove")) + remove_projects_btn.setObjectName("IconBtn") + project_layout = QtWidgets.QHBoxLayout(project_widget) project_layout.setContentsMargins(0, 0, 0, 0) project_layout.addWidget(project_combobox, 0) @@ -84,6 +89,7 @@ class ProjectManagerWindow(QtWidgets.QWidget): project_layout.addWidget(create_project_btn, 0) project_layout.addWidget(create_folders_btn) project_layout.addStretch(1) + project_layout.addWidget(remove_projects_btn) # Helper buttons helper_btns_widget = QtWidgets.QWidget(top_part_widget) @@ -145,11 +151,13 @@ class ProjectManagerWindow(QtWidgets.QWidget): refresh_projects_btn.clicked.connect(self._on_project_refresh) create_project_btn.clicked.connect(self._on_project_create) create_folders_btn.clicked.connect(self._on_create_folders) + remove_projects_btn.clicked.connect(self._on_remove_project) project_combobox.currentIndexChanged.connect(self._on_project_change) save_btn.clicked.connect(self._on_save_click) add_asset_btn.clicked.connect(self._on_add_asset) 
add_task_btn.clicked.connect(self._on_add_task) + self._dbcon = dbcon self._project_model = project_model self._project_proxy_model = project_proxy @@ -162,6 +170,7 @@ class ProjectManagerWindow(QtWidgets.QWidget): self._project_combobox = project_combobox self._create_project_btn = create_project_btn self._create_folders_btn = create_folders_btn + self._remove_projects_btn = remove_projects_btn self._add_asset_btn = add_asset_btn self._add_task_btn = add_task_btn @@ -171,6 +180,7 @@ class ProjectManagerWindow(QtWidgets.QWidget): def _set_project(self, project_name=None): self._create_folders_btn.setEnabled(project_name is not None) + self._remove_projects_btn.setEnabled(project_name is not None) self._project_proxy_model.set_filter_default(project_name is not None) self.hierarchy_view.set_project(project_name) @@ -252,6 +262,19 @@ class ProjectManagerWindow(QtWidgets.QWidget): exc_info=True ) + def _on_remove_project(self): + project_name = self._current_project() + dialog = ConfirmProjectDeletion(project_name, self) + result = dialog.exec_() + if result != 1: + return + + database = self._dbcon.database + if project_name in database.collection_names(): + collection = database[project_name] + collection.drop() + self.refresh_projects() + def show_message(self, message): # TODO add nicer message pop self.message_label.setText(message) diff --git a/openpype/tools/publisher/constants.py b/openpype/tools/publisher/constants.py index cf0850bde8..dc44aade45 100644 --- a/openpype/tools/publisher/constants.py +++ b/openpype/tools/publisher/constants.py @@ -6,7 +6,6 @@ CONTEXT_LABEL = "Options" # Allowed symbols for subset name (and variant) # - characters, numbers, unsercore and dash -SUBSET_NAME_ALLOWED_SYMBOLS = "a-zA-Z0-9_." VARIANT_TOOLTIP = ( "Variant may contain alphabetical characters (a-Z)" "\nnumerical characters (0-9) dot (\".\") or underscore (\"_\")." 
@@ -23,7 +22,6 @@ FAMILY_ROLE = QtCore.Qt.UserRole + 5 __all__ = ( "CONTEXT_ID", - "SUBSET_NAME_ALLOWED_SYMBOLS", "VARIANT_TOOLTIP", "INSTANCE_ID_ROLE", diff --git a/openpype/tools/publisher/widgets/create_dialog.py b/openpype/tools/publisher/widgets/create_dialog.py index 0206f038fb..84fc6d4e97 100644 --- a/openpype/tools/publisher/widgets/create_dialog.py +++ b/openpype/tools/publisher/widgets/create_dialog.py @@ -9,11 +9,13 @@ except Exception: commonmark = None from Qt import QtWidgets, QtCore, QtGui -from openpype.pipeline.create import CreatorError +from openpype.pipeline.create import ( + CreatorError, + SUBSET_NAME_ALLOWED_SYMBOLS +) from .widgets import IconValuePixmapLabel from ..constants import ( - SUBSET_NAME_ALLOWED_SYMBOLS, VARIANT_TOOLTIP, CREATOR_IDENTIFIER_ROLE, FAMILY_ROLE diff --git a/openpype/tools/publisher/widgets/list_view_widgets.py b/openpype/tools/publisher/widgets/list_view_widgets.py index e87ea3e130..4b2082e523 100644 --- a/openpype/tools/publisher/widgets/list_view_widgets.py +++ b/openpype/tools/publisher/widgets/list_view_widgets.py @@ -785,7 +785,7 @@ class InstanceListView(AbstractInstanceView): group_index = self._instance_model.index( group_item.row(), group_item.column() ) - proxy_index = self.mapFromSource(group_index) + proxy_index = self._proxy_model.mapFromSource(group_index) self._instance_view.setExpanded(proxy_index, expanded) def _on_group_toggle_request(self, group_name, state): @@ -810,6 +810,6 @@ class InstanceListView(AbstractInstanceView): self._change_active_instances(instance_ids, active) - proxy_index = self.mapFromSource(group_item.index()) + proxy_index = self._proxy_model.mapFromSource(group_item.index()) if not self._instance_view.isExpanded(proxy_index): self._instance_view.expand(proxy_index) diff --git a/openpype/tools/publisher/widgets/widgets.py b/openpype/tools/publisher/widgets/widgets.py index 606985c058..fe00ee78d3 100644 --- a/openpype/tools/publisher/widgets/widgets.py +++ 
b/openpype/tools/publisher/widgets/widgets.py @@ -9,7 +9,7 @@ from avalon.vendor import qtawesome from openpype.widgets.attribute_defs import create_widget_for_attr_def from openpype.tools.flickcharm import FlickCharm - +from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS from .models import ( AssetsHierarchyModel, TasksModel, @@ -21,7 +21,6 @@ from .icons import ( ) from ..constants import ( - SUBSET_NAME_ALLOWED_SYMBOLS, VARIANT_TOOLTIP ) diff --git a/openpype/tools/repack_version.py b/openpype/tools/repack_version.py new file mode 100644 index 0000000000..0172264c79 --- /dev/null +++ b/openpype/tools/repack_version.py @@ -0,0 +1,164 @@ +# -*- coding: utf-8 -*- +"""Script to rehash and repack current version.""" + +import enlighten +import blessed +from pathlib import Path +import platform +from zipfile import ZipFile +from typing import List +import hashlib +import sys +from igniter.bootstrap_repos import OpenPypeVersion + + +class VersionRepacker: + + def __init__(self, directory: str): + self._term = blessed.Terminal() + self._manager = enlighten.get_manager() + self._last_increment = 0 + self.version_path = Path(directory) + self.zip_path = self.version_path.parent + _version = {} + with open(self.version_path / "openpype" / "version.py") as fp: + exec(fp.read(), _version) + self._version_py = _version["__version__"] + del _version + + def _print(self, msg: str, message_type: int = 0) -> None: + """Print message to console. + + Args: + msg (str): message to print + message_type (int): type of message (0 info, 1 error, 2 note) + + """ + if message_type == 0: + header = self._term.aquamarine3(">>> ") + elif message_type == 1: + header = self._term.orangered2("!!! ") + elif message_type == 2: + header = self._term.tan1("... ") + else: + header = self._term.darkolivegreen3("--- ") + + print("{}{}".format(header, msg)) + + @staticmethod + def sha256sum(filename): + """Calculate sha256 for content of the file. + + Args: + filename (str): Path to file. 
+ + Returns: + str: hex encoded sha256 + + """ + h = hashlib.sha256() + b = bytearray(128 * 1024) + mv = memoryview(b) + with open(filename, 'rb', buffering=0) as f: + for n in iter(lambda: f.readinto(mv), 0): + h.update(mv[:n]) + return h.hexdigest() + + @staticmethod + def _filter_dir(path: Path, path_filter: List) -> List[Path]: + """Recursively crawl over path and filter.""" + result = [] + for item in path.iterdir(): + if item.name in path_filter: + continue + if item.name.startswith('.'): + continue + if item.is_dir(): + result.extend(VersionRepacker._filter_dir(item, path_filter)) + else: + result.append(item) + return result + + def process(self): + if (self.version_path / "pyproject.toml").exists(): + self._print( + ("This cannot run on OpenPype sources. " + "Please run it on extracted version."), 1) + return + self._print(f"Rehashing and zipping {self.version_path}") + version = OpenPypeVersion.version_in_str(self.version_path.name) + if not version: + self._print("Cannot get version from directory", 1) + return + + self._print(f"Detected version is {version}") + # replace version in version.py + self._replace_version(version, self.version_path) + self._print("Recalculating checksums ...", 2) + + checksums = [] + + file_list = VersionRepacker._filter_dir(self.version_path, []) + progress_bar = enlighten.Counter( + total=len(file_list), desc="Calculating checksums", + nits="%", color="green") + for file in file_list: + checksums.append(( + VersionRepacker.sha256sum(file.as_posix()), + file.resolve().relative_to(self.version_path), + file + )) + progress_bar.update() + progress_bar.close() + + progress_bar = enlighten.Counter( + total=len(checksums), desc="Zipping directory", + nits="%", color=(56, 211, 159)) + + zip_filename = self.zip_path / f"openpype-v{version}.zip" + with ZipFile(zip_filename, "w") as zip_file: + + for item in checksums: + if item[1].as_posix() == "checksums": + progress_bar.update() + continue + zip_file.write(item[2], item[1]) + 
progress_bar.update() + + checksums_str = "" + for c in checksums: + file_str = c[1] + if platform.system().lower() == "windows": + file_str = c[1].as_posix().replace("\\", "/") + checksums_str += "{}:{}\n".format(c[0], file_str) + zip_file.writestr("checksums", checksums_str) + # test if zip is ok + zip_file.testzip() + self._print(f"All done, you can find new zip here: {zip_filename}") + + @staticmethod + def _replace_version(version: OpenPypeVersion, path: Path): + """Replace version in version.py. + + Args: + version (OpenPypeVersion): OpenPype version to set + path (Path): Path to unzipped version. + + """ + with open(path / "openpype" / "version.py", "r") as op_version_file: + replacement = "" + + for line in op_version_file: + stripped_line = line.strip() + if stripped_line.strip().startswith("__version__ ="): + line = f'__version__ = "{version}"\n' + replacement += line + + with open(path / "openpype" / "version.py", "w") as op_version_file: + op_version_file.write(replacement) + + +if __name__ == '__main__': + print(sys.argv[1]) + version_packer = VersionRepacker(sys.argv[1]) + version_packer.process() diff --git a/openpype/tools/sceneinventory/__init__.py b/openpype/tools/sceneinventory/__init__.py new file mode 100644 index 0000000000..410b52e5fe --- /dev/null +++ b/openpype/tools/sceneinventory/__init__.py @@ -0,0 +1,9 @@ +from .window import ( + show, + SceneInventoryWindow +) + +__all__ = ( + "show", + "SceneInventoryWindow" +) diff --git a/openpype/tools/sceneinventory/lib.py b/openpype/tools/sceneinventory/lib.py new file mode 100644 index 0000000000..7653e1da89 --- /dev/null +++ b/openpype/tools/sceneinventory/lib.py @@ -0,0 +1,82 @@ +import os +from openpype_modules import sync_server + +from Qt import QtGui + + +def walk_hierarchy(node): + """Recursively yield group node.""" + for child in node.children(): + if child.get("isGroupNode"): + yield child + + for _child in walk_hierarchy(child): + yield _child + + +def get_site_icons(): + 
resource_path = os.path.join( + os.path.dirname(sync_server.sync_server_module.__file__), + "providers", + "resources" + ) + icons = {} + # TODO get from sync module + for provider in ["studio", "local_drive", "gdrive"]: + pix_url = "{}/{}.png".format(resource_path, provider) + icons[provider] = QtGui.QIcon(pix_url) + + return icons + + +def get_progress_for_repre(repre_doc, active_site, remote_site): + """ + Calculates average progress for representation. + + If site has created_dt >> fully available >> progress == 1 + + Could be calculated in aggregate if it would be too slow + Args: + repre_doc(dict): representation dict + Returns: + (dict) with active and remote sites progress + {'studio': 1.0, 'gdrive': -1} - gdrive site is not present + -1 is used to highlight the site should be added + {'studio': 1.0, 'gdrive': 0.0} - gdrive site is present, not + uploaded yet + """ + progress = {active_site: -1, remote_site: -1} + if not repre_doc: + return progress + + files = {active_site: 0, remote_site: 0} + doc_files = repre_doc.get("files") or [] + for doc_file in doc_files: + if not isinstance(doc_file, dict): + continue + + sites = doc_file.get("sites") or [] + for site in sites: + if ( + # Pype 2 compatibility + not isinstance(site, dict) + # Check if site name is one of progress sites + or site["name"] not in progress + ): + continue + + files[site["name"]] += 1 + norm_progress = max(progress[site["name"]], 0) + if site.get("created_dt"): + progress[site["name"]] = norm_progress + 1 + elif site.get("progress"): + progress[site["name"]] = norm_progress + site["progress"] + else: # site exists, might be failed, do not add again + progress[site["name"]] = 0 + + # for example 13 fully avail. 
files out of 26 >> 13/26 = 0.5 + avg_progress = { + active_site: progress[active_site] / max(files[active_site], 1), + remote_site: progress[remote_site] / max(files[remote_site], 1) + } + return avg_progress diff --git a/openpype/tools/sceneinventory/model.py b/openpype/tools/sceneinventory/model.py new file mode 100644 index 0000000000..d2b7f8b70f --- /dev/null +++ b/openpype/tools/sceneinventory/model.py @@ -0,0 +1,576 @@ +import re +import logging + +from collections import defaultdict + +from Qt import QtCore, QtGui +from avalon import api, io, style, schema +from avalon.vendor import qtawesome + +from avalon.lib import HeroVersionType +from avalon.tools.models import TreeModel, Item + +from .lib import ( + get_site_icons, + walk_hierarchy, + get_progress_for_repre +) + +from openpype.modules import ModulesManager + + +class InventoryModel(TreeModel): + """The model for the inventory""" + + Columns = ["Name", "version", "count", "family", "loader", "objectName"] + + OUTDATED_COLOR = QtGui.QColor(235, 30, 30) + CHILD_OUTDATED_COLOR = QtGui.QColor(200, 160, 30) + GRAYOUT_COLOR = QtGui.QColor(160, 160, 160) + + UniqueRole = QtCore.Qt.UserRole + 2 # unique label role + + def __init__(self, family_config_cache, parent=None): + super(InventoryModel, self).__init__(parent) + self.log = logging.getLogger(self.__class__.__name__) + + self.family_config_cache = family_config_cache + + self._hierarchy_view = False + + manager = ModulesManager() + sync_server = manager.modules_by_name["sync_server"] + self.sync_enabled = sync_server.enabled + self._site_icons = {} + self.active_site = self.remote_site = None + self.active_provider = self.remote_provider = None + + if not self.sync_enabled: + return + + project_name = io.Session["AVALON_PROJECT"] + active_site = sync_server.get_active_site(project_name) + remote_site = sync_server.get_remote_site(project_name) + + active_provider = "studio" + remote_provider = "studio" + if active_site != "studio": + # sanitized for icon + 
active_provider = sync_server.get_provider_for_site( + project_name, active_site + ) + + if remote_site != "studio": + remote_provider = sync_server.get_provider_for_site( + project_name, remote_site + ) + + # self.sync_server = sync_server + self.active_site = active_site + self.active_provider = active_provider + self.remote_site = remote_site + self.remote_provider = remote_provider + self._site_icons = get_site_icons() + if "active_site" not in self.Columns: + self.Columns.append("active_site") + if "remote_site" not in self.Columns: + self.Columns.append("remote_site") + + def outdated(self, item): + value = item.get("version") + if isinstance(value, HeroVersionType): + return False + + if item.get("version") == item.get("highest_version"): + return False + return True + + def data(self, index, role): + if not index.isValid(): + return + + item = index.internalPointer() + + if role == QtCore.Qt.FontRole: + # Make top-level entries bold + if item.get("isGroupNode") or item.get("isNotSet"): # group-item + font = QtGui.QFont() + font.setBold(True) + return font + + if role == QtCore.Qt.ForegroundRole: + # Set the text color to the OUTDATED_COLOR when the + # collected version is not the same as the highest version + key = self.Columns[index.column()] + if key == "version": # version + if item.get("isGroupNode"): # group-item + if self.outdated(item): + return self.OUTDATED_COLOR + + if self._hierarchy_view: + # If current group is not outdated, check if any + # outdated children. + for _node in walk_hierarchy(item): + if self.outdated(_node): + return self.CHILD_OUTDATED_COLOR + else: + + if self._hierarchy_view: + # Although this is not a group item, we still need + # to distinguish which one contain outdated child. 
+ for _node in walk_hierarchy(item): + if self.outdated(_node): + return self.CHILD_OUTDATED_COLOR.darker(150) + + return self.GRAYOUT_COLOR + + if key == "Name" and not item.get("isGroupNode"): + return self.GRAYOUT_COLOR + + # Add icons + if role == QtCore.Qt.DecorationRole: + if index.column() == 0: + # Override color + color = item.get("color", style.colors.default) + if item.get("isGroupNode"): # group-item + return qtawesome.icon("fa.folder", color=color) + if item.get("isNotSet"): + return qtawesome.icon("fa.exclamation-circle", color=color) + + return qtawesome.icon("fa.file-o", color=color) + + if index.column() == 3: + # Family icon + return item.get("familyIcon", None) + + if item.get("isGroupNode"): + column_name = self.Columns[index.column()] + if column_name == "active_site": + provider = item.get("active_site_provider") + return self._site_icons.get(provider) + + if column_name == "remote_site": + provider = item.get("remote_site_provider") + return self._site_icons.get(provider) + + if role == QtCore.Qt.DisplayRole and item.get("isGroupNode"): + column_name = self.Columns[index.column()] + progress = None + if column_name == 'active_site': + progress = item.get("active_site_progress", 0) + elif column_name == 'remote_site': + progress = item.get("remote_site_progress", 0) + if progress is not None: + return "{}%".format(max(progress, 0) * 100) + + if role == self.UniqueRole: + return item["representation"] + item.get("objectName", "") + + return super(InventoryModel, self).data(index, role) + + def set_hierarchy_view(self, state): + """Set whether to display subsets in hierarchy view.""" + state = bool(state) + + if state != self._hierarchy_view: + self._hierarchy_view = state + + def refresh(self, selected=None, items=None): + """Refresh the model""" + + host = api.registered_host() + if not items: # for debugging or testing, injecting items from outside + items = host.ls() + + self.clear() + + if self._hierarchy_view and selected: + + if not 
hasattr(host.pipeline, "update_hierarchy"): + # If host doesn't support hierarchical containers, then + # cherry-pick only. + self.add_items((item for item in items + if item["objectName"] in selected)) + + # Update hierarchy info for all containers + items_by_name = {item["objectName"]: item + for item in host.pipeline.update_hierarchy(items)} + + selected_items = set() + + def walk_children(names): + """Select containers and extend to chlid containers""" + for name in [n for n in names if n not in selected_items]: + selected_items.add(name) + item = items_by_name[name] + yield item + + for child in walk_children(item["children"]): + yield child + + items = list(walk_children(selected)) # Cherry-picked and extended + + # Cut unselected upstream containers + for item in items: + if not item.get("parent") in selected_items: + # Parent not in selection, this is root item. + item["parent"] = None + + parents = [self._root_item] + + # The length of `items` array is the maximum depth that a + # hierarchy could be. + # Take this as an easiest way to prevent looping forever. + maximum_loop = len(items) + count = 0 + while items: + if count > maximum_loop: + self.log.warning("Maximum loop count reached, possible " + "missing parent node.") + break + + _parents = list() + for parent in parents: + _unparented = list() + + def _children(): + """Child item provider""" + for item in items: + if item.get("parent") == parent.get("objectName"): + # (NOTE) + # Since `self._root_node` has no "objectName" + # entry, it will be paired with root item if + # the value of key "parent" is None, or not + # having the key. 
+ yield item + else: + # Not current parent's child, try next + _unparented.append(item) + + self.add_items(_children(), parent) + + items[:] = _unparented + + # Parents of next level + for group_node in parent.children(): + _parents += group_node.children() + + parents[:] = _parents + count += 1 + + else: + self.add_items(items) + + def add_items(self, items, parent=None): + """Add the items to the model. + + The items should be formatted similar to `api.ls()` returns, an item + is then represented as: + {"filename_v001.ma": [full/filename/of/loaded/filename_v001.ma, + full/filename/of/loaded/filename_v001.ma], + "nodetype" : "reference", + "node": "referenceNode1"} + + Note: When performing an additional call to `add_items` it will *not* + group the new items with previously existing item groups of the + same type. + + Args: + items (generator): the items to be processed as returned by `ls()` + parent (Item, optional): Set this item as parent for the added + items when provided. Defaults to the root of the model. 
+ + Returns: + node.Item: root node which has children added based on the data + """ + + self.beginResetModel() + + # Group by representation + grouped = defaultdict(lambda: {"items": list()}) + for item in items: + grouped[item["representation"]]["items"].append(item) + + # Add to model + not_found = defaultdict(list) + not_found_ids = [] + for repre_id, group_dict in sorted(grouped.items()): + group_items = group_dict["items"] + # Get parenthood per group + representation = io.find_one({"_id": io.ObjectId(repre_id)}) + if not representation: + not_found["representation"].append(group_items) + not_found_ids.append(repre_id) + continue + + version = io.find_one({"_id": representation["parent"]}) + if not version: + not_found["version"].append(group_items) + not_found_ids.append(repre_id) + continue + + elif version["type"] == "hero_version": + _version = io.find_one({ + "_id": version["version_id"] + }) + version["name"] = HeroVersionType(_version["name"]) + version["data"] = _version["data"] + + subset = io.find_one({"_id": version["parent"]}) + if not subset: + not_found["subset"].append(group_items) + not_found_ids.append(repre_id) + continue + + asset = io.find_one({"_id": subset["parent"]}) + if not asset: + not_found["asset"].append(group_items) + not_found_ids.append(repre_id) + continue + + grouped[repre_id].update({ + "representation": representation, + "version": version, + "subset": subset, + "asset": asset + }) + + for id in not_found_ids: + grouped.pop(id) + + for where, group_items in not_found.items(): + # create the group header + group_node = Item() + name = "< NOT FOUND - {} >".format(where) + group_node["Name"] = name + group_node["representation"] = name + group_node["count"] = len(group_items) + group_node["isGroupNode"] = False + group_node["isNotSet"] = True + + self.add_child(group_node, parent=parent) + + for _group_items in group_items: + item_node = Item() + item_node["Name"] = ", ".join( + [item["objectName"] for item in _group_items] + 
) + self.add_child(item_node, parent=group_node) + + for repre_id, group_dict in sorted(grouped.items()): + group_items = group_dict["items"] + representation = grouped[repre_id]["representation"] + version = grouped[repre_id]["version"] + subset = grouped[repre_id]["subset"] + asset = grouped[repre_id]["asset"] + + # Get the primary family + no_family = "" + maj_version, _ = schema.get_schema_version(subset["schema"]) + if maj_version < 3: + prim_family = version["data"].get("family") + if not prim_family: + families = version["data"].get("families") + prim_family = families[0] if families else no_family + else: + families = subset["data"].get("families") or [] + prim_family = families[0] if families else no_family + + # Get the label and icon for the family if in configuration + family_config = self.family_config_cache.family_config(prim_family) + family = family_config.get("label", prim_family) + family_icon = family_config.get("icon", None) + + # Store the highest available version so the model can know + # whether current version is currently up-to-date. 
+ highest_version = io.find_one({ + "type": "version", + "parent": version["parent"] + }, sort=[("name", -1)]) + + # create the group header + group_node = Item() + group_node["Name"] = "%s_%s: (%s)" % (asset["name"], + subset["name"], + representation["name"]) + group_node["representation"] = repre_id + group_node["version"] = version["name"] + group_node["highest_version"] = highest_version["name"] + group_node["family"] = family + group_node["familyIcon"] = family_icon + group_node["count"] = len(group_items) + group_node["isGroupNode"] = True + + if self.sync_enabled: + progress = get_progress_for_repre( + representation, self.active_site, self.remote_site + ) + group_node["active_site"] = self.active_site + group_node["active_site_provider"] = self.active_provider + group_node["remote_site"] = self.remote_site + group_node["remote_site_provider"] = self.remote_provider + group_node["active_site_progress"] = progress[self.active_site] + group_node["remote_site_progress"] = progress[self.remote_site] + + self.add_child(group_node, parent=parent) + + for item in group_items: + item_node = Item() + item_node.update(item) + + # store the current version on the item + item_node["version"] = version["name"] + + # Remapping namespace to item name. + # Noted that the name key is capital "N", by doing this, we + # can view namespace in GUI without changing container data. 
+ item_node["Name"] = item["namespace"] + + self.add_child(item_node, parent=group_node) + + self.endResetModel() + + return self._root_item + + +class FilterProxyModel(QtCore.QSortFilterProxyModel): + """Filter model to where key column's value is in the filtered tags""" + + def __init__(self, *args, **kwargs): + super(FilterProxyModel, self).__init__(*args, **kwargs) + self._filter_outdated = False + self._hierarchy_view = False + + def filterAcceptsRow(self, row, parent): + model = self.sourceModel() + source_index = model.index(row, self.filterKeyColumn(), parent) + + # Always allow bottom entries (individual containers), since their + # parent group hidden if it wouldn't have been validated. + rows = model.rowCount(source_index) + if not rows: + return True + + # Filter by regex + if not self.filterRegExp().isEmpty(): + pattern = re.escape(self.filterRegExp().pattern()) + + if not self._matches(row, parent, pattern): + return False + + if self._filter_outdated: + # When filtering to outdated we filter the up to date entries + # thus we "allow" them when they are outdated + if not self._is_outdated(row, parent): + return False + + return True + + def set_filter_outdated(self, state): + """Set whether to show the outdated entries only.""" + state = bool(state) + + if state != self._filter_outdated: + self._filter_outdated = bool(state) + self.invalidateFilter() + + def set_hierarchy_view(self, state): + state = bool(state) + + if state != self._hierarchy_view: + self._hierarchy_view = state + + def _is_outdated(self, row, parent): + """Return whether row is outdated. + + A row is considered outdated if it has "version" and "highest_version" + data and in the internal data structure, and they are not of an + equal value. 
+ + """ + def outdated(node): + version = node.get("version", None) + highest = node.get("highest_version", None) + + # Always allow indices that have no version data at all + if version is None and highest is None: + return True + + # If either a version or highest is present but not the other + # consider the item invalid. + if not self._hierarchy_view: + # Skip this check if in hierarchy view, or the child item + # node will be hidden even it's actually outdated. + if version is None or highest is None: + return False + return version != highest + + index = self.sourceModel().index(row, self.filterKeyColumn(), parent) + + # The scene contents are grouped by "representation", e.g. the same + # "representation" loaded twice is grouped under the same header. + # Since the version check filters these parent groups we skip that + # check for the individual children. + has_parent = index.parent().isValid() + if has_parent and not self._hierarchy_view: + return True + + # Filter to those that have the different version numbers + node = index.internalPointer() + if outdated(node): + return True + + if self._hierarchy_view: + for _node in walk_hierarchy(node): + if outdated(_node): + return True + + return False + + def _matches(self, row, parent, pattern): + """Return whether row matches regex pattern. 
+ + Args: + row (int): row number in model + parent (QtCore.QModelIndex): parent index + pattern (regex.pattern): pattern to check for in key + + Returns: + bool + + """ + model = self.sourceModel() + column = self.filterKeyColumn() + role = self.filterRole() + + def matches(row, parent, pattern): + index = model.index(row, column, parent) + key = model.data(index, role) + if re.search(pattern, key, re.IGNORECASE): + return True + + if matches(row, parent, pattern): + return True + + # Also allow if any of the children matches + source_index = model.index(row, column, parent) + rows = model.rowCount(source_index) + + if any( + matches(idx, source_index, pattern) + for idx in range(rows) + ): + return True + + if not self._hierarchy_view: + return False + + for idx in range(rows): + child_index = model.index(idx, column, source_index) + child_rows = model.rowCount(child_index) + return any( + self._matches(child_idx, child_index, pattern) + for child_idx in range(child_rows) + ) + + return True diff --git a/openpype/tools/sceneinventory/switch_dialog.py b/openpype/tools/sceneinventory/switch_dialog.py new file mode 100644 index 0000000000..75e2b6be40 --- /dev/null +++ b/openpype/tools/sceneinventory/switch_dialog.py @@ -0,0 +1,1371 @@ +import collections +import logging +from Qt import QtWidgets, QtCore + +from avalon import io, api, pipeline +from avalon.vendor import qtawesome + +from .widgets import ( + ButtonWithMenu, + SearchComboBox +) + +log = logging.getLogger("SwitchAssetDialog") + + +class ValidationState: + def __init__(self): + self.asset_ok = True + self.subset_ok = True + self.repre_ok = True + + @property + def all_ok(self): + return ( + self.asset_ok + and self.subset_ok + and self.repre_ok + ) + + +class SwitchAssetDialog(QtWidgets.QDialog): + """Widget to support asset switching""" + + MIN_WIDTH = 550 + + switched = QtCore.Signal() + + def __init__(self, parent=None, items=None): + super(SwitchAssetDialog, self).__init__(parent) + + 
self.setWindowTitle("Switch selected items ...") + + # Force and keep focus dialog + self.setModal(True) + + assets_combox = SearchComboBox(self) + subsets_combox = SearchComboBox(self) + repres_combobox = SearchComboBox(self) + + assets_combox.set_placeholder("") + subsets_combox.set_placeholder("") + repres_combobox.set_placeholder("") + + asset_label = QtWidgets.QLabel(self) + subset_label = QtWidgets.QLabel(self) + repre_label = QtWidgets.QLabel(self) + + current_asset_btn = QtWidgets.QPushButton("Use current asset") + + accept_icon = qtawesome.icon("fa.check", color="white") + accept_btn = ButtonWithMenu(self) + accept_btn.setIcon(accept_icon) + + main_layout = QtWidgets.QGridLayout(self) + # Asset column + main_layout.addWidget(current_asset_btn, 0, 0) + main_layout.addWidget(assets_combox, 1, 0) + main_layout.addWidget(asset_label, 2, 0) + # Subset column + main_layout.addWidget(subsets_combox, 1, 1) + main_layout.addWidget(subset_label, 2, 1) + # Representation column + main_layout.addWidget(repres_combobox, 1, 2) + main_layout.addWidget(repre_label, 2, 2) + # Btn column + main_layout.addWidget(accept_btn, 1, 3) + main_layout.setColumnStretch(0, 1) + main_layout.setColumnStretch(1, 1) + main_layout.setColumnStretch(2, 1) + main_layout.setColumnStretch(3, 0) + + assets_combox.currentIndexChanged.connect( + self._combobox_value_changed + ) + subsets_combox.currentIndexChanged.connect( + self._combobox_value_changed + ) + repres_combobox.currentIndexChanged.connect( + self._combobox_value_changed + ) + accept_btn.clicked.connect(self._on_accept) + current_asset_btn.clicked.connect(self._on_current_asset) + + self._current_asset_btn = current_asset_btn + + self._assets_box = assets_combox + self._subsets_box = subsets_combox + self._representations_box = repres_combobox + + self._asset_label = asset_label + self._subset_label = subset_label + self._repre_label = repre_label + + self._accept_btn = accept_btn + + self.setMinimumWidth(self.MIN_WIDTH) + + # Set 
default focus to accept button so you don't directly type in + # first asset field, this also allows to see the placeholder value. + accept_btn.setFocus() + + self.content_loaders = set() + self.content_assets = {} + self.content_subsets = {} + self.content_versions = {} + self.content_repres = {} + + self.hero_version_ids = set() + + self.missing_assets = [] + self.missing_versions = [] + self.missing_subsets = [] + self.missing_repres = [] + self.missing_docs = False + + self.archived_assets = [] + self.archived_subsets = [] + self.archived_repres = [] + + self._init_asset_name = None + self._init_subset_name = None + self._init_repre_name = None + + self._fill_check = False + + self._items = items + self._prepare_content_data() + self.refresh(True) + + def _prepare_content_data(self): + repre_ids = set() + content_loaders = set() + for item in self._items: + repre_ids.add(io.ObjectId(item["representation"])) + content_loaders.add(item["loader"]) + + repres = list(io.find({ + "type": {"$in": ["representation", "archived_representation"]}, + "_id": {"$in": list(repre_ids)} + })) + repres_by_id = {repre["_id"]: repre for repre in repres} + + # stash context values, works only for single representation + if len(repres) == 1: + self._init_asset_name = repres[0]["context"]["asset"] + self._init_subset_name = repres[0]["context"]["subset"] + self._init_repre_name = repres[0]["context"]["representation"] + + content_repres = {} + archived_repres = [] + missing_repres = [] + version_ids = [] + for repre_id in repre_ids: + if repre_id not in repres_by_id: + missing_repres.append(repre_id) + elif repres_by_id[repre_id]["type"] == "archived_representation": + repre = repres_by_id[repre_id] + archived_repres.append(repre) + version_ids.append(repre["parent"]) + else: + repre = repres_by_id[repre_id] + content_repres[repre_id] = repres_by_id[repre_id] + version_ids.append(repre["parent"]) + + versions = io.find({ + "type": {"$in": ["version", "hero_version"]}, + "_id": 
{"$in": list(set(version_ids))} + }) + content_versions = {} + hero_version_ids = set() + for version in versions: + content_versions[version["_id"]] = version + if version["type"] == "hero_version": + hero_version_ids.add(version["_id"]) + + missing_versions = [] + subset_ids = [] + for version_id in version_ids: + if version_id not in content_versions: + missing_versions.append(version_id) + else: + subset_ids.append(content_versions[version_id]["parent"]) + + subsets = io.find({ + "type": {"$in": ["subset", "archived_subset"]}, + "_id": {"$in": subset_ids} + }) + subsets_by_id = {sub["_id"]: sub for sub in subsets} + + asset_ids = [] + archived_subsets = [] + missing_subsets = [] + content_subsets = {} + for subset_id in subset_ids: + if subset_id not in subsets_by_id: + missing_subsets.append(subset_id) + elif subsets_by_id[subset_id]["type"] == "archived_subset": + subset = subsets_by_id[subset_id] + asset_ids.append(subset["parent"]) + archived_subsets.append(subset) + else: + subset = subsets_by_id[subset_id] + asset_ids.append(subset["parent"]) + content_subsets[subset_id] = subset + + assets = io.find({ + "type": {"$in": ["asset", "archived_asset"]}, + "_id": {"$in": list(asset_ids)} + }) + assets_by_id = {asset["_id"]: asset for asset in assets} + + missing_assets = [] + archived_assets = [] + content_assets = {} + for asset_id in asset_ids: + if asset_id not in assets_by_id: + missing_assets.append(asset_id) + elif assets_by_id[asset_id]["type"] == "archived_asset": + archived_assets.append(assets_by_id[asset_id]) + else: + content_assets[asset_id] = assets_by_id[asset_id] + + self.content_loaders = content_loaders + self.content_assets = content_assets + self.content_subsets = content_subsets + self.content_versions = content_versions + self.content_repres = content_repres + + self.hero_version_ids = hero_version_ids + + self.missing_assets = missing_assets + self.missing_versions = missing_versions + self.missing_subsets = missing_subsets + 
self.missing_repres = missing_repres + self.missing_docs = ( + bool(missing_assets) + or bool(missing_versions) + or bool(missing_subsets) + or bool(missing_repres) + ) + + self.archived_assets = archived_assets + self.archived_subsets = archived_subsets + self.archived_repres = archived_repres + + def _combobox_value_changed(self, *args, **kwargs): + self.refresh() + + def refresh(self, init_refresh=False): + """Build the need comboboxes with content""" + if not self._fill_check and not init_refresh: + return + + self._fill_check = False + + if init_refresh: + asset_values = self._get_asset_box_values() + self._fill_combobox(asset_values, "asset") + + validation_state = ValidationState() + + # Set other comboboxes to empty if any document is missing or any asset + # of loaded representations is archived. + self._is_asset_ok(validation_state) + if validation_state.asset_ok: + subset_values = self._get_subset_box_values() + self._fill_combobox(subset_values, "subset") + self._is_subset_ok(validation_state) + + if validation_state.asset_ok and validation_state.subset_ok: + repre_values = sorted(self._representations_box_values()) + self._fill_combobox(repre_values, "repre") + self._is_repre_ok(validation_state) + + # Fill comboboxes with values + self.set_labels() + + self.apply_validations(validation_state) + + self._build_loaders_menu() + + if init_refresh: # pre select context if possible + self._assets_box.set_valid_value(self._init_asset_name) + self._subsets_box.set_valid_value(self._init_subset_name) + self._representations_box.set_valid_value(self._init_repre_name) + + self._fill_check = True + + def _build_loaders_menu(self): + repre_ids = self._get_current_output_repre_ids() + loaders = self._get_loaders(repre_ids) + # Get and destroy the action group + self._accept_btn.clear_actions() + + if not loaders: + return + + # Build new action group + group = QtWidgets.QActionGroup(self._accept_btn) + + for loader in loaders: + # Label + label = getattr(loader, 
"label", None) + if label is None: + label = loader.__name__ + + action = group.addAction(label) + # action = QtWidgets.QAction(label) + action.setData(loader) + + # Support font-awesome icons using the `.icon` and `.color` + # attributes on plug-ins. + icon = getattr(loader, "icon", None) + if icon is not None: + try: + key = "fa.{0}".format(icon) + color = getattr(loader, "color", "white") + action.setIcon(qtawesome.icon(key, color=color)) + + except Exception as exc: + print("Unable to set icon for loader {}: {}".format( + loader, str(exc) + )) + + self._accept_btn.add_action(action) + + group.triggered.connect(self._on_action_clicked) + + def _on_action_clicked(self, action): + loader_plugin = action.data() + self._trigger_switch(loader_plugin) + + def _get_loaders(self, repre_ids): + repre_contexts = None + if repre_ids: + repre_contexts = pipeline.get_repres_contexts(repre_ids) + + if not repre_contexts: + return list() + + available_loaders = [] + for loader_plugin in api.discover(api.Loader): + # Skip loaders without switch method + if not hasattr(loader_plugin, "switch"): + continue + + # Skip utility loaders + if ( + hasattr(loader_plugin, "is_utility") + and loader_plugin.is_utility + ): + continue + available_loaders.append(loader_plugin) + + loaders = None + for repre_context in repre_contexts.values(): + _loaders = set(pipeline.loaders_from_repre_context( + available_loaders, repre_context + )) + if loaders is None: + loaders = _loaders + else: + loaders = _loaders.intersection(loaders) + + if not loaders: + break + + if loaders is None: + loaders = [] + else: + loaders = list(loaders) + + return loaders + + def _fill_combobox(self, values, combobox_type): + if combobox_type == "asset": + combobox_widget = self._assets_box + elif combobox_type == "subset": + combobox_widget = self._subsets_box + elif combobox_type == "repre": + combobox_widget = self._representations_box + else: + return + selected_value = combobox_widget.get_valid_value() + + # Fill 
combobox + if values is not None: + combobox_widget.populate(list(sorted(values))) + if selected_value and selected_value in values: + index = None + for idx in range(combobox_widget.count()): + if selected_value == str(combobox_widget.itemText(idx)): + index = idx + break + if index is not None: + combobox_widget.setCurrentIndex(index) + + def set_labels(self): + asset_label = self._assets_box.get_valid_value() + subset_label = self._subsets_box.get_valid_value() + repre_label = self._representations_box.get_valid_value() + + default = "*No changes" + self._asset_label.setText(asset_label or default) + self._subset_label.setText(subset_label or default) + self._repre_label.setText(repre_label or default) + + def apply_validations(self, validation_state): + error_msg = "*Please select" + error_sheet = "border: 1px solid red;" + + asset_sheet = None + subset_sheet = None + repre_sheet = None + accept_state = "" + if validation_state.asset_ok is False: + asset_sheet = error_sheet + self._asset_label.setText(error_msg) + elif validation_state.subset_ok is False: + subset_sheet = error_sheet + self._subset_label.setText(error_msg) + elif validation_state.repre_ok is False: + repre_sheet = error_sheet + self._repre_label.setText(error_msg) + + if validation_state.all_ok: + accept_state = "1" + + self._assets_box.setStyleSheet(asset_sheet or "") + self._subsets_box.setStyleSheet(subset_sheet or "") + self._representations_box.setStyleSheet(repre_sheet or "") + + self._accept_btn.setEnabled(validation_state.all_ok) + self._set_style_property(self._accept_btn, "state", accept_state) + + def _set_style_property(self, widget, name, value): + cur_value = widget.property(name) + if cur_value == value: + return + widget.setProperty(name, value) + widget.style().polish(widget) + + def _get_current_output_repre_ids(self): + # NOTE hero versions are not used because it is expected that + # hero version has same representations as latests + selected_asset = 
self._assets_box.currentText() + selected_subset = self._subsets_box.currentText() + selected_repre = self._representations_box.currentText() + + # Nothing is selected + # [ ] [ ] [ ] + if not selected_asset and not selected_subset and not selected_repre: + return list(self.content_repres.keys()) + + # Prepare asset document if asset is selected + asset_doc = None + if selected_asset: + asset_doc = io.find_one( + {"type": "asset", "name": selected_asset}, + {"_id": True} + ) + if not asset_doc: + return [] + + # Everything is selected + # [x] [x] [x] + if selected_asset and selected_subset and selected_repre: + return self._get_current_output_repre_ids_xxx( + asset_doc, selected_subset, selected_repre + ) + + # [x] [x] [ ] + # If asset and subset is selected + if selected_asset and selected_subset: + return self._get_current_output_repre_ids_xxo( + asset_doc, selected_subset + ) + + # [x] [ ] [x] + # If asset and repre is selected + if selected_asset and selected_repre: + return self._get_current_output_repre_ids_xox( + asset_doc, selected_repre + ) + + # [x] [ ] [ ] + # If asset and subset is selected + if selected_asset: + return self._get_current_output_repre_ids_xoo(asset_doc) + + # [ ] [x] [x] + if selected_subset and selected_repre: + return self._get_current_output_repre_ids_oxx( + selected_subset, selected_repre + ) + + # [ ] [x] [ ] + if selected_subset: + return self._get_current_output_repre_ids_oxo( + selected_subset + ) + + # [ ] [ ] [x] + return self._get_current_output_repre_ids_oox(selected_repre) + + def _get_current_output_repre_ids_xxx( + self, asset_doc, selected_subset, selected_repre + ): + subset_doc = io.find_one( + { + "type": "subset", + "name": selected_subset, + "parent": asset_doc["_id"] + }, + {"_id": True} + ) + subset_id = subset_doc["_id"] + last_versions_by_subset_id = self.find_last_versions([subset_id]) + version_doc = last_versions_by_subset_id.get(subset_id) + if not version_doc: + return [] + + repre_docs = io.find( + { + 
 "type": "representation",
+                "parent": version_doc["_id"],
+                "name": selected_repre
+            },
+            {"_id": True}
+        )
+        return [repre_doc["_id"] for repre_doc in repre_docs]
+
+    def _get_current_output_repre_ids_xxo(self, asset_doc, selected_subset):
+        subset_doc = io.find_one(
+            {
+                "type": "subset",
+                "parent": asset_doc["_id"],
+                "name": selected_subset
+            },
+            {"_id": True}
+        )
+        if not subset_doc:
+            return []
+
+        repre_names = set()
+        for repre_doc in self.content_repres.values():
+            repre_names.add(repre_doc["name"])
+
+        repre_docs = io.find(
+            {
+                "type": "representation",
+                "parent": subset_doc["_id"],
+                "name": {"$in": list(repre_names)}
+            },
+            {"_id": True}
+        )
+        return [repre_doc["_id"] for repre_doc in repre_docs]
+
+    def _get_current_output_repre_ids_xox(self, asset_doc, selected_repre):
+        subset_names = set()
+        for subset_doc in self.content_subsets.values():
+            subset_names.add(subset_doc["name"])
+
+        subset_docs = io.find(
+            {
+                "type": "subset",
+                "name": {"$in": list(subset_names)},
+                "parent": asset_doc["_id"]
+            },
+            {"_id": True}
+        )
+        subset_ids = [subset_doc["_id"] for subset_doc in subset_docs]
+        repre_docs = io.find(
+            {
+                "type": "representation",
+                "parent": {"$in": subset_ids},
+                "name": selected_repre
+            },
+            {"_id": True}
+        )
+        return [repre_doc["_id"] for repre_doc in repre_docs]
+
+    def _get_current_output_repre_ids_xoo(self, asset_doc):
+        repres_by_subset_name = collections.defaultdict(set)
+        for repre_doc in self.content_repres.values():
+            repre_name = repre_doc["name"]
+            version_doc = self.content_versions[repre_doc["parent"]]
+            subset_doc = self.content_subsets[version_doc["parent"]]
+            subset_name = subset_doc["name"]
+            repres_by_subset_name[subset_name].add(repre_name)
+
+        subset_docs = list(io.find(
+            {
+                "type": "subset",
+                "parent": asset_doc["_id"],
+                "name": {"$in": list(repres_by_subset_name.keys())}
+            },
+            {"_id": True, "name": True}
+        ))
+        subset_name_by_id = {
+            subset_doc["_id"]: subset_doc["name"]
+            for subset_doc in subset_docs
+        }
+ subset_ids = list(subset_name_by_id.keys()) + last_versions_by_subset_id = self.find_last_versions(subset_ids) + last_version_id_by_subset_name = {} + for subset_id, last_version in last_versions_by_subset_id.items(): + subset_name = subset_name_by_id[subset_id] + last_version_id_by_subset_name[subset_name] = ( + last_version["_id"] + ) + + repre_or_query = [] + for subset_name, repre_names in repres_by_subset_name.items(): + version_id = last_version_id_by_subset_name.get(subset_name) + # This should not happen but why to crash? + if version_id is None: + continue + repre_or_query.append({ + "parent": version_id, + "name": {"$in": list(repre_names)} + }) + repre_docs = io.find( + {"$or": repre_or_query}, + {"_id": True} + ) + return [repre_doc["_id"] for repre_doc in repre_docs] + + def _get_current_output_repre_ids_oxx( + self, selected_subset, selected_repre + ): + subset_docs = list(io.find({ + "type": "subset", + "parent": {"$in": list(self.content_assets.keys())}, + "name": selected_subset + })) + subset_ids = [subset_doc["_id"] for subset_doc in subset_docs] + last_versions_by_subset_id = self.find_last_versions(subset_ids) + last_version_ids = [ + last_version["_id"] + for last_version in last_versions_by_subset_id.values() + ] + repre_docs = io.find({ + "type": "representation", + "parent": {"$in": last_version_ids}, + "name": selected_repre + }) + + return [repre_doc["_id"] for repre_doc in repre_docs] + + def _get_current_output_repre_ids_oxo(self, selected_subset): + subset_docs = list(io.find( + { + "type": "subset", + "parent": {"$in": list(self.content_assets.keys())}, + "name": selected_subset + }, + {"_id": True, "parent": True} + )) + if not subset_docs: + return list() + + subset_docs_by_id = { + subset_doc["_id"]: subset_doc + for subset_doc in subset_docs + } + last_versions_by_subset_id = self.find_last_versions( + subset_docs_by_id.keys() + ) + + subset_id_by_version_id = {} + for subset_id, last_version in 
last_versions_by_subset_id.items(): + version_id = last_version["_id"] + subset_id_by_version_id[version_id] = subset_id + + if not subset_id_by_version_id: + return list() + + repre_names_by_asset_id = collections.defaultdict(set) + for repre_doc in self.content_repres.values(): + version_doc = self.content_versions[repre_doc["parent"]] + subset_doc = self.content_subsets[version_doc["parent"]] + asset_doc = self.content_assets[subset_doc["parent"]] + repre_name = repre_doc["name"] + asset_id = asset_doc["_id"] + repre_names_by_asset_id[asset_id].add(repre_name) + + repre_or_query = [] + for last_version_id, subset_id in subset_id_by_version_id.items(): + subset_doc = subset_docs_by_id[subset_id] + asset_id = subset_doc["parent"] + repre_names = repre_names_by_asset_id.get(asset_id) + if not repre_names: + continue + repre_or_query.append({ + "parent": last_version_id, + "name": {"$in": list(repre_names)} + }) + repre_docs = io.find( + { + "type": "representation", + "$or": repre_or_query + }, + {"_id": True} + ) + + return [repre_doc["_id"] for repre_doc in repre_docs] + + def _get_current_output_repre_ids_oox(self, selected_repre): + repre_docs = io.find( + { + "name": selected_repre, + "parent": {"$in": list(self.content_versions.keys())} + }, + {"_id": True} + ) + return [repre_doc["_id"] for repre_doc in repre_docs] + + def _get_asset_box_values(self): + asset_docs = io.find( + {"type": "asset"}, + {"_id": 1, "name": 1} + ) + asset_names_by_id = { + asset_doc["_id"]: asset_doc["name"] + for asset_doc in asset_docs + } + subsets = io.find( + { + "type": "subset", + "parent": {"$in": list(asset_names_by_id.keys())} + }, + { + "parent": 1 + } + ) + + filtered_assets = [] + for subset in subsets: + asset_name = asset_names_by_id[subset["parent"]] + if asset_name not in filtered_assets: + filtered_assets.append(asset_name) + return sorted(filtered_assets) + + def _get_subset_box_values(self): + selected_asset = self._assets_box.get_valid_value() + if 
selected_asset: + asset_doc = io.find_one({"type": "asset", "name": selected_asset}) + asset_ids = [asset_doc["_id"]] + else: + asset_ids = list(self.content_assets.keys()) + + subsets = io.find( + { + "type": "subset", + "parent": {"$in": asset_ids} + }, + { + "parent": 1, + "name": 1 + } + ) + + subset_names_by_parent_id = collections.defaultdict(set) + for subset in subsets: + subset_names_by_parent_id[subset["parent"]].add(subset["name"]) + + possible_subsets = None + for subset_names in subset_names_by_parent_id.values(): + if possible_subsets is None: + possible_subsets = subset_names + else: + possible_subsets = (possible_subsets & subset_names) + + if not possible_subsets: + break + + return list(possible_subsets or list()) + + def _representations_box_values(self): + # NOTE hero versions are not used because it is expected that + # hero version has same representations as latests + selected_asset = self._assets_box.currentText() + selected_subset = self._subsets_box.currentText() + + # If nothing is selected + # [ ] [ ] [?] + if not selected_asset and not selected_subset: + # Find all representations of selection's subsets + possible_repres = list(io.find( + { + "type": "representation", + "parent": {"$in": list(self.content_versions.keys())} + }, + { + "parent": 1, + "name": 1 + } + )) + + possible_repres_by_parent = collections.defaultdict(set) + for repre in possible_repres: + possible_repres_by_parent[repre["parent"]].add(repre["name"]) + + output_repres = None + for repre_names in possible_repres_by_parent.values(): + if output_repres is None: + output_repres = repre_names + else: + output_repres = (output_repres & repre_names) + + if not output_repres: + break + + return list(output_repres or list()) + + # [x] [x] [?] 
+ if selected_asset and selected_subset: + asset_doc = io.find_one( + {"type": "asset", "name": selected_asset}, + {"_id": 1} + ) + subset_doc = io.find_one( + { + "type": "subset", + "name": selected_subset, + "parent": asset_doc["_id"] + }, + {"_id": 1} + ) + subset_id = subset_doc["_id"] + last_versions_by_subset_id = self.find_last_versions([subset_id]) + version_doc = last_versions_by_subset_id.get(subset_id) + repre_docs = io.find( + { + "type": "representation", + "parent": version_doc["_id"] + }, + { + "name": 1 + } + ) + return [ + repre_doc["name"] + for repre_doc in repre_docs + ] + + # [x] [ ] [?] + # If asset only is selected + if selected_asset: + asset_doc = io.find_one( + {"type": "asset", "name": selected_asset}, + {"_id": 1} + ) + if not asset_doc: + return list() + + # Filter subsets by subset names from content + subset_names = set() + for subset_doc in self.content_subsets.values(): + subset_names.add(subset_doc["name"]) + subset_docs = io.find( + { + "type": "subset", + "parent": asset_doc["_id"], + "name": {"$in": list(subset_names)} + }, + {"_id": 1} + ) + subset_ids = [ + subset_doc["_id"] + for subset_doc in subset_docs + ] + if not subset_ids: + return list() + + last_versions_by_subset_id = self.find_last_versions(subset_ids) + subset_id_by_version_id = {} + for subset_id, last_version in last_versions_by_subset_id.items(): + version_id = last_version["_id"] + subset_id_by_version_id[version_id] = subset_id + + if not subset_id_by_version_id: + return list() + + repre_docs = list(io.find( + { + "type": "representation", + "parent": {"$in": list(subset_id_by_version_id.keys())} + }, + { + "name": 1, + "parent": 1 + } + )) + if not repre_docs: + return list() + + repre_names_by_parent = collections.defaultdict(set) + for repre_doc in repre_docs: + repre_names_by_parent[repre_doc["parent"]].add( + repre_doc["name"] + ) + + available_repres = None + for repre_names in repre_names_by_parent.values(): + if available_repres is None: + 
available_repres = repre_names + continue + + available_repres = available_repres.intersection(repre_names) + + return list(available_repres) + + # [ ] [x] [?] + subset_docs = list(io.find( + { + "type": "subset", + "parent": {"$in": list(self.content_assets.keys())}, + "name": selected_subset + }, + {"_id": 1, "parent": 1} + )) + if not subset_docs: + return list() + + subset_docs_by_id = { + subset_doc["_id"]: subset_doc + for subset_doc in subset_docs + } + last_versions_by_subset_id = self.find_last_versions( + subset_docs_by_id.keys() + ) + + subset_id_by_version_id = {} + for subset_id, last_version in last_versions_by_subset_id.items(): + version_id = last_version["_id"] + subset_id_by_version_id[version_id] = subset_id + + if not subset_id_by_version_id: + return list() + + repre_docs = list(io.find( + { + "type": "representation", + "parent": {"$in": list(subset_id_by_version_id.keys())} + }, + { + "name": 1, + "parent": 1 + } + )) + if not repre_docs: + return list() + + repre_names_by_asset_id = {} + for repre_doc in repre_docs: + subset_id = subset_id_by_version_id[repre_doc["parent"]] + asset_id = subset_docs_by_id[subset_id]["parent"] + if asset_id not in repre_names_by_asset_id: + repre_names_by_asset_id[asset_id] = set() + repre_names_by_asset_id[asset_id].add(repre_doc["name"]) + + available_repres = None + for repre_names in repre_names_by_asset_id.values(): + if available_repres is None: + available_repres = repre_names + continue + + available_repres = available_repres.intersection(repre_names) + + return list(available_repres) + + def _is_asset_ok(self, validation_state): + selected_asset = self._assets_box.get_valid_value() + if ( + selected_asset is None + and (self.missing_docs or self.archived_assets) + ): + validation_state.asset_ok = False + + def _is_subset_ok(self, validation_state): + selected_asset = self._assets_box.get_valid_value() + selected_subset = self._subsets_box.get_valid_value() + + # [?] [x] [?] 
+ # If subset is selected then must be ok + if selected_subset is not None: + return + + # [ ] [ ] [?] + if selected_asset is None: + # If there were archived subsets and asset is not selected + if self.archived_subsets: + validation_state.subset_ok = False + return + + # [x] [ ] [?] + asset_doc = io.find_one( + {"type": "asset", "name": selected_asset}, + {"_id": 1} + ) + subset_docs = io.find( + {"type": "subset", "parent": asset_doc["_id"]}, + {"name": 1} + ) + subset_names = set( + subset_doc["name"] + for subset_doc in subset_docs + ) + + for subset_doc in self.content_subsets.values(): + if subset_doc["name"] not in subset_names: + validation_state.subset_ok = False + break + + def find_last_versions(self, subset_ids): + _pipeline = [ + # Find all versions of those subsets + {"$match": { + "type": "version", + "parent": {"$in": list(subset_ids)} + }}, + # Sorting versions all together + {"$sort": {"name": 1}}, + # Group them by "parent", but only take the last + {"$group": { + "_id": "$parent", + "_version_id": {"$last": "$_id"}, + "type": {"$last": "$type"} + }} + ] + last_versions_by_subset_id = dict() + for doc in io.aggregate(_pipeline): + doc["parent"] = doc["_id"] + doc["_id"] = doc.pop("_version_id") + last_versions_by_subset_id[doc["parent"]] = doc + return last_versions_by_subset_id + + def _is_repre_ok(self, validation_state): + selected_asset = self._assets_box.get_valid_value() + selected_subset = self._subsets_box.get_valid_value() + selected_repre = self._representations_box.get_valid_value() + + # [?] [?] 
[x] + # If subset is selected then must be ok + if selected_repre is not None: + return + + # [ ] [ ] [ ] + if selected_asset is None and selected_subset is None: + if ( + self.archived_repres + or self.missing_versions + or self.missing_repres + ): + validation_state.repre_ok = False + return + + # [x] [x] [ ] + if selected_asset is not None and selected_subset is not None: + asset_doc = io.find_one( + {"type": "asset", "name": selected_asset}, + {"_id": 1} + ) + subset_doc = io.find_one( + { + "type": "subset", + "parent": asset_doc["_id"], + "name": selected_subset + }, + {"_id": 1} + ) + last_versions_by_subset_id = self.find_last_versions( + [subset_doc["_id"]] + ) + last_version = last_versions_by_subset_id.get(subset_doc["_id"]) + if not last_version: + validation_state.repre_ok = False + return + + repre_docs = io.find( + { + "type": "representation", + "parent": last_version["_id"] + }, + {"name": 1} + ) + + repre_names = set( + repre_doc["name"] + for repre_doc in repre_docs + ) + for repre_doc in self.content_repres.values(): + if repre_doc["name"] not in repre_names: + validation_state.repre_ok = False + break + return + + # [x] [ ] [ ] + if selected_asset is not None: + asset_doc = io.find_one( + {"type": "asset", "name": selected_asset}, + {"_id": 1} + ) + subset_docs = list(io.find( + { + "type": "subset", + "parent": asset_doc["_id"] + }, + {"_id": 1, "name": 1} + )) + + subset_name_by_id = {} + subset_ids = set() + for subset_doc in subset_docs: + subset_id = subset_doc["_id"] + subset_ids.add(subset_id) + subset_name_by_id[subset_id] = subset_doc["name"] + + last_versions_by_subset_id = self.find_last_versions(subset_ids) + + subset_id_by_version_id = {} + for subset_id, last_version in last_versions_by_subset_id.items(): + version_id = last_version["_id"] + subset_id_by_version_id[version_id] = subset_id + + repre_docs = io.find( + { + "type": "representation", + "parent": {"$in": list(subset_id_by_version_id.keys())} + }, + { + "name": 1, + 
"parent": 1 + } + ) + repres_by_subset_name = {} + for repre_doc in repre_docs: + subset_id = subset_id_by_version_id[repre_doc["parent"]] + subset_name = subset_name_by_id[subset_id] + if subset_name not in repres_by_subset_name: + repres_by_subset_name[subset_name] = set() + repres_by_subset_name[subset_name].add(repre_doc["name"]) + + for repre_doc in self.content_repres.values(): + version_doc = self.content_versions[repre_doc["parent"]] + subset_doc = self.content_subsets[version_doc["parent"]] + repre_names = ( + repres_by_subset_name.get(subset_doc["name"]) or [] + ) + if repre_doc["name"] not in repre_names: + validation_state.repre_ok = False + break + return + + # [ ] [x] [ ] + # Subset documents + subset_docs = io.find( + { + "type": "subset", + "parent": {"$in": list(self.content_assets.keys())}, + "name": selected_subset + }, + {"_id": 1, "name": 1, "parent": 1} + ) + + subset_docs_by_id = {} + for subset_doc in subset_docs: + subset_docs_by_id[subset_doc["_id"]] = subset_doc + + last_versions_by_subset_id = self.find_last_versions( + subset_docs_by_id.keys() + ) + subset_id_by_version_id = {} + for subset_id, last_version in last_versions_by_subset_id.items(): + version_id = last_version["_id"] + subset_id_by_version_id[version_id] = subset_id + + repre_docs = io.find( + { + "type": "representation", + "parent": {"$in": list(subset_id_by_version_id.keys())} + }, + { + "name": 1, + "parent": 1 + } + ) + repres_by_asset_id = {} + for repre_doc in repre_docs: + subset_id = subset_id_by_version_id[repre_doc["parent"]] + asset_id = subset_docs_by_id[subset_id]["parent"] + if asset_id not in repres_by_asset_id: + repres_by_asset_id[asset_id] = set() + repres_by_asset_id[asset_id].add(repre_doc["name"]) + + for repre_doc in self.content_repres.values(): + version_doc = self.content_versions[repre_doc["parent"]] + subset_doc = self.content_subsets[version_doc["parent"]] + asset_id = subset_doc["parent"] + repre_names = ( + repres_by_asset_id.get(asset_id) or 
[] + ) + if repre_doc["name"] not in repre_names: + validation_state.repre_ok = False + break + + def _on_current_asset(self): + # Set initial asset as current. + asset_name = io.Session["AVALON_ASSET"] + index = self._assets_box.findText( + asset_name, QtCore.Qt.MatchFixedString + ) + if index >= 0: + print("Setting asset to {}".format(asset_name)) + self._assets_box.setCurrentIndex(index) + + def _on_accept(self): + self._trigger_switch() + + def _trigger_switch(self, loader=None): + # Use None when not a valid value or when placeholder value + selected_asset = self._assets_box.get_valid_value() + selected_subset = self._subsets_box.get_valid_value() + selected_representation = self._representations_box.get_valid_value() + + if selected_asset: + asset_doc = io.find_one({"type": "asset", "name": selected_asset}) + asset_docs_by_id = {asset_doc["_id"]: asset_doc} + else: + asset_docs_by_id = self.content_assets + + asset_docs_by_name = { + asset_doc["name"]: asset_doc + for asset_doc in asset_docs_by_id.values() + } + + asset_ids = list(asset_docs_by_id.keys()) + + subset_query = { + "type": "subset", + "parent": {"$in": asset_ids} + } + if selected_subset: + subset_query["name"] = selected_subset + + subset_docs = list(io.find(subset_query)) + subset_ids = [] + subset_docs_by_parent_and_name = collections.defaultdict(dict) + for subset in subset_docs: + subset_ids.append(subset["_id"]) + parent_id = subset["parent"] + name = subset["name"] + subset_docs_by_parent_and_name[parent_id][name] = subset + + # versions + version_docs = list(io.find({ + "type": "version", + "parent": {"$in": subset_ids} + }, sort=[("name", -1)])) + + hero_version_docs = list(io.find({ + "type": "hero_version", + "parent": {"$in": subset_ids} + })) + + version_ids = list() + + version_docs_by_parent_id = {} + for version_doc in version_docs: + parent_id = version_doc["parent"] + if parent_id not in version_docs_by_parent_id: + version_ids.append(version_doc["_id"]) + 
version_docs_by_parent_id[parent_id] = version_doc + + hero_version_docs_by_parent_id = {} + for hero_version_doc in hero_version_docs: + version_ids.append(hero_version_doc["_id"]) + parent_id = hero_version_doc["parent"] + hero_version_docs_by_parent_id[parent_id] = hero_version_doc + + repre_docs = io.find({ + "type": "representation", + "parent": {"$in": version_ids} + }) + repre_docs_by_parent_id_by_name = collections.defaultdict(dict) + for repre_doc in repre_docs: + parent_id = repre_doc["parent"] + name = repre_doc["name"] + repre_docs_by_parent_id_by_name[parent_id][name] = repre_doc + + for container in self._items: + container_repre_id = io.ObjectId(container["representation"]) + container_repre = self.content_repres[container_repre_id] + container_repre_name = container_repre["name"] + + container_version_id = container_repre["parent"] + container_version = self.content_versions[container_version_id] + + container_subset_id = container_version["parent"] + container_subset = self.content_subsets[container_subset_id] + container_subset_name = container_subset["name"] + + container_asset_id = container_subset["parent"] + container_asset = self.content_assets[container_asset_id] + container_asset_name = container_asset["name"] + + if selected_asset: + asset_doc = asset_docs_by_name[selected_asset] + else: + asset_doc = asset_docs_by_name[container_asset_name] + + subsets_by_name = subset_docs_by_parent_and_name[asset_doc["_id"]] + if selected_subset: + subset_doc = subsets_by_name[selected_subset] + else: + subset_doc = subsets_by_name[container_subset_name] + + repre_doc = None + subset_id = subset_doc["_id"] + if container_version["type"] == "hero_version": + hero_version = hero_version_docs_by_parent_id.get( + subset_id + ) + if hero_version: + _repres = repre_docs_by_parent_id_by_name.get( + hero_version["_id"] + ) + if selected_representation: + repre_doc = _repres.get(selected_representation) + else: + repre_doc = _repres.get(container_repre_name) + + 
if not repre_doc: + version_doc = version_docs_by_parent_id[subset_id] + version_id = version_doc["_id"] + repres_by_name = repre_docs_by_parent_id_by_name[version_id] + if selected_representation: + repre_doc = repres_by_name[selected_representation] + else: + repre_doc = repres_by_name[container_repre_name] + + try: + api.switch(container, repre_doc, loader) + except Exception: + msg = ( + "Couldn't switch asset." + "See traceback for more information." + ) + log.warning(msg, exc_info=True) + dialog = QtWidgets.QMessageBox(self) + dialog.setWindowTitle("Switch asset failed") + dialog.setText( + "Switch asset failed. Search console log for more details" + ) + dialog.exec_() + + self.switched.emit() + + self.close() diff --git a/openpype/tools/sceneinventory/view.py b/openpype/tools/sceneinventory/view.py new file mode 100644 index 0000000000..80f26a881d --- /dev/null +++ b/openpype/tools/sceneinventory/view.py @@ -0,0 +1,794 @@ +import collections +import logging +from functools import partial + +from Qt import QtWidgets, QtCore + +from avalon import io, api, style +from avalon.vendor import qtawesome +from avalon.lib import HeroVersionType +from avalon.tools import lib as tools_lib + +from openpype.modules import ModulesManager + +from .switch_dialog import SwitchAssetDialog +from .model import InventoryModel + + +DEFAULT_COLOR = "#fb9c15" + +log = logging.getLogger("SceneInventory") + + +class SceneInvetoryView(QtWidgets.QTreeView): + data_changed = QtCore.Signal() + hierarchy_view_changed = QtCore.Signal(bool) + + def __init__(self, parent=None): + super(SceneInvetoryView, self).__init__(parent=parent) + + # view settings + self.setIndentation(12) + self.setAlternatingRowColors(True) + self.setSortingEnabled(True) + self.setSelectionMode(self.ExtendedSelection) + self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + self.customContextMenuRequested.connect(self._show_right_mouse_menu) + self._hierarchy_view = False + self._selected = None + + manager = 
ModulesManager() + self.sync_server = manager.modules_by_name["sync_server"] + self.sync_enabled = self.sync_server.enabled + + def _set_hierarchy_view(self, enabled): + if enabled == self._hierarchy_view: + return + self._hierarchy_view = enabled + self.hierarchy_view_changed.emit(enabled) + + def _enter_hierarchy(self, items): + self._selected = set(i["objectName"] for i in items) + self._set_hierarchy_view(True) + self.data_changed.emit() + self.expandToDepth(1) + self.setStyleSheet(""" + QTreeView { + border-color: #fb9c15; + } + """) + + def _leave_hierarchy(self): + self._set_hierarchy_view(False) + self.data_changed.emit() + self.setStyleSheet("QTreeView {}") + + def _build_item_menu_for_selection(self, items, menu): + if not items: + return + + repre_ids = [] + for item in items: + item_id = io.ObjectId(item["representation"]) + if item_id not in repre_ids: + repre_ids.append(item_id) + + repre_docs = io.find( + { + "type": "representation", + "_id": {"$in": repre_ids} + }, + {"parent": 1} + ) + + version_ids = [] + for repre_doc in repre_docs: + version_id = repre_doc["parent"] + if version_id not in version_ids: + version_ids.append(version_id) + + loaded_versions = io.find({ + "_id": {"$in": version_ids}, + "type": {"$in": ["version", "hero_version"]} + }) + + loaded_hero_versions = [] + versions_by_parent_id = collections.defaultdict(list) + version_parents = [] + for version in loaded_versions: + if version["type"] == "hero_version": + loaded_hero_versions.append(version) + else: + parent_id = version["parent"] + versions_by_parent_id[parent_id].append(version) + if parent_id not in version_parents: + version_parents.append(parent_id) + + all_versions = io.find({ + "type": {"$in": ["hero_version", "version"]}, + "parent": {"$in": version_parents} + }) + hero_versions = [] + versions = [] + for version in all_versions: + if version["type"] == "hero_version": + hero_versions.append(version) + else: + versions.append(version) + + has_loaded_hero_versions 
= len(loaded_hero_versions) > 0 + has_available_hero_version = len(hero_versions) > 0 + has_outdated = False + + for version in versions: + parent_id = version["parent"] + current_versions = versions_by_parent_id[parent_id] + for current_version in current_versions: + if current_version["name"] < version["name"]: + has_outdated = True + break + + if has_outdated: + break + + switch_to_versioned = None + if has_loaded_hero_versions: + def _on_switch_to_versioned(items): + repre_ids = [] + for item in items: + item_id = io.ObjectId(item["representation"]) + if item_id not in repre_ids: + repre_ids.append(item_id) + + repre_docs = io.find( + { + "type": "representation", + "_id": {"$in": repre_ids} + }, + {"parent": 1} + ) + + version_ids = [] + version_id_by_repre_id = {} + for repre_doc in repre_docs: + version_id = repre_doc["parent"] + version_id_by_repre_id[repre_doc["_id"]] = version_id + if version_id not in version_ids: + version_ids.append(version_id) + hero_versions = io.find( + { + "_id": {"$in": version_ids}, + "type": "hero_version" + }, + {"version_id": 1} + ) + version_ids = set() + for hero_version in hero_versions: + version_id = hero_version["version_id"] + version_ids.add(version_id) + hero_version_id = hero_version["_id"] + for _repre_id, current_version_id in ( + version_id_by_repre_id.items() + ): + if current_version_id == hero_version_id: + version_id_by_repre_id[_repre_id] = version_id + + version_docs = io.find( + { + "_id": {"$in": list(version_ids)}, + "type": "version" + }, + {"name": 1} + ) + version_name_by_id = {} + for version_doc in version_docs: + version_name_by_id[version_doc["_id"]] = \ + version_doc["name"] + + for item in items: + repre_id = io.ObjectId(item["representation"]) + version_id = version_id_by_repre_id.get(repre_id) + version_name = version_name_by_id.get(version_id) + if version_name is not None: + try: + api.update(item, version_name) + except AssertionError: + self._show_version_error_dialog( + version_name, 
[item] + ) + log.warning("Update failed", exc_info=True) + + self.data_changed.emit() + + update_icon = qtawesome.icon( + "fa.asterisk", + color=DEFAULT_COLOR + ) + switch_to_versioned = QtWidgets.QAction( + update_icon, + "Switch to versioned", + menu + ) + switch_to_versioned.triggered.connect( + lambda: _on_switch_to_versioned(items) + ) + + update_to_latest_action = None + if has_outdated or has_loaded_hero_versions: + # update to latest version + def _on_update_to_latest(items): + for item in items: + try: + api.update(item, -1) + except AssertionError: + self._show_version_error_dialog(None, [item]) + log.warning("Update failed", exc_info=True) + self.data_changed.emit() + + update_icon = qtawesome.icon( + "fa.angle-double-up", + color=DEFAULT_COLOR + ) + update_to_latest_action = QtWidgets.QAction( + update_icon, + "Update to latest", + menu + ) + update_to_latest_action.triggered.connect( + lambda: _on_update_to_latest(items) + ) + + change_to_hero = None + if has_available_hero_version: + # change to hero version + def _on_update_to_hero(items): + for item in items: + try: + api.update(item, HeroVersionType(-1)) + except AssertionError: + self._show_version_error_dialog('hero', [item]) + log.warning("Update failed", exc_info=True) + self.data_changed.emit() + + # TODO change icon + change_icon = qtawesome.icon( + "fa.asterisk", + color="#00b359" + ) + change_to_hero = QtWidgets.QAction( + change_icon, + "Change to hero", + menu + ) + change_to_hero.triggered.connect( + lambda: _on_update_to_hero(items) + ) + + # set version + set_version_icon = qtawesome.icon("fa.hashtag", color=DEFAULT_COLOR) + set_version_action = QtWidgets.QAction( + set_version_icon, + "Set version", + menu + ) + set_version_action.triggered.connect( + lambda: self._show_version_dialog(items)) + + # switch asset + switch_asset_icon = qtawesome.icon("fa.sitemap", color=DEFAULT_COLOR) + switch_asset_action = QtWidgets.QAction( + switch_asset_icon, + "Switch Asset", + menu + ) + 
switch_asset_action.triggered.connect( + lambda: self._show_switch_dialog(items)) + + # remove + remove_icon = qtawesome.icon("fa.remove", color=DEFAULT_COLOR) + remove_action = QtWidgets.QAction(remove_icon, "Remove items", menu) + remove_action.triggered.connect( + lambda: self._show_remove_warning_dialog(items)) + + # add the actions + if switch_to_versioned: + menu.addAction(switch_to_versioned) + + if update_to_latest_action: + menu.addAction(update_to_latest_action) + + if change_to_hero: + menu.addAction(change_to_hero) + + menu.addAction(set_version_action) + menu.addAction(switch_asset_action) + + menu.addSeparator() + + menu.addAction(remove_action) + + self._handle_sync_server(menu, repre_ids) + + def _handle_sync_server(self, menu, repre_ids): + """ + Adds actions for download/upload when SyncServer is enabled + + Args: + menu (OptionMenu) + repre_ids (list) of object_ids + Returns: + (OptionMenu) + """ + if not self.sync_enabled: + return + + menu.addSeparator() + + download_icon = qtawesome.icon("fa.download", color=DEFAULT_COLOR) + download_active_action = QtWidgets.QAction( + download_icon, + "Download", + menu + ) + download_active_action.triggered.connect( + lambda: self._add_sites(repre_ids, 'active_site')) + + upload_icon = qtawesome.icon("fa.upload", color=DEFAULT_COLOR) + upload_remote_action = QtWidgets.QAction( + upload_icon, + "Upload", + menu + ) + upload_remote_action.triggered.connect( + lambda: self._add_sites(repre_ids, 'remote_site')) + + menu.addAction(download_active_action) + menu.addAction(upload_remote_action) + + def _add_sites(self, repre_ids, side): + """ + (Re)sync all 'repre_ids' to specific site. + + It checks if opposite site has fully available content to limit + accidents. 
(ReSync active when no remote >> losing active content) + + Args: + repre_ids (list) + side (str): 'active_site'|'remote_site' + """ + project_name = io.Session["AVALON_PROJECT"] + active_site = self.sync_server.get_active_site(project_name) + remote_site = self.sync_server.get_remote_site(project_name) + + repre_docs = io.find({ + "type": "representation", + "_id": {"$in": repre_ids} + }) + repre_docs_by_id = { + repre_doc["_id"]: repre_doc + for repre_doc in repre_docs + } + for repre_id in repre_ids: + repre_doc = repre_docs_by_id.get(repre_id) + if not repre_doc: + continue + + progress = tools_lib.get_progress_for_repre( + repre_doc, + active_site, + remote_site + ) + if side == "active_site": + # check opposite from added site, must be 1 or unable to sync + check_progress = progress[remote_site] + site = active_site + else: + check_progress = progress[active_site] + site = remote_site + + if check_progress == 1: + self.sync_server.add_site( + project_name, repre_id, site, force=True + ) + + self.data_changed.emit() + + def _build_item_menu(self, items=None): + """Create menu for the selected items""" + + if not items: + items = [] + + menu = QtWidgets.QMenu(self) + + # add the actions + self._build_item_menu_for_selection(items, menu) + + # These two actions should be able to work without selection + # expand all items + expandall_action = QtWidgets.QAction(menu, text="Expand all items") + expandall_action.triggered.connect(self.expandAll) + + # collapse all items + collapse_action = QtWidgets.QAction(menu, text="Collapse all items") + collapse_action.triggered.connect(self.collapseAll) + + menu.addAction(expandall_action) + menu.addAction(collapse_action) + + custom_actions = self._get_custom_actions(containers=items) + if custom_actions: + submenu = QtWidgets.QMenu("Actions", self) + for action in custom_actions: + color = action.color or DEFAULT_COLOR + icon = qtawesome.icon("fa.%s" % action.icon, color=color) + action_item = QtWidgets.QAction(icon, 
action.label, submenu) + action_item.triggered.connect( + partial(self._process_custom_action, action, items)) + + submenu.addAction(action_item) + + menu.addMenu(submenu) + + # go back to flat view + if self._hierarchy_view: + back_to_flat_icon = qtawesome.icon("fa.list", color=DEFAULT_COLOR) + back_to_flat_action = QtWidgets.QAction( + back_to_flat_icon, + "Back to Full-View", + menu + ) + back_to_flat_action.triggered.connect(self._leave_hierarchy) + + # send items to hierarchy view + enter_hierarchy_icon = qtawesome.icon("fa.indent", color="#d8d8d8") + enter_hierarchy_action = QtWidgets.QAction( + enter_hierarchy_icon, + "Cherry-Pick (Hierarchy)", + menu + ) + enter_hierarchy_action.triggered.connect( + lambda: self._enter_hierarchy(items)) + + if items: + menu.addAction(enter_hierarchy_action) + + if self._hierarchy_view: + menu.addAction(back_to_flat_action) + + return menu + + def _get_custom_actions(self, containers): + """Get the registered Inventory Actions + + Args: + containers(list): collection of containers + + Returns: + list: collection of filter and initialized actions + """ + + def sorter(Plugin): + """Sort based on order attribute of the plugin""" + return Plugin.order + + # Fedd an empty dict if no selection, this will ensure the compat + # lookup always work, so plugin can interact with Scene Inventory + # reversely. + containers = containers or [dict()] + + # Check which action will be available in the menu + Plugins = api.discover(api.InventoryAction) + compatible = [p() for p in Plugins if + any(p.is_compatible(c) for c in containers)] + + return sorted(compatible, key=sorter) + + def _process_custom_action(self, action, containers): + """Run action and if results are returned positive update the view + + If the result is list or dict, will select view items by the result. 
+ + Args: + action (InventoryAction): Inventory Action instance + containers (list): Data of currently selected items + + Returns: + None + """ + + result = action.process(containers) + if result: + self.data_changed.emit() + + if isinstance(result, (list, set)): + self._select_items_by_action(result) + + if isinstance(result, dict): + self._select_items_by_action( + result["objectNames"], result["options"] + ) + + def _select_items_by_action(self, object_names, options=None): + """Select view items by the result of action + + Args: + object_names (list or set): A list/set of container object name + options (dict): GUI operation options. + + Returns: + None + + """ + options = options or dict() + + if options.get("clear", True): + self.clearSelection() + + object_names = set(object_names) + if ( + self._hierarchy_view + and not self._selected.issuperset(object_names) + ): + # If any container not in current cherry-picked view, update + # view before selecting them. + self._selected.update(object_names) + self.data_changed.emit() + + model = self.model() + selection_model = self.selectionModel() + + select_mode = { + "select": selection_model.Select, + "deselect": selection_model.Deselect, + "toggle": selection_model.Toggle, + }[options.get("mode", "select")] + + for item in tools_lib.iter_model_rows(model, 0): + item = item.data(InventoryModel.ItemRole) + if item.get("isGroupNode"): + continue + + name = item.get("objectName") + if name in object_names: + self.scrollTo(item) # Ensure item is visible + flags = select_mode | selection_model.Rows + selection_model.select(item, flags) + + object_names.remove(name) + + if len(object_names) == 0: + break + + def _show_right_mouse_menu(self, pos): + """Display the menu when at the position of the item clicked""" + + globalpos = self.viewport().mapToGlobal(pos) + + if not self.selectionModel().hasSelection(): + print("No selection") + # Build menu without selection, feed an empty list + menu = self._build_item_menu() + 
menu.exec_(globalpos) + return + + active = self.currentIndex() # index under mouse + active = active.sibling(active.row(), 0) # get first column + + # move index under mouse + indices = self.get_indices() + if active in indices: + indices.remove(active) + + indices.append(active) + + # Extend to the sub-items + all_indices = self._extend_to_children(indices) + items = [dict(i.data(InventoryModel.ItemRole)) for i in all_indices + if i.parent().isValid()] + + if self._hierarchy_view: + # Ensure no group item + items = [n for n in items if not n.get("isGroupNode")] + + menu = self._build_item_menu(items) + menu.exec_(globalpos) + + def get_indices(self): + """Get the selected rows""" + selection_model = self.selectionModel() + return selection_model.selectedRows() + + def _extend_to_children(self, indices): + """Extend the indices to the children indices. + + Top-level indices are extended to its children indices. Sub-items + are kept as is. + + Args: + indices (list): The indices to extend. 
+ + Returns: + list: The children indices + + """ + def get_children(i): + model = i.model() + rows = model.rowCount(parent=i) + for row in range(rows): + child = model.index(row, 0, parent=i) + yield child + + subitems = set() + for i in indices: + valid_parent = i.parent().isValid() + if valid_parent and i not in subitems: + subitems.add(i) + + if self._hierarchy_view: + # Assume this is a group item + for child in get_children(i): + subitems.add(child) + else: + # is top level item + for child in get_children(i): + subitems.add(child) + + return list(subitems) + + def _show_version_dialog(self, items): + """Create a dialog with the available versions for the selected file + + Args: + items (list): list of items to run the "set_version" for + + Returns: + None + """ + + active = items[-1] + + # Get available versions for active representation + representation_id = io.ObjectId(active["representation"]) + representation = io.find_one({"_id": representation_id}) + version = io.find_one({ + "_id": representation["parent"] + }) + + versions = list(io.find( + { + "parent": version["parent"], + "type": "version" + }, + sort=[("name", 1)] + )) + + hero_version = io.find_one({ + "parent": version["parent"], + "type": "hero_version" + }) + if hero_version: + _version_id = hero_version["version_id"] + for _version in versions: + if _version["_id"] != _version_id: + continue + + hero_version["name"] = HeroVersionType( + _version["name"] + ) + hero_version["data"] = _version["data"] + break + + # Get index among the listed versions + current_item = None + current_version = active["version"] + if isinstance(current_version, HeroVersionType): + current_item = hero_version + else: + for version in versions: + if version["name"] == current_version: + current_item = version + break + + all_versions = [] + if hero_version: + all_versions.append(hero_version) + all_versions.extend(reversed(versions)) + + if current_item: + index = all_versions.index(current_item) + else: + index = 0 
+ + versions_by_label = dict() + labels = [] + for version in all_versions: + is_hero = version["type"] == "hero_version" + label = tools_lib.format_version(version["name"], is_hero) + labels.append(label) + versions_by_label[label] = version["name"] + + label, state = QtWidgets.QInputDialog.getItem( + self, + "Set version..", + "Set version number to", + labels, + current=index, + editable=False + ) + if not state: + return + + if label: + version = versions_by_label[label] + for item in items: + try: + api.update(item, version) + except AssertionError: + self._show_version_error_dialog(version, [item]) + log.warning("Update failed", exc_info=True) + # refresh model when done + self.data_changed.emit() + + def _show_switch_dialog(self, items): + """Display Switch dialog""" + dialog = SwitchAssetDialog(self, items) + dialog.switched.connect(self.data_changed.emit) + dialog.show() + + def _show_remove_warning_dialog(self, items): + """Prompt a dialog to inform the user the action will remove items""" + + accept = QtWidgets.QMessageBox.Ok + buttons = accept | QtWidgets.QMessageBox.Cancel + + state = QtWidgets.QMessageBox.question( + self, + "Are you sure?", + "Are you sure you want to remove {} item(s)".format(len(items)), + buttons=buttons, + defaultButton=accept + ) + + if state != accept: + return + + for item in items: + api.remove(item) + self.data_changed.emit() + + def _show_version_error_dialog(self, version, items): + """Shows QMessageBox when version switch doesn't work + + Args: + version: str or int or None + """ + if not version: + version_str = "latest" + elif version == "hero": + version_str = "hero" + elif isinstance(version, int): + version_str = "v{:03d}".format(version) + else: + version_str = version + + dialog = QtWidgets.QMessageBox() + dialog.setIcon(QtWidgets.QMessageBox.Warning) + dialog.setStyleSheet(style.load_stylesheet()) + dialog.setWindowTitle("Update failed") + + switch_btn = dialog.addButton( + "Switch Asset", + 
QtWidgets.QMessageBox.ActionRole + ) + switch_btn.clicked.connect(lambda: self._show_switch_dialog(items)) + + dialog.addButton(QtWidgets.QMessageBox.Cancel) + + msg = ( + "Version update to '{}' failed as representation doesn't exist." + "\n\nPlease update to version with a valid representation" + " OR \n use 'Switch Asset' button to change asset." + ).format(version_str) + dialog.setText(msg) + dialog.exec_() diff --git a/openpype/tools/sceneinventory/widgets.py b/openpype/tools/sceneinventory/widgets.py new file mode 100644 index 0000000000..4c4aafad3a --- /dev/null +++ b/openpype/tools/sceneinventory/widgets.py @@ -0,0 +1,93 @@ +from Qt import QtWidgets, QtCore +from openpype import style + + +class ButtonWithMenu(QtWidgets.QToolButton): + def __init__(self, parent=None): + super(ButtonWithMenu, self).__init__(parent) + + self.setObjectName("ButtonWithMenu") + + self.setPopupMode(self.MenuButtonPopup) + menu = QtWidgets.QMenu(self) + + self.setMenu(menu) + + self._menu = menu + self._actions = [] + + def menu(self): + return self._menu + + def clear_actions(self): + if self._menu is not None: + self._menu.clear() + self._actions = [] + + def add_action(self, action): + self._actions.append(action) + self._menu.addAction(action) + + def _on_action_trigger(self): + action = self.sender() + if action not in self._actions: + return + action.trigger() + + +class SearchComboBox(QtWidgets.QComboBox): + """Searchable ComboBox with empty placeholder value as first value""" + + def __init__(self, parent): + super(SearchComboBox, self).__init__(parent) + + self.setEditable(True) + self.setInsertPolicy(self.NoInsert) + + combobox_delegate = QtWidgets.QStyledItemDelegate(self) + self.setItemDelegate(combobox_delegate) + + completer = self.completer() + completer.setCompletionMode( + QtWidgets.QCompleter.PopupCompletion + ) + completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive) + + completer_view = completer.popup() + completer_view.setObjectName("CompleterView") + 
completer_delegate = QtWidgets.QStyledItemDelegate(completer_view) + completer_view.setItemDelegate(completer_delegate) + completer_view.setStyleSheet(style.load_stylesheet()) + + self._combobox_delegate = combobox_delegate + + self._completer_delegate = completer_delegate + self._completer = completer + + def set_placeholder(self, placeholder): + self.lineEdit().setPlaceholderText(placeholder) + + def populate(self, items): + self.clear() + self.addItems([""]) # ensure first item is placeholder + self.addItems(items) + + def get_valid_value(self): + """Return the current text if it's a valid value else None + + Note: The empty placeholder value is valid and returns as "" + + """ + + text = self.currentText() + lookup = set(self.itemText(i) for i in range(self.count())) + if text not in lookup: + return None + + return text or None + + def set_valid_value(self, value): + """Try to locate 'value' and pre-select it in dropdown.""" + index = self.findText(value) + if index > -1: + self.setCurrentIndex(index) diff --git a/openpype/tools/sceneinventory/window.py b/openpype/tools/sceneinventory/window.py new file mode 100644 index 0000000000..e71af6a93d --- /dev/null +++ b/openpype/tools/sceneinventory/window.py @@ -0,0 +1,203 @@ +import os +import sys + +from Qt import QtWidgets, QtCore +from avalon.vendor import qtawesome +from avalon import io, api + +from openpype import style +from openpype.tools.utils.delegates import VersionDelegate +from openpype.tools.utils.lib import ( + qt_app_context, + preserve_expanded_rows, + preserve_selection, + FamilyConfigCache +) + +from .model import ( + InventoryModel, + FilterProxyModel +) +from .view import SceneInvetoryView + + +module = sys.modules[__name__] +module.window = None + + +class SceneInventoryWindow(QtWidgets.QDialog): + """Scene Inventory window""" + + def __init__(self, parent=None): + super(SceneInventoryWindow, self).__init__(parent) + + if not parent: + self.setWindowFlags( + self.windowFlags() | 
QtCore.Qt.WindowStaysOnTopHint + ) + + project_name = os.getenv("AVALON_PROJECT") or "" + self.setWindowTitle("Scene Inventory 1.0 - {}".format(project_name)) + self.setObjectName("SceneInventory") + # Maya only property + self.setProperty("saveWindowPref", True) + + self.resize(1100, 480) + + # region control + filter_label = QtWidgets.QLabel("Search", self) + text_filter = QtWidgets.QLineEdit(self) + + outdated_only_checkbox = QtWidgets.QCheckBox( + "Filter to outdated", self + ) + outdated_only_checkbox.setToolTip("Show outdated files only") + outdated_only_checkbox.setChecked(False) + + icon = qtawesome.icon("fa.refresh", color="white") + refresh_button = QtWidgets.QPushButton(self) + refresh_button.setIcon(icon) + + control_layout = QtWidgets.QHBoxLayout() + control_layout.addWidget(filter_label) + control_layout.addWidget(text_filter) + control_layout.addWidget(outdated_only_checkbox) + control_layout.addWidget(refresh_button) + + # endregion control + family_config_cache = FamilyConfigCache(io) + + model = InventoryModel(family_config_cache) + proxy = FilterProxyModel() + proxy.setSourceModel(model) + proxy.setDynamicSortFilter(True) + proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) + + view = SceneInvetoryView(self) + view.setModel(proxy) + + # set some nice default widths for the view + view.setColumnWidth(0, 250) # name + view.setColumnWidth(1, 55) # version + view.setColumnWidth(2, 55) # count + view.setColumnWidth(3, 150) # family + view.setColumnWidth(4, 100) # namespace + + # apply delegates + version_delegate = VersionDelegate(io, self) + column = model.Columns.index("version") + view.setItemDelegateForColumn(column, version_delegate) + + layout = QtWidgets.QVBoxLayout(self) + layout.addLayout(control_layout) + layout.addWidget(view) + + # signals + text_filter.textChanged.connect(self._on_text_filter_change) + outdated_only_checkbox.stateChanged.connect( + self._on_outdated_state_change + ) + view.hierarchy_view_changed.connect( + 
self._on_hiearchy_view_change + ) + view.data_changed.connect(self.refresh) + refresh_button.clicked.connect(self.refresh) + + self._outdated_only_checkbox = outdated_only_checkbox + self._view = view + self._model = model + self._proxy = proxy + self._version_delegate = version_delegate + self._family_config_cache = family_config_cache + + self._first_show = True + + family_config_cache.refresh() + + def showEvent(self, event): + super(SceneInventoryWindow, self).showEvent(event) + if self._first_show: + self._first_show = False + self.setStyleSheet(style.load_stylesheet()) + + def keyPressEvent(self, event): + """Custom keyPressEvent. + + Override keyPressEvent to do nothing so that Maya's panels won't + take focus when pressing "SHIFT" whilst mouse is over viewport or + outliner. This way users don't accidently perform Maya commands + whilst trying to name an instance. + + """ + + def refresh(self, items=None): + with preserve_expanded_rows( + tree_view=self._view, + role=self._model.UniqueRole + ): + with preserve_selection( + tree_view=self._view, + role=self._model.UniqueRole, + current_index=False + ): + kwargs = {"items": items} + # TODO do not touch view's inner attribute + if self._view._hierarchy_view: + kwargs["selected"] = self._view._selected + self._model.refresh(**kwargs) + + def _on_hiearchy_view_change(self, enabled): + self._proxy.set_hierarchy_view(enabled) + self._model.set_hierarchy_view(enabled) + + def _on_text_filter_change(self, text_filter): + self._proxy.setFilterRegExp(text_filter) + + def _on_outdated_state_change(self): + self._proxy.set_filter_outdated( + self._outdated_only_checkbox.isChecked() + ) + + +def show(root=None, debug=False, parent=None, items=None): + """Display Scene Inventory GUI + + Arguments: + debug (bool, optional): Run in debug-mode, + defaults to False + parent (QtCore.QObject, optional): When provided parent the interface + to this QObject. 
+ items (list) of dictionaries - for injection of items for standalone + testing + + """ + + try: + module.window.close() + del module.window + except (RuntimeError, AttributeError): + pass + + if debug is True: + io.install() + + if not os.environ.get("AVALON_PROJECT"): + any_project = next( + project for project in io.projects() + if project.get("active", True) is not False + ) + + api.Session["AVALON_PROJECT"] = any_project["name"] + else: + api.Session["AVALON_PROJECT"] = os.environ.get("AVALON_PROJECT") + + with qt_app_context(): + window = SceneInventoryWindow(parent) + window.show() + window.refresh(items=items) + + module.window = window + + # Pull window to the front. + module.window.raise_() + module.window.activateWindow() diff --git a/openpype/tools/settings/__init__.py b/openpype/tools/settings/__init__.py index a156228dc1..3e77a8348a 100644 --- a/openpype/tools/settings/__init__.py +++ b/openpype/tools/settings/__init__.py @@ -1,12 +1,13 @@ import sys from Qt import QtWidgets, QtGui + +from openpype import style from .lib import ( BTN_FIXED_SIZE, CHILD_OFFSET ) from .local_settings import LocalSettingsWindow from .settings import ( - style, MainWidget, ProjectListWidget ) @@ -36,8 +37,6 @@ __all__ = ( "BTN_FIXED_SIZE", "CHILD_OFFSET", - "style", - "MainWidget", "ProjectListWidget", "LocalSettingsWindow", diff --git a/openpype/tools/settings/local_settings/apps_widget.py b/openpype/tools/settings/local_settings/apps_widget.py index e6a4132955..850e009937 100644 --- a/openpype/tools/settings/local_settings/apps_widget.py +++ b/openpype/tools/settings/local_settings/apps_widget.py @@ -172,7 +172,9 @@ class LocalApplicationsWidgets(QtWidgets.QWidget): def _reset_app_widgets(self): while self.content_layout.count() > 0: item = self.content_layout.itemAt(0) - item.widget().hide() + widget = item.widget() + if widget is not None: + widget.setVisible(False) self.content_layout.removeItem(item) self.widgets_by_group_name.clear() diff --git 
a/openpype/tools/settings/local_settings/projects_widget.py b/openpype/tools/settings/local_settings/projects_widget.py index 9cd3b9a38e..7e2ad661a0 100644 --- a/openpype/tools/settings/local_settings/projects_widget.py +++ b/openpype/tools/settings/local_settings/projects_widget.py @@ -6,10 +6,7 @@ from openpype.settings.constants import ( PROJECT_ANATOMY_KEY, DEFAULT_PROJECT_KEY ) -from .widgets import ( - SpacerWidget, - ProxyLabelWidget -) +from .widgets import ProxyLabelWidget from .constants import ( LABEL_REMOVE_DEFAULT, LABEL_ADD_DEFAULT, @@ -238,9 +235,9 @@ class SitesWidget(QtWidgets.QWidget): comboboxes_layout = QtWidgets.QHBoxLayout(comboboxes_widget) comboboxes_layout.setContentsMargins(0, 0, 0, 0) - comboboxes_layout.addWidget(active_site_widget) - comboboxes_layout.addWidget(remote_site_widget) - comboboxes_layout.addWidget(SpacerWidget(comboboxes_widget), 1) + comboboxes_layout.addWidget(active_site_widget, 0) + comboboxes_layout.addWidget(remote_site_widget, 0) + comboboxes_layout.addStretch(1) content_widget = QtWidgets.QWidget(self) content_layout = QtWidgets.QVBoxLayout(content_widget) @@ -259,7 +256,9 @@ class SitesWidget(QtWidgets.QWidget): def _clear_widgets(self): while self.content_layout.count(): item = self.content_layout.itemAt(0) - item.widget().hide() + widget = item.widget() + if widget is not None: + widget.setVisible(False) self.content_layout.removeItem(item) self.input_objects = {} @@ -383,7 +382,7 @@ class SitesWidget(QtWidgets.QWidget): self.input_objects[site_name] = site_input_objects # Add spacer so other widgets are squeezed to top - self.content_layout.addWidget(SpacerWidget(self), 1) + self.content_layout.addStretch(1) def _on_input_value_change(self, site_name, key): if ( @@ -456,6 +455,8 @@ class _SiteCombobox(QtWidgets.QWidget): self ) combobox_input = QtWidgets.QComboBox(self) + combobox_delegate = QtWidgets.QStyledItemDelegate() + combobox_input.setItemDelegate(combobox_delegate) main_layout = 
QtWidgets.QHBoxLayout(self) main_layout.addWidget(label_widget) @@ -464,6 +465,7 @@ class _SiteCombobox(QtWidgets.QWidget): combobox_input.currentIndexChanged.connect(self._on_index_change) self.label_widget = label_widget self.combobox_input = combobox_input + self._combobox_delegate = combobox_delegate def _set_current_text(self, text): index = None @@ -777,7 +779,7 @@ class RootSiteWidget(QtWidgets.QWidget): main_layout = QtWidgets.QVBoxLayout(self) main_layout.addWidget(sites_widget) - main_layout.addWidget(SpacerWidget(self), 1) + main_layout.addStretch(1) self.sites_widget = sites_widget diff --git a/openpype/tools/settings/local_settings/widgets.py b/openpype/tools/settings/local_settings/widgets.py index b164f1b407..2733aef187 100644 --- a/openpype/tools/settings/local_settings/widgets.py +++ b/openpype/tools/settings/local_settings/widgets.py @@ -1,7 +1,6 @@ from Qt import QtWidgets, QtCore from openpype.tools.settings.settings.widgets import ( - ExpandingWidget, - SpacerWidget + ExpandingWidget ) @@ -56,7 +55,5 @@ class ProxyLabelWidget(QtWidgets.QWidget): __all__ = ( "ExpandingWidget", - "SpacerWidget", "Separator", - "SpacerWidget" ) diff --git a/openpype/tools/settings/local_settings/window.py b/openpype/tools/settings/local_settings/window.py index f22e397323..a00bc232f4 100644 --- a/openpype/tools/settings/local_settings/window.py +++ b/openpype/tools/settings/local_settings/window.py @@ -1,7 +1,7 @@ import logging from Qt import QtWidgets, QtGui -from ..settings import style +from openpype import style from openpype.settings.lib import ( get_local_settings, @@ -15,7 +15,6 @@ from openpype.api import ( from openpype.modules import ModulesManager from .widgets import ( - SpacerWidget, ExpandingWidget ) from .mongo_widget import OpenPypeMongoWidget @@ -58,8 +57,7 @@ class LocalSettingsWidget(QtWidgets.QWidget): self._create_app_ui() self._create_project_ui() - # Add spacer to main layout - self.main_layout.addWidget(SpacerWidget(self), 1) + 
self.main_layout.addStretch(1) def _create_pype_mongo_ui(self): pype_mongo_expand_widget = ExpandingWidget("OpenPype Mongo URL", self) @@ -210,7 +208,7 @@ class LocalSettingsWindow(QtWidgets.QWidget): footer_layout = QtWidgets.QHBoxLayout(footer) footer_layout.addWidget(reset_btn, 0) - footer_layout.addWidget(SpacerWidget(footer), 1) + footer_layout.addStretch(1) footer_layout.addWidget(save_btn, 0) main_layout = QtWidgets.QVBoxLayout(self) diff --git a/openpype/tools/settings/settings/__init__.py b/openpype/tools/settings/settings/__init__.py index 6b4cf94357..9eadd456b7 100644 --- a/openpype/tools/settings/settings/__init__.py +++ b/openpype/tools/settings/settings/__init__.py @@ -1,10 +1,8 @@ -from . import style from .window import MainWidget from .widgets import ProjectListWidget __all__ = ( - "style", "MainWidget", "ProjectListWidget" ) diff --git a/openpype/tools/settings/settings/base.py b/openpype/tools/settings/settings/base.py index 92fffe6f9c..f8378ed18c 100644 --- a/openpype/tools/settings/settings/base.py +++ b/openpype/tools/settings/settings/base.py @@ -470,10 +470,9 @@ class GUIWidget(BaseWidget): self.entity_widget.add_widget_to_layout(self) def _create_label_ui(self): - self.setObjectName("LabelWidget") - label = self.entity["label"] label_widget = QtWidgets.QLabel(label, self) + label_widget.setObjectName("SettingsLabel") layout = QtWidgets.QHBoxLayout(self) layout.setContentsMargins(0, 5, 0, 5) @@ -481,7 +480,7 @@ class GUIWidget(BaseWidget): def _create_separator_ui(self): splitter_item = QtWidgets.QWidget(self) - splitter_item.setObjectName("SplitterItem") + splitter_item.setObjectName("Separator") splitter_item.setMinimumHeight(self.separator_height) splitter_item.setMaximumHeight(self.separator_height) @@ -513,10 +512,9 @@ class MockUpWidget(BaseWidget): child_invalid = False def create_ui(self): - self.setObjectName("LabelWidget") - label = "Mockup widget for entity {}".format(self.entity.path) label_widget = QtWidgets.QLabel(label, self) 
+ label_widget.setObjectName("SettingsLabel") layout = QtWidgets.QHBoxLayout(self) layout.setContentsMargins(0, 5, 0, 5) diff --git a/openpype/tools/settings/settings/categories.py b/openpype/tools/settings/settings/categories.py index 5f9051344d..a6e4154b2b 100644 --- a/openpype/tools/settings/settings/categories.py +++ b/openpype/tools/settings/settings/categories.py @@ -391,7 +391,9 @@ class SettingsCategoryWidget(QtWidgets.QWidget): while self.content_layout.count() != 0: widget = self.content_layout.itemAt(0).widget() - widget.hide() + if widget is not None: + widget.setVisible(False) + self.content_layout.removeWidget(widget) widget.deleteLater() diff --git a/openpype/tools/settings/settings/dict_conditional.py b/openpype/tools/settings/settings/dict_conditional.py index 3e3270cac9..2e1617f505 100644 --- a/openpype/tools/settings/settings/dict_conditional.py +++ b/openpype/tools/settings/settings/dict_conditional.py @@ -164,6 +164,7 @@ class DictConditionalWidget(BaseWidget): content_widget.setProperty("show_borders", show_borders) label_widget = QtWidgets.QLabel(self.entity.label) + label_widget.setObjectName("SettingsLabel") content_layout = QtWidgets.QGridLayout(content_widget) content_layout.setContentsMargins(5, 5, 5, 5) diff --git a/openpype/tools/settings/settings/dict_mutable_widget.py b/openpype/tools/settings/settings/dict_mutable_widget.py index 9afce7259e..294711b38a 100644 --- a/openpype/tools/settings/settings/dict_mutable_widget.py +++ b/openpype/tools/settings/settings/dict_mutable_widget.py @@ -3,7 +3,12 @@ from uuid import uuid4 from Qt import QtWidgets, QtCore, QtGui from .base import BaseWidget -from .lib import create_deffered_value_change_timer +from .lib import ( + create_deffered_value_change_timer, + create_add_btn, + create_remove_btn, + create_confirm_btn +) from .widgets import ( ExpandingWidget, IconButton @@ -21,92 +26,6 @@ KEY_INPUT_TOOLTIP = ( ) -class PaintHelper: - cached_icons = {} - - @classmethod - def _draw_image(cls, 
width, height, brush): - image = QtGui.QPixmap(width, height) - image.fill(QtCore.Qt.transparent) - - icon_path_stroker = QtGui.QPainterPathStroker() - icon_path_stroker.setCapStyle(QtCore.Qt.RoundCap) - icon_path_stroker.setJoinStyle(QtCore.Qt.RoundJoin) - icon_path_stroker.setWidth(height / 5) - - painter = QtGui.QPainter(image) - painter.setPen(QtCore.Qt.transparent) - painter.setBrush(brush) - rect = QtCore.QRect(0, 0, image.width(), image.height()) - fifteenth = rect.height() / 15 - # Left point - p1 = QtCore.QPoint( - rect.x() + (5 * fifteenth), - rect.y() + (9 * fifteenth) - ) - # Middle bottom point - p2 = QtCore.QPoint( - rect.center().x(), - rect.y() + (11 * fifteenth) - ) - # Top right point - p3 = QtCore.QPoint( - rect.x() + (10 * fifteenth), - rect.y() + (5 * fifteenth) - ) - - path = QtGui.QPainterPath(p1) - path.lineTo(p2) - path.lineTo(p3) - - stroked_path = icon_path_stroker.createStroke(path) - painter.drawPath(stroked_path) - - painter.end() - - return image - - @classmethod - def get_confirm_icon(cls, width, height): - key = "{}x{}-confirm_image".format(width, height) - icon = cls.cached_icons.get(key) - - if icon is None: - image = cls._draw_image(width, height, QtCore.Qt.white) - icon = QtGui.QIcon(image) - cls.cached_icons[key] = icon - return icon - - -def create_add_btn(parent): - add_btn = QtWidgets.QPushButton("+", parent) - add_btn.setFocusPolicy(QtCore.Qt.ClickFocus) - add_btn.setProperty("btn-type", "tool-item") - add_btn.setFixedSize(BTN_FIXED_SIZE, BTN_FIXED_SIZE) - return add_btn - - -def create_remove_btn(parent): - remove_btn = QtWidgets.QPushButton("-", parent) - remove_btn.setFocusPolicy(QtCore.Qt.ClickFocus) - remove_btn.setProperty("btn-type", "tool-item") - remove_btn.setFixedSize(BTN_FIXED_SIZE, BTN_FIXED_SIZE) - return remove_btn - - -def create_confirm_btn(parent): - confirm_btn = QtWidgets.QPushButton(parent) - - icon = PaintHelper.get_confirm_icon( - BTN_FIXED_SIZE, BTN_FIXED_SIZE - ) - confirm_btn.setIcon(icon) - 
confirm_btn.setFocusPolicy(QtCore.Qt.ClickFocus) - confirm_btn.setProperty("btn-type", "tool-item") - confirm_btn.setFixedSize(BTN_FIXED_SIZE, BTN_FIXED_SIZE) - return confirm_btn - - class ModifiableDictEmptyItem(QtWidgets.QWidget): def __init__(self, entity_widget, store_as_list, parent): super(ModifiableDictEmptyItem, self).__init__(parent) @@ -375,7 +294,7 @@ class ModifiableDictItem(QtWidgets.QWidget): "fa.edit", QtCore.Qt.lightGray, QtCore.Qt.white ) edit_btn.setFocusPolicy(QtCore.Qt.ClickFocus) - edit_btn.setProperty("btn-type", "tool-item-icon") + edit_btn.setObjectName("SettingsToolIconBtn") edit_btn.setFixedHeight(BTN_FIXED_SIZE) confirm_btn = create_confirm_btn(self) diff --git a/openpype/tools/settings/settings/images/__init__.py b/openpype/tools/settings/settings/images/__init__.py new file mode 100644 index 0000000000..3ad65e114a --- /dev/null +++ b/openpype/tools/settings/settings/images/__init__.py @@ -0,0 +1,19 @@ +import os +from Qt import QtGui + + +def get_image_path(image_filename): + return os.path.join( + os.path.dirname(os.path.abspath(__file__)), + image_filename + ) + + +def get_image(image_filename): + image_path = get_image_path(image_filename) + return QtGui.QImage(image_path) + + +def get_pixmap(image_filename): + image_path = get_image_path(image_filename) + return QtGui.QPixmap(image_path) diff --git a/openpype/tools/settings/settings/images/add.png b/openpype/tools/settings/settings/images/add.png new file mode 100644 index 0000000000..91ef720d32 Binary files /dev/null and b/openpype/tools/settings/settings/images/add.png differ diff --git a/openpype/tools/settings/settings/images/confirm.png b/openpype/tools/settings/settings/images/confirm.png new file mode 100644 index 0000000000..a0fdc66d3e Binary files /dev/null and b/openpype/tools/settings/settings/images/confirm.png differ diff --git a/openpype/tools/settings/settings/images/down.png b/openpype/tools/settings/settings/images/down.png new file mode 100644 index 
0000000000..f78622922f Binary files /dev/null and b/openpype/tools/settings/settings/images/down.png differ diff --git a/openpype/tools/settings/settings/images/mask.png b/openpype/tools/settings/settings/images/mask.png new file mode 100644 index 0000000000..f10f00be2c Binary files /dev/null and b/openpype/tools/settings/settings/images/mask.png differ diff --git a/openpype/tools/settings/settings/images/remove.png b/openpype/tools/settings/settings/images/remove.png new file mode 100644 index 0000000000..79ea6eb973 Binary files /dev/null and b/openpype/tools/settings/settings/images/remove.png differ diff --git a/openpype/tools/settings/settings/images/up.png b/openpype/tools/settings/settings/images/up.png new file mode 100644 index 0000000000..4fccb08fe1 Binary files /dev/null and b/openpype/tools/settings/settings/images/up.png differ diff --git a/openpype/tools/settings/settings/item_widgets.py b/openpype/tools/settings/settings/item_widgets.py index a28bee8d36..2e00967a60 100644 --- a/openpype/tools/settings/settings/item_widgets.py +++ b/openpype/tools/settings/settings/item_widgets.py @@ -7,8 +7,8 @@ from .widgets import ( NumberSpinBox, GridLabelWidget, SettingsComboBox, - NiceCheckbox, SettingsPlainTextEdit, + SettingsNiceCheckbox, SettingsLineEdit ) from .multiselection_combobox import MultiSelectionComboBox @@ -21,6 +21,7 @@ from .base import ( BaseWidget, InputWidget ) + from openpype.widgets.sliders import NiceSlider from openpype.tools.settings import CHILD_OFFSET @@ -129,6 +130,7 @@ class DictImmutableKeysWidget(BaseWidget): content_widget.setProperty("show_borders", show_borders) label_widget = QtWidgets.QLabel(self.entity.label) + label_widget.setObjectName("SettingsLabel") content_layout = QtWidgets.QGridLayout(content_widget) content_layout.setContentsMargins(5, 5, 5, 5) @@ -324,12 +326,7 @@ class DictImmutableKeysWidget(BaseWidget): class BoolWidget(InputWidget): def _add_inputs_to_layout(self): - checkbox_height = self.style().pixelMetric( - 
QtWidgets.QStyle.PM_IndicatorHeight - ) - self.input_field = NiceCheckbox( - height=checkbox_height, parent=self.content_widget - ) + self.input_field = SettingsNiceCheckbox(parent=self.content_widget) self.content_layout.addWidget(self.input_field, 0) self.content_layout.addStretch(1) @@ -352,6 +349,9 @@ class BoolWidget(InputWidget): def _on_value_change(self): if self.ignore_input_changes: return + self.start_value_timer() + + def _on_value_change_timer(self): self.entity.set(self.input_field.isChecked()) diff --git a/openpype/tools/settings/settings/lib.py b/openpype/tools/settings/settings/lib.py index 577aaa5671..d12a14259a 100644 --- a/openpype/tools/settings/settings/lib.py +++ b/openpype/tools/settings/settings/lib.py @@ -1,5 +1,7 @@ from Qt import QtCore +from .widgets import SettingsToolBtn + # Offset of value change trigger in ms VALUE_CHANGE_OFFSET_MS = 300 @@ -16,3 +18,33 @@ def create_deffered_value_change_timer(callback): timer.setInterval(VALUE_CHANGE_OFFSET_MS) timer.timeout.connect(callback) return timer + + +def create_add_btn(parent): + add_btn = SettingsToolBtn("add", parent) + add_btn.setFocusPolicy(QtCore.Qt.ClickFocus) + return add_btn + + +def create_remove_btn(parent): + remove_btn = SettingsToolBtn("remove", parent) + remove_btn.setFocusPolicy(QtCore.Qt.ClickFocus) + return remove_btn + + +def create_up_btn(parent): + remove_btn = SettingsToolBtn("up", parent) + remove_btn.setFocusPolicy(QtCore.Qt.ClickFocus) + return remove_btn + + +def create_down_btn(parent): + add_btn = SettingsToolBtn("down", parent) + add_btn.setFocusPolicy(QtCore.Qt.ClickFocus) + return add_btn + + +def create_confirm_btn(parent): + remove_btn = SettingsToolBtn("confirm", parent) + remove_btn.setFocusPolicy(QtCore.Qt.ClickFocus) + return remove_btn diff --git a/openpype/tools/settings/settings/list_item_widget.py b/openpype/tools/settings/settings/list_item_widget.py index 128af92631..cd1fd912ae 100644 --- a/openpype/tools/settings/settings/list_item_widget.py +++ 
b/openpype/tools/settings/settings/list_item_widget.py @@ -1,13 +1,17 @@ from Qt import QtWidgets, QtCore -from .base import InputWidget -from .widgets import ExpandingWidget from openpype.tools.settings import ( - BTN_FIXED_SIZE, CHILD_OFFSET ) -from avalon.vendor import qtawesome +from .base import InputWidget +from .widgets import ExpandingWidget +from .lib import ( + create_add_btn, + create_remove_btn, + create_up_btn, + create_down_btn +) class EmptyListItem(QtWidgets.QWidget): @@ -16,18 +20,11 @@ class EmptyListItem(QtWidgets.QWidget): self.entity_widget = entity_widget - add_btn = QtWidgets.QPushButton("+", self) - remove_btn = QtWidgets.QPushButton("-", self) + add_btn = create_add_btn(self) + remove_btn = create_remove_btn(self) - add_btn.setFocusPolicy(QtCore.Qt.ClickFocus) remove_btn.setEnabled(False) - add_btn.setFixedSize(BTN_FIXED_SIZE, BTN_FIXED_SIZE) - remove_btn.setFixedSize(BTN_FIXED_SIZE, BTN_FIXED_SIZE) - - add_btn.setProperty("btn-type", "tool-item") - remove_btn.setProperty("btn-type", "tool-item") - layout = QtWidgets.QHBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) layout.setSpacing(3) @@ -52,32 +49,10 @@ class ListItem(QtWidgets.QWidget): self.ignore_input_changes = entity_widget.ignore_input_changes - char_up = qtawesome.charmap("fa.angle-up") - char_down = qtawesome.charmap("fa.angle-down") - - add_btn = QtWidgets.QPushButton("+") - remove_btn = QtWidgets.QPushButton("-") - up_btn = QtWidgets.QPushButton(char_up) - down_btn = QtWidgets.QPushButton(char_down) - - font_up_down = qtawesome.font("fa", 13) - up_btn.setFont(font_up_down) - down_btn.setFont(font_up_down) - - add_btn.setFocusPolicy(QtCore.Qt.ClickFocus) - remove_btn.setFocusPolicy(QtCore.Qt.ClickFocus) - up_btn.setFocusPolicy(QtCore.Qt.ClickFocus) - down_btn.setFocusPolicy(QtCore.Qt.ClickFocus) - - add_btn.setFixedSize(BTN_FIXED_SIZE, BTN_FIXED_SIZE) - remove_btn.setFixedSize(BTN_FIXED_SIZE, BTN_FIXED_SIZE) - up_btn.setFixedSize(BTN_FIXED_SIZE, BTN_FIXED_SIZE) - 
down_btn.setFixedSize(BTN_FIXED_SIZE, BTN_FIXED_SIZE) - - add_btn.setProperty("btn-type", "tool-item") - remove_btn.setProperty("btn-type", "tool-item") - up_btn.setProperty("btn-type", "tool-item") - down_btn.setProperty("btn-type", "tool-item") + add_btn = create_add_btn(self) + remove_btn = create_remove_btn(self) + up_btn = create_up_btn(self) + down_btn = create_down_btn(self) add_btn.clicked.connect(self._on_add_clicked) remove_btn.clicked.connect(self._on_remove_clicked) diff --git a/openpype/tools/settings/settings/style/__init__.py b/openpype/tools/settings/settings/style/__init__.py deleted file mode 100644 index f1d9829a04..0000000000 --- a/openpype/tools/settings/settings/style/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -import os -from openpype import resources - - -def load_stylesheet(): - style_path = os.path.join(os.path.dirname(__file__), "style.css") - with open(style_path, "r") as style_file: - stylesheet = style_file.read() - return stylesheet - - -def app_icon_path(): - return resources.get_openpype_icon_filepath() diff --git a/openpype/tools/settings/settings/style/style.css b/openpype/tools/settings/settings/style/style.css deleted file mode 100644 index b77b575204..0000000000 --- a/openpype/tools/settings/settings/style/style.css +++ /dev/null @@ -1,453 +0,0 @@ -/* :root { - --border-color-: #464b54; -} - */ - - -QWidget { - color: #bfccd6; - background-color: #282C34; - font-size: 12px; - border-radius: 0px; -} - -QMenu { - border: 1px solid #555555; - background-color: #21252B; -} - -QMenu::item { - padding: 5px 10px 5px 10px; - border-left: 5px solid #313131; -} - -QMenu::item:selected { - border-left-color: #61839e; - background-color: #222d37; -} -QCheckBox { - spacing: 0px; -} -QCheckBox::indicator {} -QCheckBox::indicator:focus {} - -QLineEdit, QSpinBox, QDoubleSpinBox, QPlainTextEdit, QTextEdit { - border: 1px solid #464b54; - border-radius: 3px; - background-color: #21252B; -} - -QLineEdit:disabled, QSpinBox:disabled, 
QDoubleSpinBox:disabled, QPlainTextEdit:disabled, QTextEdit:disabled, QPushButton:disabled { - background-color: #464b54; -} - -QLineEdit:focus, QSpinBox:focus, QDoubleSpinBox:focus, QPlainTextEdit:focus, QTextEdit:focus { - border: 1px solid #839caf; -} - -QComboBox { - border: 1px solid #464b54; - border-radius: 3px; - padding: 2px 2px 4px 4px; - background: #21252B; -} - -QComboBox QAbstractItemView::item { - padding: 3px; -} - -QToolButton { - background: transparent; -} - -QLabel { - background: transparent; - color: #969b9e; -} -QLabel:hover {color: #b8c1c5;} - -QLabel[state="studio"] {color: #73C990;} -QLabel[state="studio"]:hover {color: #ffffff;} -QLabel[state="modified"] {color: #189aea;} -QLabel[state="modified"]:hover {color: #46b1f3;} -QLabel[state="overriden-modified"] {color: #189aea;} -QLabel[state="overriden-modified"]:hover {color: #46b1f3;} -QLabel[state="overriden"] {color: #ff8c1a;} -QLabel[state="overriden"]:hover {color: #ffa64d;} -QLabel[state="invalid"] {color: #ad2e2e;} -QLabel[state="invalid"]:hover {color: #ad2e2e;} - - -QWidget[input-state="studio"] {border-color: #858a94;} -QWidget[input-state="modified"] {border-color: #189aea;} -QWidget[input-state="overriden-modified"] {border-color: #189aea;} -QWidget[input-state="overriden"] {border-color: #ff8c1a;} -QWidget[input-state="invalid"] {border-color: #ad2e2e;} - -QPushButton { - border: 1px solid #aaaaaa; - border-radius: 3px; - padding: 5px; -} -QPushButton:hover { - background-color: #333840; - border: 1px solid #fff; - color: #fff; -} -QPushButton[btn-type="tool-item"] { - border: 1px solid #bfccd6; - border-radius: 10px; -} - -QPushButton[btn-type="tool-item"]:hover { - border-color: #189aea; - color: #46b1f3; - background-color: transparent; -} - -QPushButton[btn-type="tool-item-icon"] { - border: 0px solid #bfccd6; - background-color: transparent; -} - -QPushButton[btn-type="expand-toggle"] { - background: #21252B; -} - -/* SLider */ -QSlider::groove { - border: 1px solid 
#464b54; - border-radius: 0.3em; -} -QSlider::groove:horizontal { - height: 8px; -} -QSlider::groove:vertical { - width: 8px; -} -QSlider::handle { - width: 10px; - height: 10px; - - border-radius: 5px; -} -QSlider::handle:horizontal { - margin: -2px 0; -} -QSlider::handle:vertical { - margin: 0 -2px; -} - -#GroupWidget { - border-bottom: 1px solid #21252B; -} - -#ProjectListWidget QListView { - border: 1px solid #464b54; - background: #21252B; -} - -#ProjectListWidget QListView:disabled { - background: #282C34; -} - -#ProjectListWidget QListView::item:disabled { - color: #4e5254; -} - -#ProjectListWidget QLabel { - background: transparent; - font-weight: bold; -} - -#MultiSelectionComboBox { - font-size: 12px; -} - -#DictKey[state="studio"] {border-color: #464b54;} -#DictKey[state="modified"] {border-color: #189aea;} -#DictKey[state="overriden"] {border-color: #00f;} -#DictKey[state="overriden-modified"] {border-color: #0f0;} -#DictKey[state="invalid"] {border-color: #ad2e2e;} - -#DictLabel { - font-weight: bold; -} - -#ContentWidget { - background-color: transparent; -} -#ContentWidget[content_state="hightlighted"] { - background-color: rgba(19, 26, 32, 15%); -} - -#SideLineWidget { - background-color: #333942; - border-style: solid; - border-color: #4e5254; - border-left-width: 3px; - border-bottom-width: 0px; - border-right-width: 0px; - border-top-width: 0px; -} - -#SideLineWidget:hover { - border-color: #7d8386; -} - -#SideLineWidget[state="child-studio"] {border-color: #56a06f;} -#SideLineWidget[state="child-studio"]:hover {border-color: #73C990;} - -#SideLineWidget[state="child-modified"] {border-color: #106aa2;} -#SideLineWidget[state="child-modified"]:hover {border-color: #189aea;} - -#SideLineWidget[state="child-invalid"] {border-color: #ad2e2e;} -#SideLineWidget[state="child-invalid"]:hover {border-color: #c93636;} - -#SideLineWidget[state="child-overriden"] {border-color: #e67300;} -#SideLineWidget[state="child-overriden"]:hover {border-color: 
#ff8c1a;} - -#SideLineWidget[state="child-overriden-modified"] {border-color: #106aa2;} -#SideLineWidget[state="child-overriden-modified"]:hover {border-color: #189aea;} - -#MainWidget { - background: #141a1f; -} - -#DictAsWidgetBody { - background: transparent; -} -#DictAsWidgetBody[show_borders="1"] { - border: 1px solid #4e5254; - border-radius: 5px; -} - -#SplitterItem { - background-color: #21252B; -} - -#ShadowWidget { - font-size: 36pt; -} -QTabWidget::pane { - border-top-style: none; -} - -QTabBar { - background: transparent; -} - -QTabBar::tab { - border-top-left-radius: 4px; - border-top-right-radius: 4px; - padding: 5px; -} - -QTabBar::tab:selected { - background: #282C34; - border-color: #9B9B9B; - border-bottom-color: #C2C7CB; -} - -QTabBar::tab:!selected { - margin-top: 2px; - background: #21252B; -} - -QTabBar::tab:!selected:hover { - background: #333840; -} - -QTabBar::tab:first:selected { - margin-left: 0; -} - -QTabBar::tab:last:selected { - margin-right: 0; -} - -QTabBar::tab:only-one { - margin: 0; -} - -QScrollBar:horizontal { - height: 15px; - margin: 3px 15px 3px 15px; - border: 1px transparent #21252B; - border-radius: 4px; - background-color: #21252B; -} - -QScrollBar::handle:horizontal { - background-color: #4B5362; - min-width: 5px; - border-radius: 4px; -} - -QScrollBar::add-line:horizontal { - margin: 0px 3px 0px 3px; - border-image: url(:/qss_icons/rc/right_arrow_disabled.png); - width: 10px; - height: 10px; - subcontrol-position: right; - subcontrol-origin: margin; -} - -QScrollBar::sub-line:horizontal { - margin: 0px 3px 0px 3px; - border-image: url(:/qss_icons/rc/left_arrow_disabled.png); - height: 10px; - width: 10px; - subcontrol-position: left; - subcontrol-origin: margin; -} - -QScrollBar::add-line:horizontal:hover,QScrollBar::add-line:horizontal:on { - border-image: url(:/qss_icons/rc/right_arrow.png); - height: 10px; - width: 10px; - subcontrol-position: right; - subcontrol-origin: margin; -} - 
-QScrollBar::sub-line:horizontal:hover, QScrollBar::sub-line:horizontal:on { - border-image: url(:/qss_icons/rc/left_arrow.png); - height: 10px; - width: 10px; - subcontrol-position: left; - subcontrol-origin: margin; -} - -QScrollBar::up-arrow:horizontal, QScrollBar::down-arrow:horizontal { - background: none; -} - -QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal { - background: none; -} - -QScrollBar:vertical { - background-color: #21252B; - width: 15px; - margin: 15px 3px 15px 3px; - border: 1px transparent #21252B; - border-radius: 4px; -} - -QScrollBar::handle:vertical { - background-color: #4B5362; - min-height: 5px; - border-radius: 4px; -} - -QScrollBar::sub-line:vertical { - margin: 3px 0px 3px 0px; - border-image: url(:/qss_icons/rc/up_arrow_disabled.png); - height: 10px; - width: 10px; - subcontrol-position: top; - subcontrol-origin: margin; -} - -QScrollBar::add-line:vertical { - margin: 3px 0px 3px 0px; - border-image: url(:/qss_icons/rc/down_arrow_disabled.png); - height: 10px; - width: 10px; - subcontrol-position: bottom; - subcontrol-origin: margin; -} - -QScrollBar::sub-line:vertical:hover,QScrollBar::sub-line:vertical:on { - - border-image: url(:/qss_icons/rc/up_arrow.png); - height: 10px; - width: 10px; - subcontrol-position: top; - subcontrol-origin: margin; -} - - -QScrollBar::add-line:vertical:hover, QScrollBar::add-line:vertical:on { - border-image: url(:/qss_icons/rc/down_arrow.png); - height: 10px; - width: 10px; - subcontrol-position: bottom; - subcontrol-origin: margin; -} - -QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical { - background: none; -} - - -QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { - background: none; -} - -QTableView -{ - border: 1px solid #444; - gridline-color: #6c6c6c; - background-color: #201F1F; - alternate-background-color:#21252B; -} - -QHeaderView -{ - border: 1px transparent; - border-radius: 2px; - margin: 0px; - padding: 0px; -} - -QHeaderView::section { - 
background-color: #21252B; - /*color: silver;*/ - padding: 4px; - border: 1px solid #6c6c6c; - border-radius: 0px; - text-align: center; - color: #969b9e; - font-weight: bold; -} - -QAbstractItemView::item:pressed { - background: #78879b; - color: #FFFFFF; -} - -QAbstractItemView::item:selected:active { - background: #3d8ec9; -} -QAbstractItemView::item:selected:!active { - background: #3d8ec9; -} - -#BreadcrumbsPathInput { - padding: 2px; - font-size: 9pt; -} - -#BreadcrumbsButton { - padding-right: 12px; - font-size: 9pt; -} - -#BreadcrumbsButton[empty="1"] { - padding-right: 0px; -} - -#BreadcrumbsButton::menu-button { - width: 12px; - background: rgba(127, 127, 127, 60); -} -#BreadcrumbsButton::menu-button:hover { - background: rgba(127, 127, 127, 90); -} - -#BreadcrumbsPanel { - border: 1px solid #4e5254; - border-radius: 5px; - background: #21252B;; -} diff --git a/openpype/tools/settings/settings/widgets.py b/openpype/tools/settings/settings/widgets.py index 710884e9e5..7a7213fa66 100644 --- a/openpype/tools/settings/settings/widgets.py +++ b/openpype/tools/settings/settings/widgets.py @@ -6,7 +6,16 @@ from avalon.mongodb import ( AvalonMongoDB ) +from openpype.style import get_objected_colors +from openpype.tools.utils.widgets import ImageButton +from openpype.tools.utils.lib import paint_image_with_color + +from openpype.widgets.nice_checkbox import NiceCheckbox from openpype.settings.lib import get_system_settings +from .images import ( + get_pixmap, + get_image +) from .constants import ( DEFAULT_PROJECT_LABEL, PROJECT_NAME_ROLE, @@ -31,6 +40,78 @@ class SettingsPlainTextEdit(QtWidgets.QPlainTextEdit): self.focused_in.emit() +class SettingsToolBtn(ImageButton): + _mask_pixmap = None + _cached_icons = {} + + def __init__(self, btn_type, parent): + super(SettingsToolBtn, self).__init__(parent) + + icon, hover_icon = self._get_icon_type(btn_type) + + self.setIcon(icon) + + self._icon = icon + self._hover_icon = hover_icon + + @classmethod + def 
_get_icon_type(cls, btn_type): + if btn_type not in cls._cached_icons: + settings_colors = get_objected_colors()["settings"] + normal_color = settings_colors["image-btn"].get_qcolor() + hover_color = settings_colors["image-btn-hover"].get_qcolor() + disabled_color = settings_colors["image-btn-disabled"].get_qcolor() + + image = get_image("{}.png".format(btn_type)) + + pixmap = paint_image_with_color(image, normal_color) + hover_pixmap = paint_image_with_color(image, hover_color) + disabled_pixmap = paint_image_with_color(image, disabled_color) + + icon = QtGui.QIcon(pixmap) + hover_icon = QtGui.QIcon(hover_pixmap) + icon.addPixmap( + disabled_pixmap, QtGui.QIcon.Disabled, QtGui.QIcon.On + ) + icon.addPixmap( + disabled_pixmap, QtGui.QIcon.Disabled, QtGui.QIcon.Off + ) + hover_icon.addPixmap( + disabled_pixmap, QtGui.QIcon.Disabled, QtGui.QIcon.On + ) + hover_icon.addPixmap( + disabled_pixmap, QtGui.QIcon.Disabled, QtGui.QIcon.Off + ) + cls._cached_icons[btn_type] = icon, hover_icon + return cls._cached_icons[btn_type] + + def enterEvent(self, event): + self.setIcon(self._hover_icon) + super(SettingsToolBtn, self).enterEvent(event) + + def leaveEvent(self, event): + self.setIcon(self._icon) + super(SettingsToolBtn, self).leaveEvent(event) + + @classmethod + def _get_mask_pixmap(cls): + if cls._mask_pixmap is None: + mask_pixmap = get_pixmap("mask.png") + cls._mask_pixmap = mask_pixmap + return cls._mask_pixmap + + def _change_size(self): + super(SettingsToolBtn, self)._change_size() + size = self.iconSize() + scaled = self._get_mask_pixmap().scaled( + size.width(), + size.height(), + QtCore.Qt.IgnoreAspectRatio, + QtCore.Qt.SmoothTransformation + ) + self.setMask(scaled.mask()) + + class ShadowWidget(QtWidgets.QWidget): def __init__(self, message, parent): super(ShadowWidget, self).__init__(parent) @@ -132,9 +213,14 @@ class SettingsComboBox(QtWidgets.QComboBox): def __init__(self, *args, **kwargs): super(SettingsComboBox, self).__init__(*args, **kwargs) + delegate 
= QtWidgets.QStyledItemDelegate() + self.setItemDelegate(delegate) + self.currentIndexChanged.connect(self._on_change) self.setFocusPolicy(QtCore.Qt.StrongFocus) + self._delegate = delegate + def wheelEvent(self, event): if self.hasFocus(): return super(SettingsComboBox, self).wheelEvent(event) @@ -180,14 +266,14 @@ class ExpandingWidget(QtWidgets.QWidget): button_size = QtCore.QSize(5, 5) button_toggle = QtWidgets.QToolButton(parent=side_line_widget) - button_toggle.setProperty("btn-type", "expand-toggle") + button_toggle.setObjectName("ExpandToggleBtn") button_toggle.setIconSize(button_size) button_toggle.setArrowType(QtCore.Qt.RightArrow) button_toggle.setCheckable(True) button_toggle.setChecked(False) label_widget = QtWidgets.QLabel(label, parent=side_line_widget) - label_widget.setObjectName("DictLabel") + label_widget.setObjectName("ExpandLabel") before_label_widget = QtWidgets.QWidget(side_line_widget) before_label_layout = QtWidgets.QHBoxLayout(before_label_widget) @@ -381,6 +467,7 @@ class GridLabelWidget(QtWidgets.QWidget): self.properties = {} label_widget = QtWidgets.QLabel(label, self) + label_widget.setObjectName("SettingsLabel") label_proxy_layout = QtWidgets.QHBoxLayout() label_proxy_layout.setContentsMargins(0, 0, 0, 0) @@ -415,197 +502,12 @@ class GridLabelWidget(QtWidgets.QWidget): return super(GridLabelWidget, self).mouseReleaseEvent(event) -class NiceCheckboxMoveWidget(QtWidgets.QFrame): - def __init__(self, height, border_width, parent): - super(NiceCheckboxMoveWidget, self).__init__(parent=parent) - - self.checkstate = False - - self.half_size = int(height / 2) - self.full_size = self.half_size * 2 - self.border_width = border_width - self.setFixedHeight(self.full_size) - self.setFixedWidth(self.full_size) - - self.setStyleSheet(( - "background: #444444;border-style: none;" - "border-radius: {};border-width:{}px;" - ).format(self.half_size, self.border_width)) - - def update_position(self): - parent_rect = self.parent().rect() - if 
self.checkstate is True: - pos_x = ( - parent_rect.x() - + parent_rect.width() - - self.full_size - - self.border_width - ) - else: - pos_x = parent_rect.x() + self.border_width - - pos_y = parent_rect.y() + int( - parent_rect.height() / 2 - self.half_size - ) - self.setGeometry(pos_x, pos_y, self.width(), self.height()) - - def state_offset(self): - diff_x = ( - self.parent().rect().width() - - self.full_size - - (2 * self.border_width) - ) - return QtCore.QPoint(diff_x, 0) - - def change_position(self, checkstate): - self.checkstate = checkstate - - self.update_position() - - def resizeEvent(self, event): - super().resizeEvent(event) - self.update_position() - - -class NiceCheckbox(QtWidgets.QFrame): - stateChanged = QtCore.Signal(int) - checked_bg_color = QtGui.QColor(69, 128, 86) - unchecked_bg_color = QtGui.QColor(170, 80, 80) +class SettingsNiceCheckbox(NiceCheckbox): focused_in = QtCore.Signal() - def set_bg_color(self, color): - self._bg_color = color - self.setStyleSheet(self._stylesheet_template.format( - color.red(), color.green(), color.blue() - )) - - def bg_color(self): - return self._bg_color - - bgcolor = QtCore.Property(QtGui.QColor, bg_color, set_bg_color) - - def __init__(self, checked=True, height=30, *args, **kwargs): - super(NiceCheckbox, self).__init__(*args, **kwargs) - - self._checkstate = checked - if checked: - bg_color = self.checked_bg_color - else: - bg_color = self.unchecked_bg_color - - self.half_height = int(height / 2) - height = self.half_height * 2 - tenth_height = int(height / 10) - - self.setFixedHeight(height) - self.setFixedWidth((height - tenth_height) * 2) - - move_item_size = height - (2 * tenth_height) - - self.move_item = NiceCheckboxMoveWidget( - move_item_size, tenth_height, self - ) - self.move_item.change_position(self._checkstate) - - self._stylesheet_template = ( - "border-radius: {}px;" - "border-width: {}px;" - "background: #333333;" - "border-style: solid;" - "border-color: #555555;" - ).format(self.half_height, 
tenth_height) - self._stylesheet_template += "background: rgb({},{},{});" - - self.set_bg_color(bg_color) - - def resizeEvent(self, event): - super(NiceCheckbox, self).resizeEvent(event) - self.move_item.update_position() - - def show(self, *args, **kwargs): - super(NiceCheckbox, self).show(*args, **kwargs) - self.move_item.update_position() - - def checkState(self): - if self._checkstate: - return QtCore.Qt.Checked - else: - return QtCore.Qt.Unchecked - - def _on_checkstate_change(self): - self.stateChanged.emit(self.checkState()) - - move_start_value = self.move_item.pos() - offset = self.move_item.state_offset() - if self._checkstate is True: - move_end_value = move_start_value + offset - else: - move_end_value = move_start_value - offset - move_animation = QtCore.QPropertyAnimation( - self.move_item, b"pos", self - ) - move_animation.setDuration(150) - move_animation.setEasingCurve(QtCore.QEasingCurve.OutQuad) - move_animation.setStartValue(move_start_value) - move_animation.setEndValue(move_end_value) - - color_animation = QtCore.QPropertyAnimation( - self, b"bgcolor" - ) - color_animation.setDuration(150) - if self._checkstate is True: - color_animation.setStartValue(self.unchecked_bg_color) - color_animation.setEndValue(self.checked_bg_color) - else: - color_animation.setStartValue(self.checked_bg_color) - color_animation.setEndValue(self.unchecked_bg_color) - - anim_group = QtCore.QParallelAnimationGroup(self) - anim_group.addAnimation(move_animation) - anim_group.addAnimation(color_animation) - - def _finished(): - self.move_item.change_position(self._checkstate) - - anim_group.finished.connect(_finished) - anim_group.start() - - def isChecked(self): - return self._checkstate - - def setChecked(self, checked): - if checked == self._checkstate: - return - self._checkstate = checked - self._on_checkstate_change() - - def setCheckState(self, state=None): - if state is None: - checkstate = not self._checkstate - elif state == QtCore.Qt.Checked: - checkstate = 
True - elif state == QtCore.Qt.Unchecked: - checkstate = False - else: - return - - if checkstate == self._checkstate: - return - - self._checkstate = checkstate - - self._on_checkstate_change() - def mousePressEvent(self, event): self.focused_in.emit() - super(NiceCheckbox, self).mousePressEvent(event) - - def mouseReleaseEvent(self, event): - if event.button() == QtCore.Qt.LeftButton: - self.setCheckState() - event.accept() - return - return super(NiceCheckbox, self).mouseReleaseEvent(event) + super(SettingsNiceCheckbox, self).mousePressEvent(event) class ProjectModel(QtGui.QStandardItemModel): diff --git a/openpype/tools/settings/settings/window.py b/openpype/tools/settings/settings/window.py index 4e88301349..fd0cd1d7cd 100644 --- a/openpype/tools/settings/settings/window.py +++ b/openpype/tools/settings/settings/window.py @@ -5,7 +5,7 @@ from .categories import ( ProjectWidget ) from .widgets import ShadowWidget, RestartDialog -from . import style +from openpype import style from openpype.lib import is_admin_password_required from openpype.widgets import PasswordDialog @@ -25,7 +25,7 @@ class MainWidget(QtWidgets.QWidget): self._password_dialog = None - self.setObjectName("MainWidget") + self.setObjectName("SettingsMainWidget") self.setWindowTitle("OpenPype Settings") self.resize(self.widget_width, self.widget_height) diff --git a/openpype/tools/subsetmanager/README.md b/openpype/tools/subsetmanager/README.md new file mode 100644 index 0000000000..062214834a --- /dev/null +++ b/openpype/tools/subsetmanager/README.md @@ -0,0 +1,19 @@ +Subset manager +-------------- + +Simple UI showing list of created subset that will be published via Pyblish. +Useful for applications (Photoshop, AfterEffects, TVPaint, Harmony) which are +storing metadata about instance hidden from user. 
+ +This UI allows listing all created subset and removal of them if needed ( +in case use doesn't want to publish anymore, its using workfile as a starting +file for different task and instances should be completely different etc. +) + +Host is expected to implemented: +- `list_instances` - returning list of dictionaries (instances), must contain + unique uuid field + example: + ```[{"uuid":"15","active":true,"subset":"imageBG","family":"image","id":"pyblish.avalon.instance","asset":"Town"}]``` +- `remove_instance(instance)` - removes instance from file's metadata + instance is a dictionary, with uuid field \ No newline at end of file diff --git a/openpype/tools/subsetmanager/__init__.py b/openpype/tools/subsetmanager/__init__.py new file mode 100644 index 0000000000..6cfca7db66 --- /dev/null +++ b/openpype/tools/subsetmanager/__init__.py @@ -0,0 +1,9 @@ +from .window import ( + show, + SubsetManagerWindow +) + +__all__ = ( + "show", + "SubsetManagerWindow" +) diff --git a/openpype/tools/subsetmanager/model.py b/openpype/tools/subsetmanager/model.py new file mode 100644 index 0000000000..b76c3c2343 --- /dev/null +++ b/openpype/tools/subsetmanager/model.py @@ -0,0 +1,52 @@ +import uuid + +from Qt import QtCore, QtGui + +from avalon import api + +ITEM_ID_ROLE = QtCore.Qt.UserRole + 1 + + +class InstanceModel(QtGui.QStandardItemModel): + def __init__(self, *args, **kwargs): + super(InstanceModel, self).__init__(*args, **kwargs) + self._instances_by_item_id = {} + + def get_instance_by_id(self, item_id): + return self._instances_by_item_id.get(item_id) + + def refresh(self): + self.clear() + + self._instances_by_item_id = {} + + instances = None + host = api.registered_host() + list_instances = getattr(host, "list_instances", None) + if list_instances: + instances = list_instances() + + if not instances: + return + + items = [] + for instance_data in instances: + item_id = str(uuid.uuid4()) + label = instance_data.get("label") or instance_data["subset"] + item = 
QtGui.QStandardItem(label) + item.setEnabled(True) + item.setEditable(False) + item.setData(item_id, ITEM_ID_ROLE) + items.append(item) + self._instances_by_item_id[item_id] = instance_data + + if items: + self.invisibleRootItem().appendRows(items) + + def headerData(self, section, orientation, role): + if role == QtCore.Qt.DisplayRole and section == 0: + return "Instance" + + return super(InstanceModel, self).headerData( + section, orientation, role + ) diff --git a/openpype/tools/subsetmanager/widgets.py b/openpype/tools/subsetmanager/widgets.py new file mode 100644 index 0000000000..7a8cb15cbf --- /dev/null +++ b/openpype/tools/subsetmanager/widgets.py @@ -0,0 +1,110 @@ +import json +from Qt import QtWidgets, QtCore + + +class InstanceDetail(QtWidgets.QWidget): + save_triggered = QtCore.Signal() + + def __init__(self, parent=None): + super(InstanceDetail, self).__init__(parent) + + details_widget = QtWidgets.QPlainTextEdit(self) + details_widget.setObjectName("SubsetManagerDetailsText") + + save_btn = QtWidgets.QPushButton("Save", self) + + self._block_changes = False + self._editable = False + self._item_id = None + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(details_widget, 1) + layout.addWidget(save_btn, 0, QtCore.Qt.AlignRight) + + save_btn.clicked.connect(self._on_save_clicked) + details_widget.textChanged.connect(self._on_text_change) + + self._details_widget = details_widget + self._save_btn = save_btn + + self.set_editable(False) + + def _on_save_clicked(self): + if self.is_valid(): + self.save_triggered.emit() + + def set_editable(self, enabled=True): + self._editable = enabled + self.update_state() + + def update_state(self, valid=None): + editable = self._editable + if not self._item_id: + editable = False + + self._save_btn.setVisible(editable) + self._details_widget.setReadOnly(not editable) + if valid is None: + valid = self.is_valid() + + self._save_btn.setEnabled(valid) + 
self._set_invalid_detail(valid) + + def _set_invalid_detail(self, valid): + state = "" + if not valid: + state = "invalid" + + current_state = self._details_widget.property("state") + if current_state != state: + self._details_widget.setProperty("state", state) + self._details_widget.style().polish(self._details_widget) + + def set_details(self, container, item_id): + self._item_id = item_id + + text = "Nothing selected" + if item_id: + try: + text = json.dumps(container, indent=4) + except Exception: + text = str(container) + + self._block_changes = True + self._details_widget.setPlainText(text) + self._block_changes = False + + self.update_state() + + def instance_data_from_text(self): + try: + jsoned = json.loads(self._details_widget.toPlainText()) + except Exception: + jsoned = None + return jsoned + + def item_id(self): + return self._item_id + + def is_valid(self): + if not self._item_id: + return True + + value = self._details_widget.toPlainText() + valid = False + try: + jsoned = json.loads(value) + if jsoned and isinstance(jsoned, dict): + valid = True + + except Exception: + pass + return valid + + def _on_text_change(self): + if self._block_changes or not self._item_id: + return + + valid = self.is_valid() + self.update_state(valid) diff --git a/openpype/tools/subsetmanager/window.py b/openpype/tools/subsetmanager/window.py new file mode 100644 index 0000000000..cb0e3c1c1e --- /dev/null +++ b/openpype/tools/subsetmanager/window.py @@ -0,0 +1,218 @@ +import os +import sys + +from Qt import QtWidgets, QtCore + +from avalon import api +from avalon.vendor import qtawesome + +from openpype import style +from openpype.tools.utils.lib import ( + iter_model_rows, + qt_app_context +) +from openpype.tools.utils.models import RecursiveSortFilterProxyModel +from .model import ( + InstanceModel, + ITEM_ID_ROLE +) +from .widgets import InstanceDetail + + +module = sys.modules[__name__] +module.window = None + + +class SubsetManagerWindow(QtWidgets.QDialog): + def 
__init__(self, parent=None): + super(SubsetManagerWindow, self).__init__(parent=parent) + self.setWindowTitle("Subset Manager 0.1") + self.setObjectName("SubsetManager") + if not parent: + self.setWindowFlags( + self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint + ) + + self.resize(780, 430) + + # Trigger refresh on first called show + self._first_show = True + + left_side_widget = QtWidgets.QWidget(self) + + # Header part + header_widget = QtWidgets.QWidget(left_side_widget) + + # Filter input + filter_input = QtWidgets.QLineEdit(header_widget) + filter_input.setPlaceholderText("Filter subsets..") + + # Refresh button + icon = qtawesome.icon("fa.refresh", color="white") + refresh_btn = QtWidgets.QPushButton(header_widget) + refresh_btn.setIcon(icon) + + header_layout = QtWidgets.QHBoxLayout(header_widget) + header_layout.setContentsMargins(0, 0, 0, 0) + header_layout.addWidget(filter_input) + header_layout.addWidget(refresh_btn) + + # Instances view + view = QtWidgets.QTreeView(left_side_widget) + view.setIndentation(0) + view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + + model = InstanceModel(view) + proxy = RecursiveSortFilterProxyModel() + proxy.setSourceModel(model) + proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) + + view.setModel(proxy) + + left_side_layout = QtWidgets.QVBoxLayout(left_side_widget) + left_side_layout.setContentsMargins(0, 0, 0, 0) + left_side_layout.addWidget(header_widget) + left_side_layout.addWidget(view) + + details_widget = InstanceDetail(self) + + layout = QtWidgets.QHBoxLayout(self) + layout.addWidget(left_side_widget, 0) + layout.addWidget(details_widget, 1) + + filter_input.textChanged.connect(proxy.setFilterFixedString) + refresh_btn.clicked.connect(self._on_refresh_clicked) + view.clicked.connect(self._on_activated) + view.customContextMenuRequested.connect(self.on_context_menu) + details_widget.save_triggered.connect(self._on_save) + + self._model = model + self._proxy = proxy + self._view = view + 
self._details_widget = details_widget + self._refresh_btn = refresh_btn + + def _on_refresh_clicked(self): + self.refresh() + + def _on_activated(self, index): + container = None + item_id = None + if index.isValid(): + item_id = index.data(ITEM_ID_ROLE) + container = self._model.get_instance_by_id(item_id) + + self._details_widget.set_details(container, item_id) + + def _on_save(self): + host = api.registered_host() + if not hasattr(host, "save_instances"): + print("BUG: Host does not have \"save_instances\" method") + return + + current_index = self._view.selectionModel().currentIndex() + if not current_index.isValid(): + return + + item_id = current_index.data(ITEM_ID_ROLE) + if item_id != self._details_widget.item_id(): + return + + item_data = self._details_widget.instance_data_from_text() + new_instances = [] + for index in iter_model_rows(self._model, 0): + _item_id = index.data(ITEM_ID_ROLE) + if _item_id == item_id: + instance_data = item_data + else: + instance_data = self._model.get_instance_by_id(item_id) + new_instances.append(instance_data) + + host.save_instances(new_instances) + + def on_context_menu(self, point): + point_index = self._view.indexAt(point) + item_id = point_index.data(ITEM_ID_ROLE) + instance_data = self._model.get_instance_by_id(item_id) + if instance_data is None: + return + + # Prepare menu + menu = QtWidgets.QMenu(self) + actions = [] + host = api.registered_host() + if hasattr(host, "remove_instance"): + action = QtWidgets.QAction("Remove instance", menu) + action.setData(host.remove_instance) + actions.append(action) + + if hasattr(host, "select_instance"): + action = QtWidgets.QAction("Select instance", menu) + action.setData(host.select_instance) + actions.append(action) + + if not actions: + actions.append(QtWidgets.QAction("* Nothing to do", menu)) + + for action in actions: + menu.addAction(action) + + # Show menu under mouse + global_point = self._view.mapToGlobal(point) + action = menu.exec_(global_point) + if not action 
or not action.data(): + return + + # Process action + # TODO catch exceptions + function = action.data() + function(instance_data) + + # Reset modified data + self.refresh() + + def refresh(self): + self._details_widget.set_details(None, None) + self._model.refresh() + + host = api.registered_host() + dev_mode = os.environ.get("AVALON_DEVELOP_MODE") or "" + editable = False + if dev_mode.lower() in ("1", "yes", "true", "on"): + editable = hasattr(host, "save_instances") + self._details_widget.set_editable(editable) + + def showEvent(self, *args, **kwargs): + super(SubsetManagerWindow, self).showEvent(*args, **kwargs) + if self._first_show: + self._first_show = False + self.setStyleSheet(style.load_stylesheet()) + self.refresh() + + +def show(root=None, debug=False, parent=None): + """Display Scene Inventory GUI + + Arguments: + debug (bool, optional): Run in debug-mode, + defaults to False + parent (QtCore.QObject, optional): When provided parent the interface + to this QObject. + + """ + + try: + module.window.close() + del module.window + except (RuntimeError, AttributeError): + pass + + with qt_app_context(): + window = SubsetManagerWindow(parent) + window.show() + + module.window = window + + # Pull window to the front. 
+ module.window.raise_() + module.window.activateWindow() diff --git a/openpype/tools/utils/assets_widget.py b/openpype/tools/utils/assets_widget.py new file mode 100644 index 0000000000..041bb1ef1c --- /dev/null +++ b/openpype/tools/utils/assets_widget.py @@ -0,0 +1,810 @@ +import time +import collections + +import Qt +from Qt import QtWidgets, QtCore, QtGui + +from avalon import style +from avalon.vendor import qtawesome + +from openpype.style import get_objected_colors +from openpype.tools.flickcharm import FlickCharm + +from .views import ( + TreeViewSpinner, + DeselectableTreeView +) +from .widgets import PlaceholderLineEdit +from .models import RecursiveSortFilterProxyModel +from .lib import DynamicQThread + +if Qt.__binding__ == "PySide": + from PySide.QtGui import QStyleOptionViewItemV4 +elif Qt.__binding__ == "PyQt4": + from PyQt4.QtGui import QStyleOptionViewItemV4 + +ASSET_ID_ROLE = QtCore.Qt.UserRole + 1 +ASSET_NAME_ROLE = QtCore.Qt.UserRole + 2 +ASSET_LABEL_ROLE = QtCore.Qt.UserRole + 3 +ASSET_UNDERLINE_COLORS_ROLE = QtCore.Qt.UserRole + 4 + + +class AssetsView(TreeViewSpinner, DeselectableTreeView): + """Asset items view. + + Adds abilities to deselect, show loading spinner and add flick charm + (scroll by mouse/touchpad click and move). 
+ """ + + def __init__(self, parent=None): + super(AssetsView, self).__init__(parent) + self.setIndentation(15) + self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + self.setHeaderHidden(True) + + self._flick_charm_activated = False + self._flick_charm = FlickCharm(parent=self) + self._before_flick_scroll_mode = None + + def activate_flick_charm(self): + if self._flick_charm_activated: + return + self._flick_charm_activated = True + self._before_flick_scroll_mode = self.verticalScrollMode() + self._flick_charm.activateOn(self) + self.setVerticalScrollMode(self.ScrollPerPixel) + + def deactivate_flick_charm(self): + if not self._flick_charm_activated: + return + self._flick_charm_activated = False + self._flick_charm.deactivateFrom(self) + if self._before_flick_scroll_mode is not None: + self.setVerticalScrollMode(self._before_flick_scroll_mode) + + def mousePressEvent(self, event): + index = self.indexAt(event.pos()) + if not index.isValid(): + modifiers = QtWidgets.QApplication.keyboardModifiers() + if modifiers == QtCore.Qt.ShiftModifier: + return + elif modifiers == QtCore.Qt.ControlModifier: + return + + super(AssetsView, self).mousePressEvent(event) + + def set_loading_state(self, loading, empty): + """Change loading state. + + TODO: Separate into 2 individual methods. + + Args: + loading(bool): Is loading. + empty(bool): Is model empty. + """ + if self.is_loading != loading: + if loading: + self.spinner.repaintNeeded.connect( + self.viewport().update + ) + else: + self.spinner.repaintNeeded.disconnect() + self.viewport().update() + + self.is_loading = loading + self.is_empty = empty + + +class UnderlinesAssetDelegate(QtWidgets.QItemDelegate): + """Item delegate drawing bars under asset name. + + This is used in loader and library loader tools. Multiselection of assets + may group subsets by name under colored groups. Selected color groups are + then propagated back to selected assets as underlines. 
+ """ + bar_height = 3 + + def __init__(self, *args, **kwargs): + super(UnderlinesAssetDelegate, self).__init__(*args, **kwargs) + asset_view_colors = get_objected_colors()["loader"]["asset-view"] + self._selected_color = ( + asset_view_colors["selected"].get_qcolor() + ) + self._hover_color = ( + asset_view_colors["hover"].get_qcolor() + ) + self._selected_hover_color = ( + asset_view_colors["selected-hover"].get_qcolor() + ) + + def sizeHint(self, option, index): + """Add bar height to size hint.""" + result = super(UnderlinesAssetDelegate, self).sizeHint(option, index) + height = result.height() + result.setHeight(height + self.bar_height) + + return result + + def paint(self, painter, option, index): + """Replicate painting of an item and draw color bars if needed.""" + # Qt4 compat + if Qt.__binding__ in ("PySide", "PyQt4"): + option = QStyleOptionViewItemV4(option) + + painter.save() + + item_rect = QtCore.QRect(option.rect) + item_rect.setHeight(option.rect.height() - self.bar_height) + + subset_colors = index.data(ASSET_UNDERLINE_COLORS_ROLE) or [] + subset_colors_width = 0 + if subset_colors: + subset_colors_width = option.rect.width() / len(subset_colors) + + subset_rects = [] + counter = 0 + for subset_c in subset_colors: + new_color = None + new_rect = None + if subset_c: + new_color = QtGui.QColor(*subset_c) + + new_rect = QtCore.QRect( + option.rect.left() + (counter * subset_colors_width), + option.rect.top() + ( + option.rect.height() - self.bar_height + ), + subset_colors_width, + self.bar_height + ) + subset_rects.append((new_color, new_rect)) + counter += 1 + + # Background + if option.state & QtWidgets.QStyle.State_Selected: + if len(subset_colors) == 0: + item_rect.setTop(item_rect.top() + (self.bar_height / 2)) + + if option.state & QtWidgets.QStyle.State_MouseOver: + bg_color = self._selected_hover_color + else: + bg_color = self._selected_color + else: + item_rect.setTop(item_rect.top() + (self.bar_height / 2)) + if option.state & 
QtWidgets.QStyle.State_MouseOver: + bg_color = self._hover_color + else: + bg_color = QtGui.QColor() + bg_color.setAlpha(0) + + # When not needed to do a rounded corners (easier and without + # painter restore): + painter.fillRect( + option.rect, + QtGui.QBrush(bg_color) + ) + + if option.state & QtWidgets.QStyle.State_Selected: + for color, subset_rect in subset_rects: + if not color or not subset_rect: + continue + painter.fillRect(subset_rect, QtGui.QBrush(color)) + + # Icon + icon_index = index.model().index( + index.row(), index.column(), index.parent() + ) + # - Default icon_rect if not icon + icon_rect = QtCore.QRect( + item_rect.left(), + item_rect.top(), + # To make sure it's same size all the time + option.rect.height() - self.bar_height, + option.rect.height() - self.bar_height + ) + icon = index.model().data(icon_index, QtCore.Qt.DecorationRole) + + if icon: + mode = QtGui.QIcon.Normal + if not (option.state & QtWidgets.QStyle.State_Enabled): + mode = QtGui.QIcon.Disabled + elif option.state & QtWidgets.QStyle.State_Selected: + mode = QtGui.QIcon.Selected + + if isinstance(icon, QtGui.QPixmap): + icon = QtGui.QIcon(icon) + option.decorationSize = icon.size() / icon.devicePixelRatio() + + elif isinstance(icon, QtGui.QColor): + pixmap = QtGui.QPixmap(option.decorationSize) + pixmap.fill(icon) + icon = QtGui.QIcon(pixmap) + + elif isinstance(icon, QtGui.QImage): + icon = QtGui.QIcon(QtGui.QPixmap.fromImage(icon)) + option.decorationSize = icon.size() / icon.devicePixelRatio() + + elif isinstance(icon, QtGui.QIcon): + state = QtGui.QIcon.Off + if option.state & QtWidgets.QStyle.State_Open: + state = QtGui.QIcon.On + actual_size = option.icon.actualSize( + option.decorationSize, mode, state + ) + option.decorationSize = QtCore.QSize( + min(option.decorationSize.width(), actual_size.width()), + min(option.decorationSize.height(), actual_size.height()) + ) + + state = QtGui.QIcon.Off + if option.state & QtWidgets.QStyle.State_Open: + state = QtGui.QIcon.On + + 
icon.paint( + painter, icon_rect, + QtCore.Qt.AlignLeft, mode, state + ) + + # Text + text_rect = QtCore.QRect( + icon_rect.left() + icon_rect.width() + 2, + item_rect.top(), + item_rect.width(), + item_rect.height() + ) + + painter.drawText( + text_rect, QtCore.Qt.AlignVCenter, + index.data(QtCore.Qt.DisplayRole) + ) + + painter.restore() + + +class AssetModel(QtGui.QStandardItemModel): + """A model listing assets in the active project. + + The assets are displayed in a treeview, they are visually parented by + a `visualParent` field in the database containing an `_id` to a parent + asset. + + Asset document may have defined label, icon or icon color. + + Loading of data for model happens in thread which means that refresh + is not sequential. When refresh is triggered it is required to listen for + 'refreshed' signal. + + Args: + dbcon (AvalonMongoDB): Ready to use connection to mongo with. + parent (QObject): Parent Qt object. + """ + + _doc_fetched = QtCore.Signal() + refreshed = QtCore.Signal(bool) + + # Asset document projection + _asset_projection = { + "name": 1, + "parent": 1, + "data.visualParent": 1, + "data.label": 1, + "data.icon": 1, + "data.color": 1 + } + + def __init__(self, dbcon, parent=None): + super(AssetModel, self).__init__(parent=parent) + self.dbcon = dbcon + + self._refreshing = False + self._doc_fetching_thread = None + self._doc_fetching_stop = False + self._doc_payload = [] + + self._doc_fetched.connect(self._on_docs_fetched) + + self._items_with_color_by_id = {} + self._items_by_asset_id = {} + + @property + def refreshing(self): + return self._refreshing + + def get_index_by_asset_id(self, asset_id): + item = self._items_by_asset_id.get(asset_id) + if item is not None: + return item.index() + return QtCore.QModelIndex() + + def get_indexes_by_asset_ids(self, asset_ids): + return [ + self.get_index_by_asset_id(asset_id) + for asset_id in asset_ids + ] + + def get_index_by_asset_name(self, asset_name): + indexes = 
self.get_indexes_by_asset_names([asset_name]) + for index in indexes: + if index.isValid(): + return index + return indexes[0] + + def get_indexes_by_asset_names(self, asset_names): + asset_ids_by_name = { + asset_name: None + for asset_name in asset_names + } + + for asset_id, item in self._items_by_asset_id.items(): + asset_name = item.data(ASSET_NAME_ROLE) + if asset_name in asset_ids_by_name: + asset_ids_by_name[asset_name] = asset_id + + asset_ids = [ + asset_ids_by_name[asset_name] + for asset_name in asset_names + ] + + return self.get_indexes_by_asset_ids(asset_ids) + + def refresh(self, force=False): + """Refresh the data for the model.""" + # Skip fetch if there is already other thread fetching documents + if self._refreshing: + if not force: + return + self.stop_refresh() + + # Fetch documents from mongo + # Restart payload + self._refreshing = True + self._doc_payload = [] + self._doc_fetching_thread = DynamicQThread(self._threaded_fetch) + self._doc_fetching_thread.start() + + def stop_refresh(self): + self._stop_fetch_thread() + + def clear_underlines(self): + for asset_id in tuple(self._items_with_color_by_id.keys()): + item = self._items_with_color_by_id.pop(asset_id) + item.setData(None, ASSET_UNDERLINE_COLORS_ROLE) + + def set_underline_colors(self, colors_by_asset_id): + self.clear_underlines() + + for asset_id, colors in colors_by_asset_id.items(): + item = self._items_by_asset_id.get(asset_id) + if item is None: + continue + item.setData(colors, ASSET_UNDERLINE_COLORS_ROLE) + + def _on_docs_fetched(self): + # Make sure refreshing did not change + # - since this line is refreshing sequential and + # triggering of new refresh will happen when this method is done + if not self._refreshing: + root_item = self.invisibleRootItem() + root_item.removeRows(0, root_item.rowCount()) + self._items_by_asset_id = {} + self._items_with_color_by_id = {} + return + + # Collect asset documents as needed + asset_ids = set() + asset_docs_by_id = {} + 
asset_ids_by_parents = collections.defaultdict(set) + for asset_doc in self._doc_payload: + asset_id = asset_doc["_id"] + asset_data = asset_doc.get("data") or {} + parent_id = asset_data.get("visualParent") + asset_ids.add(asset_id) + asset_docs_by_id[asset_id] = asset_doc + asset_ids_by_parents[parent_id].add(asset_id) + + # Prepare removed asset ids + removed_asset_ids = ( + set(self._items_by_asset_id.keys()) - set(asset_docs_by_id.keys()) + ) + + # Prepare queue for adding new items + asset_items_queue = collections.deque() + + # Queue starts with root item and 'visualParent' None + root_item = self.invisibleRootItem() + asset_items_queue.append((None, root_item)) + + while asset_items_queue: + # Get item from queue + parent_id, parent_item = asset_items_queue.popleft() + # Skip if there are no children + children_ids = asset_ids_by_parents[parent_id] + if not children_ids: + continue + + # Go through current children of parent item + # - find out items that were deleted and skip creation of already + # existing items + for row in reversed(range(parent_item.rowCount())): + child_item = parent_item.child(row, 0) + asset_id = child_item.data(ASSET_ID_ROLE) + # Remove item that is not available + if asset_id not in children_ids: + if asset_id in removed_asset_ids: + # Remove and destroy row + parent_item.removeRow(row) + else: + # Just take the row from parent without destroying + parent_item.takeRow(row) + continue + + # Remove asset id from `children_ids` set + # - is used as set for creation of "new items" + children_ids.remove(asset_id) + # Add existing children to queue + asset_items_queue.append((asset_id, child_item)) + + new_items = [] + for asset_id in children_ids: + # Look for item in cache (maybe parent changed) + item = self._items_by_asset_id.get(asset_id) + # Create new item if was not found + if item is None: + item = QtGui.QStandardItem() + item.setEditable(False) + item.setData(asset_id, ASSET_ID_ROLE) + self._items_by_asset_id[asset_id] = item 
+ new_items.append(item) + # Add item to queue + asset_items_queue.append((asset_id, item)) + + if new_items: + parent_item.appendRows(new_items) + + # Remove cache of removed items + for asset_id in removed_asset_ids: + self._items_by_asset_id.pop(asset_id) + if asset_id in self._items_with_color_by_id: + self._items_with_color_by_id.pop(asset_id) + + # Refresh data + # - all items refresh all data except id + for asset_id, item in self._items_by_asset_id.items(): + asset_doc = asset_docs_by_id[asset_id] + + asset_name = asset_doc["name"] + if item.data(ASSET_NAME_ROLE) != asset_name: + item.setData(asset_name, ASSET_NAME_ROLE) + + asset_data = asset_doc.get("data") or {} + asset_label = asset_data.get("label") or asset_name + if item.data(ASSET_LABEL_ROLE) != asset_label: + item.setData(asset_label, QtCore.Qt.DisplayRole) + item.setData(asset_label, ASSET_LABEL_ROLE) + + icon_color = asset_data.get("color") or style.colors.default + icon_name = asset_data.get("icon") + if not icon_name: + # Use default icons if no custom one is specified. 
+ # If it has children show a full folder, otherwise + # show an open folder + if item.rowCount() > 0: + icon_name = "folder" + else: + icon_name = "folder-o" + + try: + # font-awesome key + full_icon_name = "fa.{0}".format(icon_name) + icon = qtawesome.icon(full_icon_name, color=icon_color) + item.setData(icon, QtCore.Qt.DecorationRole) + + except Exception: + pass + + self.refreshed.emit(bool(self._items_by_asset_id)) + + self._stop_fetch_thread() + + def _threaded_fetch(self): + asset_docs = self._fetch_asset_docs() + if not self._refreshing: + return + + self._doc_payload = asset_docs + + # Emit doc fetched only if was not stopped + self._doc_fetched.emit() + + def _fetch_asset_docs(self): + if not self.dbcon.Session.get("AVALON_PROJECT"): + return [] + + project_doc = self.dbcon.find_one( + {"type": "project"}, + {"_id": True} + ) + if not project_doc: + return [] + + # Get all assets sorted by name + return list(self.dbcon.find( + {"type": "asset"}, + self._asset_projection + )) + + def _stop_fetch_thread(self): + self._refreshing = False + if self._doc_fetching_thread is not None: + while self._doc_fetching_thread.isRunning(): + time.sleep(0.01) + self._doc_fetching_thread = None + + +class AssetsWidget(QtWidgets.QWidget): + """Base widget to display a tree of assets with filter. + + Assets have only one column and are sorted by name. + + Refreshing of assets happens in thread so calling 'refresh' method + is not sequential. To capture moment when refreshing is finished listen + to 'refreshed' signal. + + To capture selection changes listen to 'selection_changed' signal. It won't + send any information about new selection as it may be different based on + inheritance changes. + + Args: + dbcon (AvalonMongoDB): Connection to avalon mongo db. + parent (QWidget): Parent Qt widget. 
+ """ + + # on model refresh + refresh_triggered = QtCore.Signal() + refreshed = QtCore.Signal() + # on view selection change + selection_changed = QtCore.Signal() + # It was double clicked on view + double_clicked = QtCore.Signal() + + def __init__(self, dbcon, parent=None): + super(AssetsWidget, self).__init__(parent=parent) + + self.dbcon = dbcon + + # Tree View + model = AssetModel(dbcon=self.dbcon, parent=self) + proxy = RecursiveSortFilterProxyModel() + proxy.setSourceModel(model) + proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) + proxy.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive) + + view = AssetsView(self) + view.setModel(proxy) + + current_asset_icon = qtawesome.icon( + "fa.arrow-down", color=style.colors.light + ) + current_asset_btn = QtWidgets.QPushButton(self) + current_asset_btn.setIcon(current_asset_icon) + current_asset_btn.setToolTip("Go to Asset from current Session") + # Hide by default + current_asset_btn.setVisible(False) + + refresh_icon = qtawesome.icon("fa.refresh", color=style.colors.light) + refresh_btn = QtWidgets.QPushButton(self) + refresh_btn.setIcon(refresh_icon) + refresh_btn.setToolTip("Refresh items") + + filter_input = PlaceholderLineEdit(self) + filter_input.setPlaceholderText("Filter assets..") + + # Header + header_layout = QtWidgets.QHBoxLayout() + header_layout.addWidget(filter_input) + header_layout.addWidget(current_asset_btn) + header_layout.addWidget(refresh_btn) + + # Layout + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(4) + layout.addLayout(header_layout) + layout.addWidget(view) + + # Signals/Slots + filter_input.textChanged.connect(self._on_filter_text_change) + + selection_model = view.selectionModel() + selection_model.selectionChanged.connect(self._on_selection_change) + refresh_btn.clicked.connect(self.refresh) + current_asset_btn.clicked.connect(self.set_current_session_asset) + model.refreshed.connect(self._on_model_refresh) + 
view.doubleClicked.connect(self.double_clicked) + + self._current_asset_btn = current_asset_btn + self._model = model + self._proxy = proxy + self._view = view + + self.model_selection = {} + + @property + def refreshing(self): + return self._model.refreshing + + def refresh(self): + self._refresh_model() + + def stop_refresh(self): + self._model.stop_refresh() + + def set_current_session_asset(self): + asset_name = self.dbcon.Session.get("AVALON_ASSET") + if asset_name: + self.select_asset_by_name(asset_name) + + def set_current_asset_btn_visibility(self, visible=None): + """Hide set current asset button. + + Not all tools support using of current context asset. + """ + if visible is None: + visible = not self._current_asset_btn.isVisible() + self._current_asset_btn.setVisible(visible) + + def select_asset(self, asset_id): + index = self._model.get_index_by_asset_id(asset_id) + new_index = self._proxy.mapFromSource(index) + self._select_indexes([new_index]) + + def select_asset_by_name(self, asset_name): + index = self._model.get_index_by_asset_name(asset_name) + new_index = self._proxy.mapFromSource(index) + self._select_indexes([new_index]) + + def activate_flick_charm(self): + self._view.activate_flick_charm() + + def deactivate_flick_charm(self): + self._view.deactivate_flick_charm() + + def _on_selection_change(self): + self.selection_changed.emit() + + def _on_filter_text_change(self, new_text): + self._proxy.setFilterFixedString(new_text) + + def _on_model_refresh(self, has_item): + self._proxy.sort(0) + self._set_loading_state(loading=False, empty=not has_item) + self.refreshed.emit() + + def _refresh_model(self): + # Store selection + self._set_loading_state(loading=True, empty=True) + + # Trigger signal before refresh is called + self.refresh_triggered.emit() + # Refresh model + self._model.refresh() + + def _set_loading_state(self, loading, empty): + self._view.set_loading_state(loading, empty) + + def _select_indexes(self, indexes): + valid_indexes = [ 
+ index + for index in indexes + if index.isValid() + ] + if not valid_indexes: + return + + selection_model = self._view.selectionModel() + selection_model.clearSelection() + + mode = selection_model.Select | selection_model.Rows + for index in valid_indexes: + self._view.expand(self._proxy.parent(index)) + selection_model.select(index, mode) + self._view.setCurrentIndex(valid_indexes[0]) + + +class SingleSelectAssetsWidget(AssetsWidget): + """Single selection asset widget. + + Contain single selection specific api methods. + """ + def get_selected_asset_id(self): + """Currently selected asset id.""" + selection_model = self._view.selectionModel() + indexes = selection_model.selectedRows() + for index in indexes: + return index.data(ASSET_ID_ROLE) + return None + + def get_selected_asset_name(self): + """Currently selected asset name.""" + selection_model = self._view.selectionModel() + indexes = selection_model.selectedRows() + for index in indexes: + return index.data(ASSET_NAME_ROLE) + return None + + +class MultiSelectAssetsWidget(AssetsWidget): + """Multiselection asset widget. + + Main purpose is for loader and library loader. If another tool would use + multiselection assets this widget should be split and loader's logic + separated. 
+ """ + def __init__(self, *args, **kwargs): + super(MultiSelectAssetsWidget, self).__init__(*args, **kwargs) + self._view.setSelectionMode(QtWidgets.QTreeView.ExtendedSelection) + + delegate = UnderlinesAssetDelegate() + self._view.setItemDelegate(delegate) + self._delegate = delegate + + def get_selected_asset_ids(self): + """Currently selected asset ids.""" + selection_model = self._view.selectionModel() + indexes = selection_model.selectedRows() + return [ + index.data(ASSET_ID_ROLE) + for index in indexes + ] + + def get_selected_asset_names(self): + """Currently selected asset names.""" + selection_model = self._view.selectionModel() + indexes = selection_model.selectedRows() + return [ + index.data(ASSET_NAME_ROLE) + for index in indexes + ] + + def select_assets(self, asset_ids): + """Select assets by their ids. + + Args: + asset_ids (list): List of asset ids. + """ + indexes = self._model.get_indexes_by_asset_ids(asset_ids) + new_indexes = [ + self._proxy.mapFromSource(index) + for index in indexes + ] + self._select_indexes(new_indexes) + + def select_assets_by_name(self, asset_names): + """Select assets by their names. + + Args: + asset_names (list): List of asset names. + """ + indexes = self._model.get_indexes_by_asset_names(asset_names) + new_indexes = [ + self._proxy.mapFromSource(index) + for index in indexes + ] + self._select_indexes(new_indexes) + + def clear_underlines(self): + """Clear underlines in asset items.""" + self._model.clear_underlines() + + self._view.updateGeometries() + + def set_underline_colors(self, colors_by_asset_id): + """Change underline colors for passed assets. + + Args: + colors_by_asset_id (dict): Key is asset id and value is list + of underline colors. 
+ """ + self._model.set_underline_colors(colors_by_asset_id) + # Trigger repaint + self._view.updateGeometries() diff --git a/openpype/tools/utils/constants.py b/openpype/tools/utils/constants.py index 5b6f4126c9..8f12c57321 100644 --- a/openpype/tools/utils/constants.py +++ b/openpype/tools/utils/constants.py @@ -5,10 +5,6 @@ DEFAULT_PROJECT_LABEL = "< Default >" PROJECT_NAME_ROLE = QtCore.Qt.UserRole + 101 PROJECT_IS_ACTIVE_ROLE = QtCore.Qt.UserRole + 102 -TASK_NAME_ROLE = QtCore.Qt.UserRole + 301 -TASK_TYPE_ROLE = QtCore.Qt.UserRole + 302 -TASK_ORDER_ROLE = QtCore.Qt.UserRole + 403 - LOCAL_PROVIDER_ROLE = QtCore.Qt.UserRole + 500 # provider of active site REMOTE_PROVIDER_ROLE = QtCore.Qt.UserRole + 501 # provider of remote site LOCAL_PROGRESS_ROLE = QtCore.Qt.UserRole + 502 # percentage downld on active diff --git a/openpype/tools/utils/delegates.py b/openpype/tools/utils/delegates.py index 96353c44c6..1caed732d8 100644 --- a/openpype/tools/utils/delegates.py +++ b/openpype/tools/utils/delegates.py @@ -8,10 +8,7 @@ from Qt import QtWidgets, QtGui, QtCore from avalon.lib import HeroVersionType from openpype.style import get_objected_colors -from .models import ( - AssetModel, - TreeModel -) +from .models import TreeModel from . 
import lib if Qt.__binding__ == "PySide": @@ -22,173 +19,6 @@ elif Qt.__binding__ == "PyQt4": log = logging.getLogger(__name__) -class AssetDelegate(QtWidgets.QItemDelegate): - bar_height = 3 - - def __init__(self, *args, **kwargs): - super(AssetDelegate, self).__init__(*args, **kwargs) - asset_view_colors = get_objected_colors()["loader"]["asset-view"] - self._selected_color = ( - asset_view_colors["selected"].get_qcolor() - ) - self._hover_color = ( - asset_view_colors["hover"].get_qcolor() - ) - self._selected_hover_color = ( - asset_view_colors["selected-hover"].get_qcolor() - ) - - def sizeHint(self, option, index): - result = super(AssetDelegate, self).sizeHint(option, index) - height = result.height() - result.setHeight(height + self.bar_height) - - return result - - def paint(self, painter, option, index): - # Qt4 compat - if Qt.__binding__ in ("PySide", "PyQt4"): - option = QStyleOptionViewItemV4(option) - - painter.save() - - item_rect = QtCore.QRect(option.rect) - item_rect.setHeight(option.rect.height() - self.bar_height) - - subset_colors = index.data(AssetModel.subsetColorsRole) - subset_colors_width = 0 - if subset_colors: - subset_colors_width = option.rect.width() / len(subset_colors) - - subset_rects = [] - counter = 0 - for subset_c in subset_colors: - new_color = None - new_rect = None - if subset_c: - new_color = QtGui.QColor(*subset_c) - - new_rect = QtCore.QRect( - option.rect.left() + (counter * subset_colors_width), - option.rect.top() + ( - option.rect.height() - self.bar_height - ), - subset_colors_width, - self.bar_height - ) - subset_rects.append((new_color, new_rect)) - counter += 1 - - # Background - if option.state & QtWidgets.QStyle.State_Selected: - if len(subset_colors) == 0: - item_rect.setTop(item_rect.top() + (self.bar_height / 2)) - - if option.state & QtWidgets.QStyle.State_MouseOver: - bg_color = self._selected_hover_color - else: - bg_color = self._selected_color - else: - item_rect.setTop(item_rect.top() + (self.bar_height 
/ 2)) - if option.state & QtWidgets.QStyle.State_MouseOver: - bg_color = self._hover_color - else: - bg_color = QtGui.QColor() - bg_color.setAlpha(0) - - # When not needed to do a rounded corners (easier and without - # painter restore): - # painter.fillRect( - # item_rect, - # QtGui.QBrush(bg_color) - # ) - pen = painter.pen() - pen.setStyle(QtCore.Qt.NoPen) - pen.setWidth(0) - painter.setPen(pen) - painter.setBrush(QtGui.QBrush(bg_color)) - painter.drawRoundedRect(option.rect, 3, 3) - - if option.state & QtWidgets.QStyle.State_Selected: - for color, subset_rect in subset_rects: - if not color or not subset_rect: - continue - painter.fillRect(subset_rect, QtGui.QBrush(color)) - - painter.restore() - painter.save() - - # Icon - icon_index = index.model().index( - index.row(), index.column(), index.parent() - ) - # - Default icon_rect if not icon - icon_rect = QtCore.QRect( - item_rect.left(), - item_rect.top(), - # To make sure it's same size all the time - option.rect.height() - self.bar_height, - option.rect.height() - self.bar_height - ) - icon = index.model().data(icon_index, QtCore.Qt.DecorationRole) - - if icon: - mode = QtGui.QIcon.Normal - if not (option.state & QtWidgets.QStyle.State_Enabled): - mode = QtGui.QIcon.Disabled - elif option.state & QtWidgets.QStyle.State_Selected: - mode = QtGui.QIcon.Selected - - if isinstance(icon, QtGui.QPixmap): - icon = QtGui.QIcon(icon) - option.decorationSize = icon.size() / icon.devicePixelRatio() - - elif isinstance(icon, QtGui.QColor): - pixmap = QtGui.QPixmap(option.decorationSize) - pixmap.fill(icon) - icon = QtGui.QIcon(pixmap) - - elif isinstance(icon, QtGui.QImage): - icon = QtGui.QIcon(QtGui.QPixmap.fromImage(icon)) - option.decorationSize = icon.size() / icon.devicePixelRatio() - - elif isinstance(icon, QtGui.QIcon): - state = QtGui.QIcon.Off - if option.state & QtWidgets.QStyle.State_Open: - state = QtGui.QIcon.On - actualSize = option.icon.actualSize( - option.decorationSize, mode, state - ) - 
option.decorationSize = QtCore.QSize( - min(option.decorationSize.width(), actualSize.width()), - min(option.decorationSize.height(), actualSize.height()) - ) - - state = QtGui.QIcon.Off - if option.state & QtWidgets.QStyle.State_Open: - state = QtGui.QIcon.On - - icon.paint( - painter, icon_rect, - QtCore.Qt.AlignLeft, mode, state - ) - - # Text - text_rect = QtCore.QRect( - icon_rect.left() + icon_rect.width() + 2, - item_rect.top(), - item_rect.width(), - item_rect.height() - ) - - painter.drawText( - text_rect, QtCore.Qt.AlignVCenter, - index.data(QtCore.Qt.DisplayRole) - ) - - painter.restore() - - class VersionDelegate(QtWidgets.QStyledItemDelegate): """A delegate that display version integer formatted as version string.""" diff --git a/openpype/tools/utils/host_tools.py b/openpype/tools/utils/host_tools.py index a15d12b386..ef1cd3cf5c 100644 --- a/openpype/tools/utils/host_tools.py +++ b/openpype/tools/utils/host_tools.py @@ -62,19 +62,18 @@ class HostToolsHelper: save = True workfiles_tool = self.get_workfiles_tool(parent) - if use_context: - context = { - "asset": avalon.api.Session["AVALON_ASSET"], - "silo": avalon.api.Session["AVALON_SILO"], - "task": avalon.api.Session["AVALON_TASK"] - } - workfiles_tool.set_context(context) + workfiles_tool.set_save_enabled(save) - if save: - workfiles_tool.set_save_enabled(save) + if not workfiles_tool.isVisible(): + workfiles_tool.show() + + if use_context: + context = { + "asset": avalon.api.Session["AVALON_ASSET"], + "task": avalon.api.Session["AVALON_TASK"] + } + workfiles_tool.set_context(context) - workfiles_tool.refresh() - workfiles_tool.show() # Pull window to the front. 
workfiles_tool.raise_() workfiles_tool.activateWindow() @@ -109,23 +108,19 @@ class HostToolsHelper: def get_creator_tool(self, parent): """Create, cache and return creator tool window.""" if self._creator_tool is None: - from avalon.tools.creator.app import Window + from openpype.tools.creator import CreatorWindow - creator_window = Window(parent=parent or self._parent) + creator_window = CreatorWindow(parent=parent or self._parent) self._creator_tool = creator_window return self._creator_tool def show_creator(self, parent=None): """Show tool to create new instantes for publishing.""" - from avalon import style - creator_tool = self.get_creator_tool(parent) creator_tool.refresh() creator_tool.show() - creator_tool.setStyleSheet(style.load_stylesheet()) - # Pull window to the front. creator_tool.raise_() creator_tool.activateWindow() @@ -133,22 +128,20 @@ class HostToolsHelper: def get_subset_manager_tool(self, parent): """Create, cache and return subset manager tool window.""" if self._subset_manager_tool is None: - from avalon.tools.subsetmanager import Window + from openpype.tools.subsetmanager import SubsetManagerWindow - subset_manager_window = Window(parent=parent or self._parent) + subset_manager_window = SubsetManagerWindow( + parent=parent or self._parent + ) self._subset_manager_tool = subset_manager_window return self._subset_manager_tool def show_subset_manager(self, parent=None): """Show tool display/remove existing created instances.""" - from avalon import style - subset_manager_tool = self.get_subset_manager_tool(parent) subset_manager_tool.show() - subset_manager_tool.setStyleSheet(style.load_stylesheet()) - # Pull window to the front. 
subset_manager_tool.raise_() subset_manager_tool.activateWindow() @@ -156,21 +149,20 @@ class HostToolsHelper: def get_scene_inventory_tool(self, parent): """Create, cache and return scene inventory tool window.""" if self._scene_inventory_tool is None: - from avalon.tools.sceneinventory.app import Window + from openpype.tools.sceneinventory import SceneInventoryWindow - scene_inventory_window = Window(parent=parent or self._parent) + scene_inventory_window = SceneInventoryWindow( + parent=parent or self._parent + ) self._scene_inventory_tool = scene_inventory_window return self._scene_inventory_tool def show_scene_inventory(self, parent=None): """Show tool maintain loaded containers.""" - from avalon import style - scene_inventory_tool = self.get_scene_inventory_tool(parent) scene_inventory_tool.show() scene_inventory_tool.refresh() - scene_inventory_tool.setStyleSheet(style.load_stylesheet()) # Pull window to the front. scene_inventory_tool.raise_() diff --git a/openpype/tools/utils/lib.py b/openpype/tools/utils/lib.py index e2815f26e4..6742df8557 100644 --- a/openpype/tools/utils/lib.py +++ b/openpype/tools/utils/lib.py @@ -25,6 +25,34 @@ def center_window(window): window.move(geo.topLeft()) +def paint_image_with_color(image, color): + """Redraw image with single color using it's alpha. + + It is expected that input image is singlecolor image with alpha. + + Args: + image (QImage): Loaded image with alpha. + color (QColor): Color that will be used to paint image. 
+ """ + width = image.width() + height = image.height() + + alpha_mask = image.createAlphaMask() + alpha_region = QtGui.QRegion(QtGui.QBitmap.fromImage(alpha_mask)) + + pixmap = QtGui.QPixmap(width, height) + pixmap.fill(QtCore.Qt.transparent) + + painter = QtGui.QPainter(pixmap) + painter.setClipRegion(alpha_region) + painter.setPen(QtCore.Qt.NoPen) + painter.setBrush(color) + painter.drawRect(QtCore.QRect(0, 0, width, height)) + painter.end() + + return pixmap + + def format_version(value, hero_version=False): """Formats integer to displayable version name""" label = "v{0:03d}".format(value) @@ -445,6 +473,30 @@ class GroupsConfig: return ordered_groups, subset_docs_without_group, subset_docs_by_group +class DynamicQThread(QtCore.QThread): + """QThread which can run any function with argument and kwargs. + + Args: + func (function): Function which will be called. + args (tuple): Arguments which will be passed to function. + kwargs (tuple): Keyword arguments which will be passed to function. + parent (QObject): Parent of thread. 
+ """ + def __init__(self, func, args=None, kwargs=None, parent=None): + super(DynamicQThread, self).__init__(parent) + if args is None: + args = tuple() + if kwargs is None: + kwargs = {} + self._func = func + self._args = args + self._kwargs = kwargs + + def run(self): + """Execute the function with arguments.""" + self._func(*self._args, **self._kwargs) + + def create_qthread(func, *args, **kwargs): class Thread(QtCore.QThread): def run(self): @@ -453,6 +505,7 @@ def create_qthread(func, *args, **kwargs): def get_repre_icons(): + """Returns a dict {'provider_name': QIcon}""" try: from openpype_modules import sync_server except Exception: @@ -464,9 +517,17 @@ def get_repre_icons(): "providers", "resources" ) icons = {} - # TODO get from sync module - for provider in ['studio', 'local_drive', 'gdrive']: - pix_url = "{}/{}.png".format(resource_path, provider) + if not os.path.exists(resource_path): + print("No icons for Site Sync found") + return {} + + for file_name in os.listdir(resource_path): + if file_name and not file_name.endswith("png"): + continue + + provider, _ = os.path.splitext(file_name) + + pix_url = os.path.join(resource_path, file_name) icons[provider] = QtGui.QIcon(pix_url) return icons diff --git a/openpype/tools/utils/models.py b/openpype/tools/utils/models.py index c488743f36..df3eee41a2 100644 --- a/openpype/tools/utils/models.py +++ b/openpype/tools/utils/models.py @@ -1,7 +1,5 @@ import re -import time import logging -import collections import Qt from Qt import QtCore, QtGui @@ -11,10 +9,7 @@ from . import lib from .constants import ( PROJECT_IS_ACTIVE_ROLE, PROJECT_NAME_ROLE, - DEFAULT_PROJECT_LABEL, - TASK_ORDER_ROLE, - TASK_TYPE_ROLE, - TASK_NAME_ROLE + DEFAULT_PROJECT_LABEL ) log = logging.getLogger(__name__) @@ -203,283 +198,6 @@ class Item(dict): self._children.append(child) -class AssetModel(TreeModel): - """A model listing assets in the silo in the active project. 
- - The assets are displayed in a treeview, they are visually parented by - a `visualParent` field in the database containing an `_id` to a parent - asset. - - """ - - Columns = ["label"] - Name = 0 - Deprecated = 2 - ObjectId = 3 - - DocumentRole = QtCore.Qt.UserRole + 2 - ObjectIdRole = QtCore.Qt.UserRole + 3 - subsetColorsRole = QtCore.Qt.UserRole + 4 - - doc_fetched = QtCore.Signal(bool) - refreshed = QtCore.Signal(bool) - - # Asset document projection - asset_projection = { - "type": 1, - "schema": 1, - "name": 1, - "silo": 1, - "data.visualParent": 1, - "data.label": 1, - "data.tags": 1, - "data.icon": 1, - "data.color": 1, - "data.deprecated": 1 - } - - def __init__(self, dbcon=None, parent=None, asset_projection=None): - super(AssetModel, self).__init__(parent=parent) - if dbcon is None: - dbcon = io - self.dbcon = dbcon - self.asset_colors = {} - - # Projections for Mongo queries - # - let ability to modify them if used in tools that require more than - # defaults - if asset_projection: - self.asset_projection = asset_projection - - self.asset_projection = asset_projection - - self._doc_fetching_thread = None - self._doc_fetching_stop = False - self._doc_payload = {} - - self.doc_fetched.connect(self.on_doc_fetched) - - self.refresh() - - def _add_hierarchy(self, assets, parent=None, silos=None): - """Add the assets that are related to the parent as children items. - - This method does *not* query the database. These instead are queried - in a single batch upfront as an optimization to reduce database - queries. Resulting in up to 10x speed increase. 
- - Args: - assets (dict): All assets in the currently active silo stored - by key/value - - Returns: - None - - """ - # Reset colors - self.asset_colors = {} - - if silos: - # WARNING: Silo item "_id" is set to silo value - # mainly because GUI issue with perserve selection and expanded row - # and because of easier hierarchy parenting (in "assets") - for silo in silos: - item = Item({ - "_id": silo, - "name": silo, - "label": silo, - "type": "silo" - }) - self.add_child(item, parent=parent) - self._add_hierarchy(assets, parent=item) - - parent_id = parent["_id"] if parent else None - current_assets = assets.get(parent_id, list()) - - for asset in current_assets: - # get label from data, otherwise use name - data = asset.get("data", {}) - label = data.get("label", asset["name"]) - tags = data.get("tags", []) - - # store for the asset for optimization - deprecated = "deprecated" in tags - - item = Item({ - "_id": asset["_id"], - "name": asset["name"], - "label": label, - "type": asset["type"], - "tags": ", ".join(tags), - "deprecated": deprecated, - "_document": asset - }) - self.add_child(item, parent=parent) - - # Add asset's children recursively if it has children - if asset["_id"] in assets: - self._add_hierarchy(assets, parent=item) - - self.asset_colors[asset["_id"]] = [] - - def on_doc_fetched(self, was_stopped): - if was_stopped: - self.stop_fetch_thread() - return - - self.beginResetModel() - - assets_by_parent = self._doc_payload.get("assets_by_parent") - silos = self._doc_payload.get("silos") - if assets_by_parent is not None: - # Build the hierarchical tree items recursively - self._add_hierarchy( - assets_by_parent, - parent=None, - silos=silos - ) - - self.endResetModel() - - has_content = bool(assets_by_parent) or bool(silos) - self.refreshed.emit(has_content) - - self.stop_fetch_thread() - - def fetch(self): - self._doc_payload = self._fetch() or {} - # Emit doc fetched only if was not stopped - self.doc_fetched.emit(self._doc_fetching_stop) - - def 
_fetch(self): - if not self.dbcon.Session.get("AVALON_PROJECT"): - return - - project_doc = self.dbcon.find_one( - {"type": "project"}, - {"_id": True} - ) - if not project_doc: - return - - # Get all assets sorted by name - db_assets = self.dbcon.find( - {"type": "asset"}, - self.asset_projection - ).sort("name", 1) - - # Group the assets by their visual parent's id - assets_by_parent = collections.defaultdict(list) - for asset in db_assets: - if self._doc_fetching_stop: - return - parent_id = asset.get("data", {}).get("visualParent") - assets_by_parent[parent_id].append(asset) - - return { - "assets_by_parent": assets_by_parent, - "silos": None - } - - def stop_fetch_thread(self): - if self._doc_fetching_thread is not None: - self._doc_fetching_stop = True - while self._doc_fetching_thread.isRunning(): - time.sleep(0.001) - self._doc_fetching_thread = None - - def refresh(self, force=False): - """Refresh the data for the model.""" - # Skip fetch if there is already other thread fetching documents - if self._doc_fetching_thread is not None: - if not force: - return - self.stop_fetch_thread() - - # Clear model items - self.clear() - - # Fetch documents from mongo - # Restart payload - self._doc_payload = {} - self._doc_fetching_stop = False - self._doc_fetching_thread = lib.create_qthread(self.fetch) - self._doc_fetching_thread.start() - - def flags(self, index): - return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable - - def setData(self, index, value, role=QtCore.Qt.EditRole): - if not index.isValid(): - return False - - if role == self.subsetColorsRole: - asset_id = index.data(self.ObjectIdRole) - self.asset_colors[asset_id] = value - - if Qt.__binding__ in ("PyQt4", "PySide"): - self.dataChanged.emit(index, index) - else: - self.dataChanged.emit(index, index, [role]) - - return True - - return super(AssetModel, self).setData(index, value, role) - - def data(self, index, role): - if not index.isValid(): - return - - item = index.internalPointer() - if role 
== QtCore.Qt.DecorationRole: - column = index.column() - if column == self.Name: - # Allow a custom icon and custom icon color to be defined - data = item.get("_document", {}).get("data", {}) - icon = data.get("icon", None) - if icon is None and item.get("type") == "silo": - icon = "database" - color = data.get("color", style.colors.default) - - if icon is None: - # Use default icons if no custom one is specified. - # If it has children show a full folder, otherwise - # show an open folder - has_children = self.rowCount(index) > 0 - icon = "folder" if has_children else "folder-o" - - # Make the color darker when the asset is deprecated - if item.get("deprecated", False): - color = QtGui.QColor(color).darker(250) - - try: - key = "fa.{0}".format(icon) # font-awesome key - icon = qtawesome.icon(key, color=color) - return icon - except Exception as exception: - # Log an error message instead of erroring out completely - # when the icon couldn't be created (e.g. invalid name) - log.error(exception) - - return - - if role == QtCore.Qt.ForegroundRole: # font color - if "deprecated" in item.get("tags", []): - return QtGui.QColor(style.colors.light).darker(250) - - if role == self.ObjectIdRole: - return item.get("_id", None) - - if role == self.DocumentRole: - return item.get("_document", None) - - if role == self.subsetColorsRole: - asset_id = item.get("_id", None) - return self.asset_colors.get(asset_id) or [] - - return super(AssetModel, self).data(index, role) - - class RecursiveSortFilterProxyModel(QtCore.QSortFilterProxyModel): """Filters to the regex if any of the children matches allow parent""" def filterAcceptsRow(self, row, parent): @@ -654,163 +372,3 @@ class ProjectSortFilterProxy(QtCore.QSortFilterProxyModel): def set_filter_enabled(self, value): self._filter_enabled = value self.invalidateFilter() - - -class TasksModel(QtGui.QStandardItemModel): - """A model listing the tasks combined for a list of assets""" - def __init__(self, dbcon, parent=None): - 
super(TasksModel, self).__init__(parent=parent) - self.dbcon = dbcon - self._default_icon = qtawesome.icon( - "fa.male", - color=style.colors.default - ) - self._no_tasks_icon = qtawesome.icon( - "fa.exclamation-circle", - color=style.colors.mid - ) - self._cached_icons = {} - self._project_task_types = {} - - self._last_asset_id = None - - self.refresh() - - def refresh(self): - if self.dbcon.Session.get("AVALON_PROJECT"): - self._refresh_task_types() - self.set_asset_id(self._last_asset_id) - else: - self.clear() - - def _refresh_task_types(self): - # Get the project configured icons from database - project = self.dbcon.find_one( - {"type": "project"}, - {"config.tasks"} - ) - tasks = project["config"].get("tasks") or {} - self._project_task_types = tasks - - def _try_get_awesome_icon(self, icon_name): - icon = None - if icon_name: - try: - icon = qtawesome.icon( - "fa.{}".format(icon_name), - color=style.colors.default - ) - - except Exception: - pass - return icon - - def headerData(self, section, orientation, role): - # Show nice labels in the header - if ( - role == QtCore.Qt.DisplayRole - and orientation == QtCore.Qt.Horizontal - ): - if section == 0: - return "Tasks" - - return super(TasksModel, self).headerData(section, orientation, role) - - def _get_icon(self, task_icon, task_type_icon): - if task_icon in self._cached_icons: - return self._cached_icons[task_icon] - - icon = self._try_get_awesome_icon(task_icon) - if icon is not None: - self._cached_icons[task_icon] = icon - return icon - - if task_type_icon in self._cached_icons: - icon = self._cached_icons[task_type_icon] - self._cached_icons[task_icon] = icon - return icon - - icon = self._try_get_awesome_icon(task_type_icon) - if icon is None: - icon = self._default_icon - - self._cached_icons[task_icon] = icon - self._cached_icons[task_type_icon] = icon - - return icon - - def set_asset_id(self, asset_id): - asset_doc = None - if asset_id: - asset_doc = self.dbcon.find_one( - {"_id": asset_id}, - 
{"data.tasks": True} - ) - self.set_asset(asset_doc) - - def set_asset(self, asset_doc): - """Set assets to track by their database id - - Arguments: - asset_doc (dict): Asset document from MongoDB. - """ - self.clear() - - if not asset_doc: - self._last_asset_id = None - return - - self._last_asset_id = asset_doc["_id"] - - asset_tasks = asset_doc.get("data", {}).get("tasks") or {} - items = [] - for task_name, task_info in asset_tasks.items(): - task_icon = task_info.get("icon") - task_type = task_info.get("type") - task_order = task_info.get("order") - task_type_info = self._project_task_types.get(task_type) or {} - task_type_icon = task_type_info.get("icon") - icon = self._get_icon(task_icon, task_type_icon) - - label = "{} ({})".format(task_name, task_type or "type N/A") - item = QtGui.QStandardItem(label) - item.setData(task_name, TASK_NAME_ROLE) - item.setData(task_type, TASK_TYPE_ROLE) - item.setData(task_order, TASK_ORDER_ROLE) - item.setData(icon, QtCore.Qt.DecorationRole) - item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable) - items.append(item) - - if not items: - item = QtGui.QStandardItem("No task") - item.setData(self._no_tasks_icon, QtCore.Qt.DecorationRole) - item.setFlags(QtCore.Qt.NoItemFlags) - items.append(item) - - self.invisibleRootItem().appendRows(items) - - -class TasksProxyModel(QtCore.QSortFilterProxyModel): - def lessThan(self, x_index, y_index): - x_order = x_index.data(TASK_ORDER_ROLE) - y_order = y_index.data(TASK_ORDER_ROLE) - if x_order is not None and y_order is not None: - if x_order < y_order: - return True - if x_order > y_order: - return False - - elif x_order is None and y_order is not None: - return True - - elif y_order is None and x_order is not None: - return False - - x_name = x_index.data(QtCore.Qt.DisplayRole) - y_name = y_index.data(QtCore.Qt.DisplayRole) - if x_name == y_name: - return True - - if x_name == tuple(sorted((x_name, y_name)))[0]: - return True - return False diff --git 
a/openpype/tools/utils/tasks_widget.py b/openpype/tools/utils/tasks_widget.py new file mode 100644 index 0000000000..419e77c780 --- /dev/null +++ b/openpype/tools/utils/tasks_widget.py @@ -0,0 +1,295 @@ +from Qt import QtWidgets, QtCore, QtGui + +from avalon import style +from avalon.vendor import qtawesome + +from .views import DeselectableTreeView + + +TASK_NAME_ROLE = QtCore.Qt.UserRole + 1 +TASK_TYPE_ROLE = QtCore.Qt.UserRole + 2 +TASK_ORDER_ROLE = QtCore.Qt.UserRole + 3 + + +class TasksModel(QtGui.QStandardItemModel): + """A model listing the tasks combined for a list of assets""" + def __init__(self, dbcon, parent=None): + super(TasksModel, self).__init__(parent=parent) + self.dbcon = dbcon + self.setHeaderData( + 0, QtCore.Qt.Horizontal, "Tasks", QtCore.Qt.DisplayRole + ) + self._default_icon = qtawesome.icon( + "fa.male", + color=style.colors.default + ) + self._no_tasks_icon = qtawesome.icon( + "fa.exclamation-circle", + color=style.colors.mid + ) + self._cached_icons = {} + self._project_task_types = {} + + self._empty_tasks_item = None + self._last_asset_id = None + self._loaded_project_name = None + + def _context_is_valid(self): + if self.dbcon.Session.get("AVALON_PROJECT"): + return True + return False + + def refresh(self): + self._refresh_task_types() + self.set_asset_id(self._last_asset_id) + + def _refresh_task_types(self): + # Get the project configured icons from database + task_types = {} + if self._context_is_valid(): + project = self.dbcon.find_one( + {"type": "project"}, + {"config.tasks"} + ) + task_types = project["config"].get("tasks") or task_types + self._project_task_types = task_types + + def _try_get_awesome_icon(self, icon_name): + icon = None + if icon_name: + try: + icon = qtawesome.icon( + "fa.{}".format(icon_name), + color=style.colors.default + ) + + except Exception: + pass + return icon + + def headerData(self, section, orientation, role=None): + if role is None: + role = QtCore.Qt.EditRole + # Show nice labels in the header 
+ if section == 0: + if ( + role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole) + and orientation == QtCore.Qt.Horizontal + ): + return "Tasks" + + return super(TasksModel, self).headerData(section, orientation, role) + + def _get_icon(self, task_icon, task_type_icon): + if task_icon in self._cached_icons: + return self._cached_icons[task_icon] + + icon = self._try_get_awesome_icon(task_icon) + if icon is not None: + self._cached_icons[task_icon] = icon + return icon + + if task_type_icon in self._cached_icons: + icon = self._cached_icons[task_type_icon] + self._cached_icons[task_icon] = icon + return icon + + icon = self._try_get_awesome_icon(task_type_icon) + if icon is None: + icon = self._default_icon + + self._cached_icons[task_icon] = icon + self._cached_icons[task_type_icon] = icon + + return icon + + def set_asset_id(self, asset_id): + asset_doc = None + if self._context_is_valid(): + asset_doc = self.dbcon.find_one( + {"_id": asset_id}, + {"data.tasks": True} + ) + self._set_asset(asset_doc) + + def _get_empty_task_item(self): + if self._empty_tasks_item is None: + item = QtGui.QStandardItem("No task") + item.setData(self._no_tasks_icon, QtCore.Qt.DecorationRole) + item.setFlags(QtCore.Qt.NoItemFlags) + self._empty_tasks_item = item + return self._empty_tasks_item + + def _set_asset(self, asset_doc): + """Set assets to track by their database id + + Arguments: + asset_doc (dict): Asset document from MongoDB. 
+ """ + asset_tasks = {} + self._last_asset_id = None + if asset_doc: + asset_tasks = asset_doc.get("data", {}).get("tasks") or {} + self._last_asset_id = asset_doc["_id"] + + root_item = self.invisibleRootItem() + root_item.removeRows(0, root_item.rowCount()) + + items = [] + for task_name, task_info in asset_tasks.items(): + task_icon = task_info.get("icon") + task_type = task_info.get("type") + task_order = task_info.get("order") + task_type_info = self._project_task_types.get(task_type) or {} + task_type_icon = task_type_info.get("icon") + icon = self._get_icon(task_icon, task_type_icon) + + label = "{} ({})".format(task_name, task_type or "type N/A") + item = QtGui.QStandardItem(label) + item.setData(task_name, TASK_NAME_ROLE) + item.setData(task_type, TASK_TYPE_ROLE) + item.setData(task_order, TASK_ORDER_ROLE) + item.setData(icon, QtCore.Qt.DecorationRole) + item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable) + items.append(item) + + if not items: + item = QtGui.QStandardItem("No task") + item.setData(self._no_tasks_icon, QtCore.Qt.DecorationRole) + item.setFlags(QtCore.Qt.NoItemFlags) + items.append(item) + + root_item.appendRows(items) + + +class TasksProxyModel(QtCore.QSortFilterProxyModel): + def lessThan(self, x_index, y_index): + x_order = x_index.data(TASK_ORDER_ROLE) + y_order = y_index.data(TASK_ORDER_ROLE) + if x_order is not None and y_order is not None: + if x_order < y_order: + return True + if x_order > y_order: + return False + + elif x_order is None and y_order is not None: + return True + + elif y_order is None and x_order is not None: + return False + + x_name = x_index.data(QtCore.Qt.DisplayRole) + y_name = y_index.data(QtCore.Qt.DisplayRole) + if x_name == y_name: + return True + + if x_name == tuple(sorted((x_name, y_name)))[0]: + return True + return False + + +class TasksWidget(QtWidgets.QWidget): + """Widget showing active Tasks""" + + task_changed = QtCore.Signal() + + def __init__(self, dbcon, parent=None): + 
super(TasksWidget, self).__init__(parent) + + tasks_view = DeselectableTreeView(self) + tasks_view.setIndentation(0) + tasks_view.setSortingEnabled(True) + tasks_view.setEditTriggers(QtWidgets.QTreeView.NoEditTriggers) + + header_view = tasks_view.header() + header_view.setSortIndicator(0, QtCore.Qt.AscendingOrder) + + tasks_model = TasksModel(dbcon) + tasks_proxy = TasksProxyModel() + tasks_proxy.setSourceModel(tasks_model) + tasks_view.setModel(tasks_proxy) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(tasks_view) + + selection_model = tasks_view.selectionModel() + selection_model.selectionChanged.connect(self._on_task_change) + + self._tasks_model = tasks_model + self._tasks_proxy = tasks_proxy + self._tasks_view = tasks_view + + self._last_selected_task_name = None + + def refresh(self): + self._tasks_model.refresh() + + def set_asset_id(self, asset_id): + # Try and preserve the last selected task and reselect it + # after switching assets. If there's no currently selected + # asset keep whatever the "last selected" was prior to it. + current = self.get_selected_task_name() + if current: + self._last_selected_task_name = current + + self._tasks_model.set_asset_id(asset_id) + + if self._last_selected_task_name: + self.select_task_name(self._last_selected_task_name) + + # Force a task changed emit. + self.task_changed.emit() + + def select_task_name(self, task_name): + """Select a task by name. + + If the task does not exist in the current model then selection is only + cleared. + + Args: + task (str): Name of the task to select. 
+ + """ + task_view_model = self._tasks_view.model() + if not task_view_model: + return + + # Clear selection + selection_model = self._tasks_view.selectionModel() + selection_model.clearSelection() + + # Select the task + mode = selection_model.Select | selection_model.Rows + for row in range(task_view_model.rowCount()): + index = task_view_model.index(row, 0) + name = index.data(TASK_NAME_ROLE) + if name == task_name: + selection_model.select(index, mode) + + # Set the currently active index + self._tasks_view.setCurrentIndex(index) + break + + def get_selected_task_name(self): + """Return name of task at current index (selected) + + Returns: + str: Name of the current task. + + """ + index = self._tasks_view.currentIndex() + selection_model = self._tasks_view.selectionModel() + if index.isValid() and selection_model.isSelected(index): + return index.data(TASK_NAME_ROLE) + return None + + def get_selected_task_type(self): + index = self._tasks_view.currentIndex() + selection_model = self._tasks_view.selectionModel() + if index.isValid() and selection_model.isSelected(index): + return index.data(TASK_TYPE_ROLE) + return None + + def _on_task_change(self): + self.task_changed.emit() diff --git a/openpype/tools/utils/views.py b/openpype/tools/utils/views.py index 89e49fe142..97aaf622a4 100644 --- a/openpype/tools/utils/views.py +++ b/openpype/tools/utils/views.py @@ -61,26 +61,3 @@ class TreeViewSpinner(QtWidgets.QTreeView): self.paint_empty(event) else: super(TreeViewSpinner, self).paintEvent(event) - - -class AssetsView(TreeViewSpinner, DeselectableTreeView): - """Item view. - This implements a context menu. 
- """ - - def __init__(self, parent=None): - super(AssetsView, self).__init__(parent) - self.setIndentation(15) - self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) - self.setHeaderHidden(True) - - def mousePressEvent(self, event): - index = self.indexAt(event.pos()) - if not index.isValid(): - modifiers = QtWidgets.QApplication.keyboardModifiers() - if modifiers == QtCore.Qt.ShiftModifier: - return - elif modifiers == QtCore.Qt.ControlModifier: - return - - super(AssetsView, self).mousePressEvent(event) diff --git a/openpype/tools/utils/widgets.py b/openpype/tools/utils/widgets.py index 15bcbeff90..009c1dc506 100644 --- a/openpype/tools/utils/widgets.py +++ b/openpype/tools/utils/widgets.py @@ -1,305 +1,59 @@ import logging -import time - -from . import lib from Qt import QtWidgets, QtCore, QtGui + from avalon.vendor import qtawesome, qargparse - -from avalon import style - -from .models import AssetModel, RecursiveSortFilterProxyModel -from .views import AssetsView -from .delegates import AssetDelegate +from openpype.style import get_objected_colors log = logging.getLogger(__name__) -class AssetWidget(QtWidgets.QWidget): - """A Widget to display a tree of assets with filter +class PlaceholderLineEdit(QtWidgets.QLineEdit): + """Set placeholder color of QLineEdit in Qt 5.12 and higher.""" + def __init__(self, *args, **kwargs): + super(PlaceholderLineEdit, self).__init__(*args, **kwargs) + self._first_show = True - To list the assets of the active project: - >>> # widget = AssetWidget() - >>> # widget.refresh() - >>> # widget.show() + def showEvent(self, event): + super(PlaceholderLineEdit, self).showEvent(event) + if self._first_show: + self._first_show = False + filter_palette = self.palette() + if hasattr(filter_palette, "PlaceholderText"): + color_obj = get_objected_colors()["font"] + color = color_obj.get_qcolor() + color.setAlpha(67) + filter_palette.setColor( + filter_palette.PlaceholderText, + color + ) + self.setPalette(filter_palette) + +class 
ImageButton(QtWidgets.QPushButton): + """PushButton with icon and size of font. + + Using font metrics height as icon size reference. + + TODO: + - handle changes of screen (different resolution) """ - refresh_triggered = QtCore.Signal() # on model refresh - refreshed = QtCore.Signal() - selection_changed = QtCore.Signal() # on view selection change - current_changed = QtCore.Signal() # on view current index change + def __init__(self, *args, **kwargs): + super(ImageButton, self).__init__(*args, **kwargs) + self.setObjectName("ImageButton") - def __init__(self, dbcon, multiselection=False, parent=None): - super(AssetWidget, self).__init__(parent=parent) + def _change_size(self): + font_height = self.fontMetrics().height() + self.setIconSize(QtCore.QSize(font_height, font_height)) - self.dbcon = dbcon + def showEvent(self, event): + super(ImageButton, self).showEvent(event) - # Tree View - model = AssetModel(dbcon=self.dbcon, parent=self) - proxy = RecursiveSortFilterProxyModel() - proxy.setSourceModel(model) - proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) + self._change_size() - view = AssetsView(self) - view.setModel(proxy) - if multiselection: - asset_delegate = AssetDelegate() - view.setSelectionMode(view.ExtendedSelection) - view.setItemDelegate(asset_delegate) - - icon = qtawesome.icon("fa.arrow-down", color=style.colors.light) - set_current_asset_btn = QtWidgets.QPushButton(icon, "") - set_current_asset_btn.setToolTip("Go to Asset from current Session") - # Hide by default - set_current_asset_btn.setVisible(False) - - icon = qtawesome.icon("fa.refresh", color=style.colors.light) - refresh = QtWidgets.QPushButton(icon, "", parent=self) - refresh.setToolTip("Refresh items") - - filter_input = QtWidgets.QLineEdit(self) - filter_input.setPlaceholderText("Filter assets..") - - # Header - header_layout = QtWidgets.QHBoxLayout() - header_layout.addWidget(filter_input) - header_layout.addWidget(set_current_asset_btn) - header_layout.addWidget(refresh) - - 
# Layout - layout = QtWidgets.QVBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.setSpacing(4) - layout.addLayout(header_layout) - layout.addWidget(view) - - # Signals/Slots - filter_input.textChanged.connect(proxy.setFilterFixedString) - - selection = view.selectionModel() - selection.selectionChanged.connect(self.selection_changed) - selection.currentChanged.connect(self.current_changed) - refresh.clicked.connect(self.refresh) - set_current_asset_btn.clicked.connect(self.set_current_session_asset) - - self.set_current_asset_btn = set_current_asset_btn - self.model = model - self.proxy = proxy - self.view = view - - self.model_selection = {} - - def set_current_asset_btn_visibility(self, visible=None): - """Hide set current asset button. - - Not all tools support using of current context asset. - """ - if visible is None: - visible = not self.set_current_asset_btn.isVisible() - self.set_current_asset_btn.setVisible(visible) - - def _refresh_model(self): - # Store selection - self._store_model_selection() - time_start = time.time() - - self.set_loading_state( - loading=True, - empty=True - ) - - def on_refreshed(has_item): - self.set_loading_state(loading=False, empty=not has_item) - self._restore_model_selection() - self.model.refreshed.disconnect() - self.refreshed.emit() - print("Duration: %.3fs" % (time.time() - time_start)) - - # Connect to signal - self.model.refreshed.connect(on_refreshed) - # Trigger signal before refresh is called - self.refresh_triggered.emit() - # Refresh model - self.model.refresh() - - def refresh(self): - self._refresh_model() - - def get_active_asset(self): - """Return the asset item of the current selection.""" - current = self.view.currentIndex() - return current.data(self.model.ItemRole) - - def get_active_asset_document(self): - """Return the asset document of the current selection.""" - current = self.view.currentIndex() - return current.data(self.model.DocumentRole) - - def get_active_index(self): - return 
self.view.currentIndex() - - def get_selected_assets(self): - """Return the documents of selected assets.""" - selection = self.view.selectionModel() - rows = selection.selectedRows() - assets = [row.data(self.model.DocumentRole) for row in rows] - - # NOTE: skip None object assumed they are silo (backwards comp.) - return [asset for asset in assets if asset] - - def select_assets(self, assets, expand=True, key="name"): - """Select assets by item key. - - Args: - assets (list): List of asset values that can be found under - specified `key` - expand (bool): Whether to also expand to the asset in the view - key (string): Key that specifies where to look for `assets` values - - Returns: - None - - Default `key` is "name" in that case `assets` should contain single - asset name or list of asset names. (It is good idea to use "_id" key - instead of name in that case `assets` must contain `ObjectId` object/s) - It is expected that each value in `assets` will be found only once. - If the filters according to the `key` and `assets` correspond to - the more asset, only the first found will be selected. 
- - """ - - if not isinstance(assets, (tuple, list)): - assets = [assets] - - # convert to list - tuple cant be modified - assets = set(assets) - - # Clear selection - selection_model = self.view.selectionModel() - selection_model.clearSelection() - - # Select - mode = selection_model.Select | selection_model.Rows - for index in lib.iter_model_rows( - self.proxy, column=0, include_root=False - ): - # stop iteration if there are no assets to process - if not assets: - break - - value = index.data(self.model.ItemRole).get(key) - if value not in assets: - continue - - # Remove processed asset - assets.discard(value) - - selection_model.select(index, mode) - if expand: - # Expand parent index - self.view.expand(self.proxy.parent(index)) - - # Set the currently active index - self.view.setCurrentIndex(index) - - def set_loading_state(self, loading, empty): - if self.view.is_loading != loading: - if loading: - self.view.spinner.repaintNeeded.connect( - self.view.viewport().update - ) - else: - self.view.spinner.repaintNeeded.disconnect() - - self.view.is_loading = loading - self.view.is_empty = empty - - def _store_model_selection(self): - index = self.view.currentIndex() - current = None - if index and index.isValid(): - current = index.data(self.model.ObjectIdRole) - - expanded = set() - model = self.view.model() - for index in lib.iter_model_rows( - model, column=0, include_root=False - ): - if self.view.isExpanded(index): - value = index.data(self.model.ObjectIdRole) - expanded.add(value) - - selection_model = self.view.selectionModel() - - selected = None - selected_rows = selection_model.selectedRows() - if selected_rows: - selected = set( - row.data(self.model.ObjectIdRole) - for row in selected_rows - ) - - self.model_selection = { - "expanded": expanded, - "selected": selected, - "current": current - } - - def _restore_model_selection(self): - model = self.view.model() - not_set = object() - expanded = self.model_selection.pop("expanded", not_set) - selected = 
self.model_selection.pop("selected", not_set) - current = self.model_selection.pop("current", not_set) - - if ( - expanded is not_set - or selected is not_set - or current is not_set - ): - return - - if expanded: - for index in lib.iter_model_rows( - model, column=0, include_root=False - ): - is_expanded = index.data(self.model.ObjectIdRole) in expanded - self.view.setExpanded(index, is_expanded) - - if not selected and not current: - self.set_current_session_asset() - return - - current_index = None - selected_indexes = [] - # Go through all indices, select the ones with similar data - for index in lib.iter_model_rows( - model, column=0, include_root=False - ): - object_id = index.data(self.model.ObjectIdRole) - if object_id in selected: - selected_indexes.append(index) - - if not current_index and object_id == current: - current_index = index - - if current_index: - self.view.setCurrentIndex(current_index) - - if not selected_indexes: - return - selection_model = self.view.selectionModel() - flags = selection_model.Select | selection_model.Rows - for index in selected_indexes: - # Ensure item is visible - self.view.scrollTo(index) - selection_model.select(index, flags) - - def set_current_session_asset(self): - asset_name = self.dbcon.Session.get("AVALON_ASSET") - if asset_name: - self.select_assets([asset_name]) + def sizeHint(self): + return self.iconSize() class OptionalMenu(QtWidgets.QMenu): diff --git a/openpype/tools/workfiles/app.py b/openpype/tools/workfiles/app.py index 4135eeccc9..a4b1717a1c 100644 --- a/openpype/tools/workfiles/app.py +++ b/openpype/tools/workfiles/app.py @@ -12,22 +12,12 @@ from avalon import io, api, pipeline from openpype import style from openpype.tools.utils.lib import ( - schedule, qt_app_context + schedule, + qt_app_context ) -from openpype.tools.utils.widgets import AssetWidget +from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget +from openpype.tools.utils.tasks_widget import TasksWidget from 
openpype.tools.utils.delegates import PrettyTimeDelegate - -from openpype.tools.utils.constants import ( - TASK_NAME_ROLE, - TASK_TYPE_ROLE -) -from openpype.tools.utils.models import ( - TasksModel, - TasksProxyModel -) -from .model import FilesModel -from .view import FilesView - from openpype.lib import ( Anatomy, get_workdir, @@ -37,6 +27,9 @@ from openpype.lib import ( get_workfile_template_key ) +from .model import FilesModel +from .view import FilesView + log = logging.getLogger(__name__) module = sys.modules[__name__] @@ -66,20 +59,39 @@ class NameWindow(QtWidgets.QDialog): # Set work file data for template formatting asset_name = session["AVALON_ASSET"] + task_name = session["AVALON_TASK"] project_doc = io.find_one( {"type": "project"}, { "name": True, - "data.code": True + "data.code": True, + "config.tasks": True, } ) + asset_doc = io.find_one( + { + "type": "asset", + "name": asset_name + }, + {"data.tasks": True} + ) + + task_type = asset_doc["data"]["tasks"].get(task_name, {}).get("type") + + project_task_types = project_doc["config"]["tasks"] + task_short = project_task_types.get(task_type, {}).get("short_name") + self.data = { "project": { "name": project_doc["name"], "code": project_doc["data"].get("code") }, "asset": asset_name, - "task": session["AVALON_TASK"], + "task": { + "name": task_name, + "type": task_type, + "short": task_short, + }, "version": 1, "user": getpass.getuser(), "comment": "", @@ -323,110 +335,6 @@ class NameWindow(QtWidgets.QDialog): ) -class TasksWidget(QtWidgets.QWidget): - """Widget showing active Tasks""" - - task_changed = QtCore.Signal() - - def __init__(self, dbcon=None, parent=None): - super(TasksWidget, self).__init__(parent) - - tasks_view = QtWidgets.QTreeView(self) - tasks_view.setIndentation(0) - tasks_view.setSortingEnabled(True) - if dbcon is None: - dbcon = io - - tasks_model = TasksModel(dbcon) - tasks_proxy = TasksProxyModel() - tasks_proxy.setSourceModel(tasks_model) - tasks_view.setModel(tasks_proxy) - - 
layout = QtWidgets.QVBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.addWidget(tasks_view) - - selection_model = tasks_view.selectionModel() - selection_model.currentChanged.connect(self.task_changed) - - self._tasks_model = tasks_model - self._tasks_proxy = tasks_proxy - self._tasks_view = tasks_view - - self._last_selected_task = None - - def set_asset(self, asset_doc): - # Asset deselected - if asset_doc is None: - return - - # Try and preserve the last selected task and reselect it - # after switching assets. If there's no currently selected - # asset keep whatever the "last selected" was prior to it. - current = self.get_current_task_name() - if current: - self._last_selected_task = current - - self._tasks_model.set_asset(asset_doc) - self._tasks_proxy.sort(0, QtCore.Qt.AscendingOrder) - - if self._last_selected_task: - self.select_task(self._last_selected_task) - - # Force a task changed emit. - self.task_changed.emit() - - def select_task(self, task_name): - """Select a task by name. - - If the task does not exist in the current model then selection is only - cleared. - - Args: - task (str): Name of the task to select. - - """ - task_view_model = self._tasks_view.model() - if not task_view_model: - return - - # Clear selection - selection_model = self._tasks_view.selectionModel() - selection_model.clearSelection() - - # Select the task - mode = selection_model.Select | selection_model.Rows - for row in range(task_view_model.rowCount()): - index = task_view_model.index(row, 0) - name = index.data(TASK_NAME_ROLE) - if name == task_name: - selection_model.select(index, mode) - - # Set the currently active index - self._tasks_view.setCurrentIndex(index) - break - - def get_current_task_name(self): - """Return name of task at current index (selected) - - Returns: - str: Name of the current task. 
- - """ - index = self._tasks_view.currentIndex() - selection_model = self._tasks_view.selectionModel() - if index.isValid() and selection_model.isSelected(index): - return index.data(TASK_NAME_ROLE) - return None - - def get_current_task_type(self): - index = self._tasks_view.currentIndex() - selection_model = self._tasks_view.selectionModel() - if index.isValid() and selection_model.isSelected(index): - return index.data(TASK_TYPE_ROLE) - return None - - class FilesWidget(QtWidgets.QWidget): """A widget displaying files that allows to save and open files.""" file_selected = QtCore.Signal(str) @@ -437,7 +345,8 @@ class FilesWidget(QtWidgets.QWidget): super(FilesWidget, self).__init__(parent=parent) # Setup - self._asset = None + self._asset_id = None + self._asset_doc = None self._task_name = None self._task_type = None @@ -533,15 +442,17 @@ class FilesWidget(QtWidgets.QWidget): self.btn_browse = btn_browse self.btn_save = btn_save - def set_asset_task(self, asset, task_name, task_type): - self._asset = asset + def set_asset_task(self, asset_id, task_name, task_type): + if asset_id != self._asset_id: + self._asset_doc = None + self._asset_id = asset_id self._task_name = task_name self._task_type = task_type # Define a custom session so we can query the work root # for a "Work area" that is not our current Session. # This way we can browse it even before we enter it. 
- if self._asset and self._task_name and self._task_type: + if self._asset_id and self._task_name and self._task_type: session = self._get_session() self.root = self.host.work_root(session) self.files_model.set_root(self.root) @@ -557,6 +468,14 @@ class FilesWidget(QtWidgets.QWidget): # Manually trigger file selection self.on_file_select() + def _get_asset_doc(self): + if self._asset_id is None: + return None + + if self._asset_doc is None: + self._asset_doc = io.find_one({"_id": self._asset_id}) + return self._asset_doc + def _get_session(self): """Return a modified session for the current asset and task""" @@ -568,7 +487,7 @@ class FilesWidget(QtWidgets.QWidget): ) changes = pipeline.compute_session_changes( session, - asset=self._asset, + asset=self._get_asset_doc(), task=self._task_name, template_key=self.template_key ) @@ -582,7 +501,7 @@ class FilesWidget(QtWidgets.QWidget): session = api.Session.copy() changes = pipeline.compute_session_changes( session, - asset=self._asset, + asset=self._get_asset_doc(), task=self._task_name, template_key=self.template_key ) @@ -592,7 +511,7 @@ class FilesWidget(QtWidgets.QWidget): return api.update_current_task( - asset=self._asset, + asset=self._get_asset_doc(), task=self._task_name, template_key=self.template_key ) @@ -739,7 +658,9 @@ class FilesWidget(QtWidgets.QWidget): self._enter_session() # Make sure we are in the right session self.host.save_file(file_path) - self.set_asset_task(self._asset, self._task_name, self._task_type) + self.set_asset_task( + self._asset_id, self._task_name, self._task_type + ) pipeline.emit("after.workfile.save", [file_path]) @@ -765,7 +686,7 @@ class FilesWidget(QtWidgets.QWidget): session = api.Session.copy() changes = pipeline.compute_session_changes( session, - asset=self._asset, + asset=self._get_asset_doc(), task=self._task_name, template_key=self.template_key ) @@ -790,7 +711,7 @@ class FilesWidget(QtWidgets.QWidget): # Force a full to the asset as opposed to just self.refresh() so # 
that it will actually check again whether the Work directory exists - self.set_asset_task(self._asset, self._task_name, self._task_type) + self.set_asset_task(self._asset_id, self._task_name, self._task_type) def refresh(self): """Refresh listed files for current selection in the interface""" @@ -886,10 +807,10 @@ class SidePanelWidget(QtWidgets.QWidget): self.on_note_change() self.save_clicked.emit() - def set_context(self, asset_doc, task_name, filepath, workfile_doc): + def set_context(self, asset_id, task_name, filepath, workfile_doc): # Check if asset, task and file are selected # NOTE workfile document is not requirement - enabled = bool(asset_doc) and bool(task_name) and bool(filepath) + enabled = bool(asset_id) and bool(task_name) and bool(filepath) self.details_input.setEnabled(enabled) self.note_input.setEnabled(enabled) @@ -967,7 +888,7 @@ class Window(QtWidgets.QMainWindow): home_page_widget = QtWidgets.QWidget(pages_widget) home_body_widget = QtWidgets.QWidget(home_page_widget) - assets_widget = AssetWidget(io, parent=home_body_widget) + assets_widget = SingleSelectAssetsWidget(io, parent=home_body_widget) assets_widget.set_current_asset_btn_visibility(True) tasks_widget = TasksWidget(io, home_body_widget) @@ -995,14 +916,21 @@ class Window(QtWidgets.QMainWindow): # the files widget has a filter field which tasks does not. 
tasks_widget.setContentsMargins(0, 32, 0, 0) + # Set context after asset widget is refreshed + # - to do so it is necessary to wait until refresh is done + set_context_timer = QtCore.QTimer() + set_context_timer.setInterval(100) + # Connect signals - assets_widget.current_changed.connect(self.on_asset_changed) + set_context_timer.timeout.connect(self._on_context_set_timeout) + assets_widget.selection_changed.connect(self.on_asset_changed) tasks_widget.task_changed.connect(self.on_task_changed) files_widget.file_selected.connect(self.on_file_select) files_widget.workfile_created.connect(self.on_workfile_create) files_widget.file_opened.connect(self._on_file_opened) side_panel.save_clicked.connect(self.on_side_panel_save) + self._set_context_timer = set_context_timer self.home_page_widget = home_page_widget self.pages_widget = pages_widget self.home_body_widget = home_body_widget @@ -1019,11 +947,13 @@ class Window(QtWidgets.QMainWindow): self.resize(1200, 600) self._first_show = True + self._context_to_set = None def showEvent(self, event): super(Window, self).showEvent(event) if self._first_show: self._first_show = False + self.refresh() self.setStyleSheet(style.load_stylesheet()) def keyPressEvent(self, event): @@ -1047,21 +977,17 @@ class Window(QtWidgets.QMainWindow): schedule(self._on_asset_changed, 50, channel="mongo") def on_file_select(self, filepath): - asset_docs = self.assets_widget.get_selected_assets() - asset_doc = None - if asset_docs: - asset_doc = asset_docs[0] - - task_name = self.tasks_widget.get_current_task_name() + asset_id = self.assets_widget.get_selected_asset_id() + task_name = self.tasks_widget.get_selected_task_name() workfile_doc = None - if asset_doc and task_name and filepath: + if asset_id and task_name and filepath: filename = os.path.split(filepath)[1] workfile_doc = get_workfile_doc( - asset_doc["_id"], task_name, filename, io + asset_id, task_name, filename, io ) self.side_panel.set_context( - asset_doc, task_name, filepath, 
workfile_doc + asset_id, task_name, filepath, workfile_doc ) def on_workfile_create(self, filepath): @@ -1082,15 +1008,14 @@ class Window(QtWidgets.QMainWindow): def _get_current_workfile_doc(self, filepath=None): if filepath is None: filepath = self.files_widget._get_selected_filepath() - task_name = self.tasks_widget.get_current_task_name() - asset_docs = self.assets_widget.get_selected_assets() - if not task_name or not asset_docs or not filepath: + task_name = self.tasks_widget.get_selected_task_name() + asset_id = self.assets_widget.get_selected_asset_id() + if not task_name or not asset_id or not filepath: return - asset_doc = asset_docs[0] filename = os.path.split(filepath)[1] return get_workfile_doc( - asset_doc["_id"], task_name, filename, io + asset_id, task_name, filename, io ) def _create_workfile_doc(self, filepath, force=False): @@ -1100,63 +1025,68 @@ class Window(QtWidgets.QMainWindow): if not workfile_doc: workdir, filename = os.path.split(filepath) - asset_docs = self.assets_widget.get_selected_assets() - asset_doc = asset_docs[0] - task_name = self.tasks_widget.get_current_task_name() + asset_id = self.assets_widget.get_selected_asset_id() + asset_doc = io.find_one({"_id": asset_id}) + task_name = self.tasks_widget.get_selected_task_name() create_workfile_doc(asset_doc, task_name, filename, workdir, io) - def set_context(self, context): - if "asset" in context: - asset = context["asset"] - asset_document = io.find_one( - { - "name": asset, - "type": "asset" - }, - { - "data.tasks": 1 - } - ) - - # Select the asset - self.assets_widget.select_assets([asset], expand=True) - - self.tasks_widget.set_asset(asset_document) - - if "task" in context: - self.tasks_widget.select_task(context["task"]) - def refresh(self): # Refresh asset widget self.assets_widget.refresh() self._on_task_changed() - def _on_asset_changed(self): - asset = self.assets_widget.get_selected_assets() or None + def set_context(self, context): + self._context_to_set = context + 
self._set_context_timer.start() - if not asset: + def _on_context_set_timeout(self): + if self._context_to_set is None: + self._set_context_timer.stop() + return + + if self.assets_widget.refreshing: + return + + self._context_to_set, context = None, self._context_to_set + if "asset" in context: + asset_doc = io.find_one( + { + "name": context["asset"], + "type": "asset" + }, + {"_id": 1} + ) or {} + asset_id = asset_doc.get("_id") + # Select the asset + self.assets_widget.select_asset(asset_id) + self.tasks_widget.set_asset_id(asset_id) + + if "task" in context: + self.tasks_widget.select_task_name(context["task"]) + + def _on_asset_changed(self): + asset_id = self.assets_widget.get_selected_asset_id() + if asset_id: + self.tasks_widget.setEnabled(True) + else: # Force disable the other widgets if no # active selection self.tasks_widget.setEnabled(False) self.files_widget.setEnabled(False) - else: - asset = asset[0] - self.tasks_widget.setEnabled(True) - self.tasks_widget.set_asset(asset) + self.tasks_widget.set_asset_id(asset_id) def _on_task_changed(self): - asset = self.assets_widget.get_selected_assets() or None - if asset is not None: - asset = asset[0] - task_name = self.tasks_widget.get_current_task_name() - task_type = self.tasks_widget.get_current_task_type() + asset_id = self.assets_widget.get_selected_asset_id() + task_name = self.tasks_widget.get_selected_task_name() + task_type = self.tasks_widget.get_selected_task_type() - self.tasks_widget.setEnabled(bool(asset)) + asset_is_valid = asset_id is not None + self.tasks_widget.setEnabled(asset_is_valid) - self.files_widget.setEnabled(all([bool(task_name), bool(asset)])) - self.files_widget.set_asset_task(asset, task_name, task_type) + self.files_widget.setEnabled(bool(task_name) and asset_is_valid) + self.files_widget.set_asset_task(asset_id, task_name, task_type) self.files_widget.refresh() diff --git a/openpype/version.py b/openpype/version.py index 7f85931698..2e9592f57d 100644 --- 
a/openpype/version.py +++ b/openpype/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring Pype version.""" -__version__ = "3.6.0-nightly.5" +__version__ = "3.7.0-nightly.3" diff --git a/openpype/widgets/message_window.py b/openpype/widgets/message_window.py index 969d6ccdd1..94e51f5d4f 100644 --- a/openpype/widgets/message_window.py +++ b/openpype/widgets/message_window.py @@ -1,6 +1,6 @@ -from Qt import QtWidgets, QtCore import sys import logging +from Qt import QtWidgets, QtCore log = logging.getLogger(__name__) diff --git a/openpype/widgets/nice_checkbox.py b/openpype/widgets/nice_checkbox.py index d550f361ff..ccd079c0fb 100644 --- a/openpype/widgets/nice_checkbox.py +++ b/openpype/widgets/nice_checkbox.py @@ -1,11 +1,18 @@ from math import floor, sqrt, ceil from Qt import QtWidgets, QtCore, QtGui +from openpype.style import get_objected_colors + class NiceCheckbox(QtWidgets.QFrame): stateChanged = QtCore.Signal(int) clicked = QtCore.Signal() + _checked_bg_color = None + _unchecked_bg_color = None + _checker_color = None + _checker_hover_color = None + def __init__(self, checked=False, draw_icons=False, parent=None): super(NiceCheckbox, self).__init__(parent) @@ -41,12 +48,6 @@ class NiceCheckbox(QtWidgets.QFrame): self._pressed = False self._under_mouse = False - self.checked_bg_color = QtGui.QColor(67, 181, 129) - self.unchecked_bg_color = QtGui.QColor(79, 79, 79) - - self.checker_checked_color = QtGui.QColor(255, 255, 255) - self.checker_unchecked_color = self.checker_checked_color - self.icon_scale_factor = sqrt(2) / 2 icon_path_stroker = QtGui.QPainterPathStroker() @@ -58,6 +59,37 @@ class NiceCheckbox(QtWidgets.QFrame): self._animation_timer.timeout.connect(self._on_animation_timeout) self._base_size = QtCore.QSize(90, 50) + self._load_colors() + + @classmethod + def _load_colors(cls): + if cls._checked_bg_color is not None: + return + + colors_data = get_objected_colors() + colors_info = colors_data["nice-checkbox"] + + 
cls._checked_bg_color = colors_info["bg-checked"].get_qcolor() + cls._unchecked_bg_color = colors_info["bg-unchecked"].get_qcolor() + + cls._checker_color = colors_info["bg-checker"].get_qcolor() + cls._checker_hover_color = colors_info["bg-checker-hover"].get_qcolor() + + @property + def checked_bg_color(self): + return self._checked_bg_color + + @property + def unchecked_bg_color(self): + return self._unchecked_bg_color + + @property + def checker_color(self): + return self._checker_color + + @property + def checker_hover_color(self): + return self._checker_hover_color def setTristate(self, tristate=True): if self._is_tristate != tristate: @@ -73,15 +105,6 @@ class NiceCheckbox(QtWidgets.QFrame): self._draw_icons = draw_icons self.repaint() - def _checkbox_size_hint(self): - checkbox_height = self.style().pixelMetric( - QtWidgets.QStyle.PM_IndicatorHeight - ) - checkbox_height += checkbox_height % 2 - width = (2 * checkbox_height) - (checkbox_height / 5) - new_size = QtCore.QSize(width, checkbox_height) - return new_size - def sizeHint(self): height = self.fontMetrics().height() width = self.get_width_hint_by_height(height) @@ -159,7 +182,7 @@ class NiceCheckbox(QtWidgets.QFrame): if self._animation_timer.isActive(): self._animation_timer.stop() - if self.isEnabled(): + if self.isVisible() and self.isEnabled(): # Start animation self._animation_timer.start(self._animation_timeout) else: @@ -235,14 +258,16 @@ class NiceCheckbox(QtWidgets.QFrame): def _on_animation_timeout(self): if self._checkstate == QtCore.Qt.Checked: - self._current_step += 1 if self._current_step == self._steps: self._animation_timer.stop() + return + self._current_step += 1 elif self._checkstate == QtCore.Qt.Unchecked: - self._current_step -= 1 if self._current_step == 0: self._animation_timer.stop() + return + self._current_step -= 1 else: if self._current_step < self._middle_step: @@ -291,11 +316,9 @@ class NiceCheckbox(QtWidgets.QFrame): # Draw inner background if self._current_step == 
self._steps: bg_color = self.checked_bg_color - checker_color = self.checker_checked_color elif self._current_step == 0: bg_color = self.unchecked_bg_color - checker_color = self.checker_unchecked_color else: offset_ratio = self._current_step / self._steps @@ -305,11 +328,6 @@ class NiceCheckbox(QtWidgets.QFrame): self.unchecked_bg_color, offset_ratio ) - checker_color = self.steped_color( - self.checker_checked_color, - self.checker_unchecked_color, - offset_ratio - ) margins_ratio = self._checker_margins_divider if margins_ratio > 0: @@ -359,52 +377,14 @@ class NiceCheckbox(QtWidgets.QFrame): checker_rect = QtCore.QRect(pos_x, pos_y, checker_size, checker_size) under_mouse = self.isEnabled() and self._under_mouse - - shadow_x = checker_rect.x() - shadow_y = checker_rect.y() + margin_size_c - shadow_size = min( - frame_rect.right() - shadow_x, - frame_rect.bottom() - shadow_y, - checker_size + (2 * margin_size_c) - ) - shadow_rect = QtCore.QRect( - checker_rect.x(), - shadow_y, - shadow_size, - shadow_size - ) - - shadow_brush = QtGui.QRadialGradient( - shadow_rect.center(), - shadow_rect.height() / 2 - ) - shadow_brush.setColorAt(0.6, QtCore.Qt.black) - shadow_brush.setColorAt(1, QtCore.Qt.transparent) - - painter.setPen(QtCore.Qt.transparent) - painter.setBrush(shadow_brush) - painter.drawEllipse(shadow_rect) + if under_mouse: + checker_color = self.checker_hover_color + else: + checker_color = self.checker_color painter.setBrush(checker_color) painter.drawEllipse(checker_rect) - if under_mouse: - adjust = margin_size_c - if adjust < 1 and checker_rect.height() > 4: - adjust = 1 - - smaller_checker_rect = checker_rect.adjusted( - adjust, adjust, -adjust, -adjust - ) - gradient = QtGui.QLinearGradient( - smaller_checker_rect.bottomRight(), - smaller_checker_rect.topLeft() - ) - gradient.setColorAt(0, checker_color) - gradient.setColorAt(1, checker_color.darker(155)) - painter.setBrush(gradient) - painter.drawEllipse(smaller_checker_rect) - if self._draw_icons: 
painter.setBrush(bg_color) icon_path = self._get_icon_path(painter, checker_rect) diff --git a/openpype/widgets/popup.py b/openpype/widgets/popup.py index 7c0fa0f5c5..3c3f6283c4 100644 --- a/openpype/widgets/popup.py +++ b/openpype/widgets/popup.py @@ -3,7 +3,7 @@ import logging import contextlib -from avalon.vendor.Qt import QtCore, QtWidgets, QtGui +from Qt import QtCore, QtWidgets log = logging.getLogger(__name__) diff --git a/openpype/widgets/project_settings.py b/openpype/widgets/project_settings.py index c69d55fb39..43ff9f2789 100644 --- a/openpype/widgets/project_settings.py +++ b/openpype/widgets/project_settings.py @@ -1,10 +1,9 @@ - - -from avalon.vendor.Qt import QtCore, QtGui, QtWidgets import os import getpass import platform +from Qt import QtCore, QtGui, QtWidgets + from avalon import style import ftrack_api diff --git a/poetry.lock b/poetry.lock index 36105f4213..c07a20253c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -80,7 +80,7 @@ python-dateutil = ">=2.7.0" [[package]] name = "astroid" -version = "2.5.6" +version = "2.8.4" description = "An abstract syntax tree for Python with inference support." category = "dev" optional = false @@ -89,7 +89,8 @@ python-versions = "~=3.6" [package.dependencies] lazy-object-proxy = ">=1.4.0" typed-ast = {version = ">=1.4.0,<1.5", markers = "implementation_name == \"cpython\" and python_version < \"3.8\""} -wrapt = ">=1.11,<1.13" +typing-extensions = {version = ">=3.10", markers = "python_version < \"3.10\""} +wrapt = ">=1.11,<1.14" [[package]] name = "async-timeout" @@ -162,20 +163,20 @@ typecheck = ["mypy"] [[package]] name = "blessed" -version = "1.18.0" +version = "1.19.0" description = "Easy, practical library for making terminal apps, by providing an elegant, well-documented interface to Colors, Keyboard input, and screen Positioning capabilities." 
category = "main" optional = false -python-versions = "*" +python-versions = ">=2.7" [package.dependencies] -jinxed = {version = ">=0.5.4", markers = "platform_system == \"Windows\""} +jinxed = {version = ">=1.1.0", markers = "platform_system == \"Windows\""} six = ">=1.9.0" wcwidth = ">=0.1.4" [[package]] name = "cachetools" -version = "4.2.2" +version = "4.2.4" description = "Extensible memoizing collections and decorators" category = "main" optional = false @@ -183,7 +184,7 @@ python-versions = "~=3.5" [[package]] name = "certifi" -version = "2021.5.30" +version = "2021.10.8" description = "Python package for providing Mozilla's CA Bundle." category = "main" optional = false @@ -191,7 +192,7 @@ python-versions = "*" [[package]] name = "cffi" -version = "1.14.5" +version = "1.15.0" description = "Foreign Function Interface for Python calling C code." category = "main" optional = false @@ -258,18 +259,21 @@ python-versions = "*" [[package]] name = "coverage" -version = "5.5" +version = "6.0.2" description = "Code coverage measurement for Python" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" +python-versions = ">=3.6" + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "extra == \"toml\""} [package.extras] -toml = ["toml"] +toml = ["tomli"] [[package]] name = "cryptography" -version = "3.4.7" +version = "35.0.0" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
category = "main" optional = false @@ -282,9 +286,9 @@ cffi = ">=1.12" docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"] docstest = ["doc8", "pyenchant (>=1.6.11)", "twine (>=1.12.0)", "sphinxcontrib-spelling (>=4.0.1)"] pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"] -sdist = ["setuptools-rust (>=0.11.4)"] +sdist = ["setuptools_rust (>=0.11.4)"] ssh = ["bcrypt (>=3.1.5)"] -test = ["pytest (>=6.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pretend", "iso8601", "pytz", "hypothesis (>=1.11.4,!=3.79.2)"] +test = ["pytest (>=6.2.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pretend", "iso8601", "pytz", "hypothesis (>=1.11.4,!=3.79.2)"] [[package]] name = "cx-freeze" @@ -328,7 +332,7 @@ trio = ["trio (>=0.14.0)", "sniffio (>=1.1)"] [[package]] name = "docutils" -version = "0.16" +version = "0.18" description = "Docutils -- Python Documentation Utilities" category = "dev" optional = false @@ -336,7 +340,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "dropbox" -version = "11.20.0" +version = "11.22.0" description = "Official Dropbox API Client" category = "main" optional = false @@ -409,30 +413,30 @@ python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" [[package]] name = "gitdb" -version = "4.0.7" +version = "4.0.9" description = "Git Object Database" category = "dev" optional = false -python-versions = ">=3.4" +python-versions = ">=3.6" [package.dependencies] -smmap = ">=3.0.1,<5" +smmap = ">=3.0.1,<6" [[package]] name = "gitpython" -version = "3.1.17" -description = "Python Git Library" +version = "3.1.24" +description = "GitPython is a python library used to interact with Git repositories" category = "dev" optional = false -python-versions = ">=3.5" +python-versions = ">=3.7" [package.dependencies] gitdb = ">=4.0.1,<5" -typing-extensions = {version = ">=3.7.4.0", markers = "python_version < \"3.8\""} +typing-extensions = {version = ">=3.7.4.3", markers = 
"python_version < \"3.10\""} [[package]] name = "google-api-core" -version = "1.30.0" +version = "1.31.3" description = "Google API client core library" category = "main" optional = false @@ -442,7 +446,7 @@ python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*" google-auth = ">=1.25.0,<2.0dev" googleapis-common-protos = ">=1.6.0,<2.0dev" packaging = ">=14.3" -protobuf = ">=3.12.0" +protobuf = ">=3.12.0,<3.18.0" pytz = "*" requests = ">=2.18.0,<3.0.0dev" six = ">=1.13.0" @@ -470,7 +474,7 @@ uritemplate = ">=3.0.0,<4dev" [[package]] name = "google-auth" -version = "1.31.0" +version = "1.35.0" description = "Google Authentication Library" category = "main" optional = false @@ -516,11 +520,11 @@ grpc = ["grpcio (>=1.0.0)"] [[package]] name = "httplib2" -version = "0.19.1" +version = "0.20.1" description = "A comprehensive HTTP client library." category = "main" optional = false -python-versions = "*" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [package.dependencies] pyparsing = ">=2.4.2,<3" @@ -543,7 +547,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "importlib-metadata" -version = "4.5.0" +version = "4.8.1" description = "Read metadata from Python packages" category = "main" optional = false @@ -555,7 +559,8 @@ zipp = ">=0.5" [package.extras] docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] +perf = ["ipython"] +testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] [[package]] name = "iniconfig" @@ -567,16 +572,17 @@ 
python-versions = "*" [[package]] name = "isort" -version = "5.8.0" +version = "5.9.3" description = "A Python utility / library to sort Python imports." category = "dev" optional = false -python-versions = ">=3.6,<4.0" +python-versions = ">=3.6.1,<4.0" [package.extras] pipfile_deprecated_finder = ["pipreqs", "requirementslib"] requirements_deprecated_finder = ["pipreqs", "pip-api"] colors = ["colorama (>=0.4.3,<0.5.0)"] +plugins = ["setuptools"] [[package]] name = "jedi" @@ -594,14 +600,15 @@ testing = ["colorama", "docopt", "pytest (>=3.1.0)"] [[package]] name = "jeepney" -version = "0.6.0" +version = "0.7.1" description = "Low-level, pure Python DBus protocol wrapper." category = "main" optional = false python-versions = ">=3.6" [package.extras] -test = ["pytest", "pytest-trio", "pytest-asyncio", "testpath", "trio"] +test = ["pytest", "pytest-trio", "pytest-asyncio", "testpath", "trio", "async-timeout"] +trio = ["trio", "async-generator"] [[package]] name = "jinja2" @@ -701,7 +708,7 @@ python-versions = "*" [[package]] name = "multidict" -version = "5.1.0" +version = "5.2.0" description = "multidict implementation" category = "main" optional = false @@ -729,18 +736,18 @@ reference = "openpype" [[package]] name = "packaging" -version = "20.9" +version = "21.2" description = "Core utilities for Python packages" category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.6" [package.dependencies] -pyparsing = ">=2.0.2" +pyparsing = ">=2.0.2,<3" [[package]] name = "paramiko" -version = "2.7.2" +version = "2.8.0" description = "SSH2 protocol library" category = "main" optional = false @@ -771,7 +778,7 @@ testing = ["docopt", "pytest (<6.0.0)"] [[package]] name = "pathlib2" -version = "2.3.5" +version = "2.3.6" description = "Object-oriented filesystem paths" category = "main" optional = false @@ -782,25 +789,38 @@ six = "*" [[package]] name = "pillow" -version = "8.3.2" +version = "8.4.0" description = 
"Python Imaging Library (Fork)" category = "main" optional = false python-versions = ">=3.6" +[[package]] +name = "platformdirs" +version = "2.4.0" +description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.extras] +docs = ["Sphinx (>=4)", "furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx-autodoc-typehints (>=1.12)"] +test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock (>=3.6)"] + [[package]] name = "pluggy" -version = "0.13.1" +version = "1.0.0" description = "plugin and hook calling mechanisms for python" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.6" [package.dependencies] importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} [package.extras] dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] [[package]] name = "ply" @@ -910,7 +930,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "pygments" -version = "2.9.0" +version = "2.10.0" description = "Pygments is a syntax highlighting package written in Python." 
category = "dev" optional = false @@ -918,22 +938,24 @@ python-versions = ">=3.5" [[package]] name = "pylint" -version = "2.8.3" +version = "2.11.1" description = "python code static checker" category = "dev" optional = false python-versions = "~=3.6" [package.dependencies] -astroid = "2.5.6" +astroid = ">=2.8.0,<2.9" colorama = {version = "*", markers = "sys_platform == \"win32\""} isort = ">=4.2.5,<6" mccabe = ">=0.6,<0.7" +platformdirs = ">=2.2.0" toml = ">=0.7.1" +typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} [[package]] name = "pymongo" -version = "3.11.4" +version = "3.12.1" description = "Python driver for MongoDB " category = "main" optional = false @@ -941,9 +963,9 @@ python-versions = "*" [package.extras] aws = ["pymongo-auth-aws (<2.0.0)"] -encryption = ["pymongocrypt (<2.0.0)"] +encryption = ["pymongocrypt (>=1.1.0,<2.0.0)"] gssapi = ["pykerberos"] -ocsp = ["pyopenssl (>=17.2.0)", "requests (<3.0.0)", "service-identity (>=18.1.0)"] +ocsp = ["pyopenssl (>=17.2.0)", "requests (<3.0.0)", "service-identity (>=18.1.0)", "certifi"] snappy = ["python-snappy"] srv = ["dnspython (>=1.16.0,<1.17.0)"] tls = ["ipaddress"] @@ -967,7 +989,7 @@ tests = ["pytest (>=3.2.1,!=3.3.0)", "hypothesis (>=3.27.0)"] [[package]] name = "pynput" -version = "1.7.3" +version = "1.7.4" description = "Monitor and control user input devices" category = "main" optional = false @@ -975,7 +997,8 @@ python-versions = "*" [package.dependencies] evdev = {version = ">=1.3", markers = "sys_platform in \"linux\""} -pyobjc-framework-Quartz = {version = ">=7.0", markers = "sys_platform == \"darwin\""} +pyobjc-framework-ApplicationServices = {version = ">=7.3", markers = "sys_platform == \"darwin\""} +pyobjc-framework-Quartz = {version = ">=7.3", markers = "sys_platform == \"darwin\""} python-xlib = {version = ">=0.17", markers = "sys_platform in \"linux\""} six = "*" @@ -987,6 +1010,19 @@ category = "main" optional = false python-versions = ">=3.6" +[[package]] 
+name = "pyobjc-framework-applicationservices" +version = "7.3" +description = "Wrappers for the framework ApplicationServices on macOS" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +pyobjc-core = ">=7.3" +pyobjc-framework-Cocoa = ">=7.3" +pyobjc-framework-Quartz = ">=7.3" + [[package]] name = "pyobjc-framework-cocoa" version = "7.3" @@ -1020,11 +1056,11 @@ python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" [[package]] name = "pyrsistent" -version = "0.17.3" +version = "0.18.0" description = "Persistent/Functional/Immutable data structures" category = "main" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" [[package]] name = "pysftp" @@ -1039,7 +1075,7 @@ paramiko = ">=1.17" [[package]] name = "pytest" -version = "6.2.4" +version = "6.2.5" description = "pytest: simple powerful testing with Python" category = "dev" optional = false @@ -1052,7 +1088,7 @@ colorama = {version = "*", markers = "sys_platform == \"win32\""} importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} iniconfig = "*" packaging = "*" -pluggy = ">=0.12,<1.0.0a1" +pluggy = ">=0.12,<2.0" py = ">=1.8.2" toml = "*" @@ -1061,37 +1097,36 @@ testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xm [[package]] name = "pytest-cov" -version = "2.12.1" +version = "3.0.0" description = "Pytest plugin for measuring coverage." 
category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.6" [package.dependencies] -coverage = ">=5.2.1" +coverage = {version = ">=5.2.1", extras = ["toml"]} pytest = ">=4.6" -toml = "*" [package.extras] testing = ["fields", "hunter", "process-tests", "six", "pytest-xdist", "virtualenv"] [[package]] name = "pytest-print" -version = "0.2.1" +version = "0.3.0" description = "pytest-print adds the printer fixture you can use to print messages to the user (directly to the pytest runner, not stdout)" category = "dev" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +python-versions = ">=3.6" [package.dependencies] -pytest = ">=3.0.0" +pytest = ">=6" [package.extras] -test = ["coverage (>=5)", "pytest (>=4)"] +test = ["coverage (>=5)"] [[package]] name = "python-dateutil" -version = "2.8.1" +version = "2.8.2" description = "Extensions to the standard Python datetime module" category = "main" optional = false @@ -1102,7 +1137,7 @@ six = ">=1.5" [[package]] name = "python-xlib" -version = "0.30" +version = "0.31" description = "Python X Library" category = "main" optional = false @@ -1121,7 +1156,7 @@ python-versions = "*" [[package]] name = "pytz" -version = "2021.1" +version = "2021.3" description = "World timezone definitions, modern and historical" category = "main" optional = false @@ -1145,7 +1180,7 @@ python-versions = "*" [[package]] name = "qt.py" -version = "1.3.3" +version = "1.3.6" description = "Python 2 & 3 compatibility wrapper around all Qt bindings - PySide, PySide2, PyQt4 and PyQt5." 
category = "main" optional = false @@ -1223,23 +1258,23 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" [[package]] name = "slack-sdk" -version = "3.6.0" +version = "3.11.2" description = "The Slack API Platform SDK for Python" category = "main" optional = false python-versions = ">=3.6.0" [package.extras] -optional = ["aiodns (>1.0)", "aiohttp (>=3.7.3,<4)", "boto3 (<=2)", "SQLAlchemy (>=1,<2)", "websockets (>=9.1,<10)", "websocket-client (>=0.57,<1)"] -testing = ["pytest (>=5.4,<6)", "pytest-asyncio (<1)", "Flask-Sockets (>=0.2,<1)", "pytest-cov (>=2,<3)", "codecov (>=2,<3)", "flake8 (>=3,<4)", "black (==21.5b1)", "psutil (>=5,<6)", "databases (>=0.3)"] +optional = ["aiodns (>1.0)", "aiohttp (>=3.7.3,<4)", "boto3 (<=2)", "SQLAlchemy (>=1,<2)", "websockets (>=9.1,<10)", "websocket-client (>=1,<2)"] +testing = ["pytest (>=5.4,<6)", "pytest-asyncio (<1)", "Flask-Sockets (>=0.2,<1)", "Flask (>=1,<2)", "Werkzeug (<2)", "pytest-cov (>=2,<3)", "codecov (>=2,<3)", "flake8 (>=3,<4)", "black (==21.9b0)", "psutil (>=5,<6)", "databases (>=0.3)", "boto3 (<=2)", "moto (<2)"] [[package]] name = "smmap" -version = "4.0.0" +version = "5.0.0" description = "A pure Python implementation of a sliding window memory map manager" category = "dev" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" [[package]] name = "snowballstemmer" @@ -1259,17 +1294,17 @@ python-versions = "*" [[package]] name = "sphinx" -version = "4.0.2" +version = "3.5.3" description = "Python documentation generator" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.5" [package.dependencies] alabaster = ">=0.7,<0.8" babel = ">=1.3" colorama = {version = ">=0.3.5", markers = "sys_platform == \"win32\""} -docutils = ">=0.14,<0.18" +docutils = ">=0.12" imagesize = "*" Jinja2 = ">=2.3" packaging = "*" @@ -1302,14 +1337,13 @@ sphinx = "*" [[package]] name = "sphinx-rtd-theme" -version = "0.5.2" +version = "0.5.1" description = "Read the Docs theme for 
Sphinx" category = "dev" optional = false python-versions = "*" [package.dependencies] -docutils = "<0.17" sphinx = "*" [package.extras] @@ -1429,6 +1463,14 @@ category = "dev" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +[[package]] +name = "tomli" +version = "1.2.2" +description = "A lil' TOML parser" +category = "dev" +optional = false +python-versions = ">=3.6" + [[package]] name = "typed-ast" version = "1.4.3" @@ -1439,7 +1481,7 @@ python-versions = "*" [[package]] name = "typing-extensions" -version = "3.10.0.0" +version = "3.10.0.2" description = "Backported and Experimental Type Hints for Python 3.5+" category = "main" optional = false @@ -1455,7 +1497,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "urllib3" -version = "1.26.5" +version = "1.26.7" description = "HTTP library with thread-safe connection pooling, file post, and more." category = "main" optional = false @@ -1487,11 +1529,11 @@ six = "*" [[package]] name = "wrapt" -version = "1.12.1" +version = "1.13.2" description = "Module for decorators, wrappers and monkey patching." 
category = "dev" optional = false -python-versions = "*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" [[package]] name = "wsrpc-aiohttp" @@ -1512,7 +1554,7 @@ ujson = ["ujson"] [[package]] name = "yarl" -version = "1.6.3" +version = "1.7.0" description = "Yet another URL library" category = "main" optional = false @@ -1525,7 +1567,7 @@ typing-extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} [[package]] name = "zipp" -version = "3.4.1" +version = "3.6.0" description = "Backport of pathlib-compatible object wrapper for zip files" category = "main" optional = false @@ -1533,7 +1575,7 @@ python-versions = ">=3.6" [package.extras] docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=4.6)", "pytest-checkdocs (>=1.2.3)", "pytest-flake8", "pytest-cov", "pytest-enabler", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] +testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] [metadata] lock-version = "1.1" @@ -1602,8 +1644,8 @@ arrow = [ {file = "arrow-0.17.0.tar.gz", hash = "sha256:ff08d10cda1d36c68657d6ad20d74fbea493d980f8b2d45344e00d6ed2bf6ed4"}, ] astroid = [ - {file = "astroid-2.5.6-py3-none-any.whl", hash = "sha256:4db03ab5fc3340cf619dbc25e42c2cc3755154ce6009469766d7143d1fc2ee4e"}, - {file = "astroid-2.5.6.tar.gz", hash = "sha256:8a398dfce302c13f14bab13e2b14fe385d32b73f4e4853b9bdfb64598baa1975"}, + {file = "astroid-2.8.4-py3-none-any.whl", hash = "sha256:0755c998e7117078dcb7d0bda621391dd2a85da48052d948c7411ab187325346"}, + {file = "astroid-2.8.4.tar.gz", hash = "sha256:1e83a69fd51b013ebf5912d26b9338d6643a55fec2f20c787792680610eed4a2"}, ] async-timeout = [ {file = "async-timeout-3.0.1.tar.gz", hash = "sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f"}, @@ -1635,67 +1677,68 @@ bcrypt = [ {file = 
"bcrypt-3.2.0.tar.gz", hash = "sha256:5b93c1726e50a93a033c36e5ca7fdcd29a5c7395af50a6892f5d9e7c6cfbfb29"}, ] blessed = [ - {file = "blessed-1.18.0-py2.py3-none-any.whl", hash = "sha256:5b5e2f0563d5a668c282f3f5946f7b1abb70c85829461900e607e74d7725106e"}, - {file = "blessed-1.18.0.tar.gz", hash = "sha256:1312879f971330a1b7f2c6341f2ae7e2cbac244bfc9d0ecfbbecd4b0293bc755"}, + {file = "blessed-1.19.0-py2.py3-none-any.whl", hash = "sha256:1f2d462631b2b6d2d4c3c65b54ef79ad87a6ca2dd55255df2f8d739fcc8a1ddb"}, + {file = "blessed-1.19.0.tar.gz", hash = "sha256:4db0f94e5761aea330b528e84a250027ffe996b5a94bf03e502600c9a5ad7a61"}, ] cachetools = [ - {file = "cachetools-4.2.2-py3-none-any.whl", hash = "sha256:2cc0b89715337ab6dbba85b5b50effe2b0c74e035d83ee8ed637cf52f12ae001"}, - {file = "cachetools-4.2.2.tar.gz", hash = "sha256:61b5ed1e22a0924aed1d23b478f37e8d52549ff8a961de2909c69bf950020cff"}, + {file = "cachetools-4.2.4-py3-none-any.whl", hash = "sha256:92971d3cb7d2a97efff7c7bb1657f21a8f5fb309a37530537c71b1774189f2d1"}, + {file = "cachetools-4.2.4.tar.gz", hash = "sha256:89ea6f1b638d5a73a4f9226be57ac5e4f399d22770b92355f92dcb0f7f001693"}, ] certifi = [ - {file = "certifi-2021.5.30-py2.py3-none-any.whl", hash = "sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8"}, - {file = "certifi-2021.5.30.tar.gz", hash = "sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee"}, + {file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"}, + {file = "certifi-2021.10.8.tar.gz", hash = "sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"}, ] cffi = [ - {file = "cffi-1.14.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:bb89f306e5da99f4d922728ddcd6f7fcebb3241fc40edebcb7284d7514741991"}, - {file = "cffi-1.14.5-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:34eff4b97f3d982fb93e2831e6750127d1355a923ebaeeb565407b3d2f8d41a1"}, - {file = 
"cffi-1.14.5-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:99cd03ae7988a93dd00bcd9d0b75e1f6c426063d6f03d2f90b89e29b25b82dfa"}, - {file = "cffi-1.14.5-cp27-cp27m-win32.whl", hash = "sha256:65fa59693c62cf06e45ddbb822165394a288edce9e276647f0046e1ec26920f3"}, - {file = "cffi-1.14.5-cp27-cp27m-win_amd64.whl", hash = "sha256:51182f8927c5af975fece87b1b369f722c570fe169f9880764b1ee3bca8347b5"}, - {file = "cffi-1.14.5-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:43e0b9d9e2c9e5d152946b9c5fe062c151614b262fda2e7b201204de0b99e482"}, - {file = "cffi-1.14.5-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:cbde590d4faaa07c72bf979734738f328d239913ba3e043b1e98fe9a39f8b2b6"}, - {file = "cffi-1.14.5-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:5de7970188bb46b7bf9858eb6890aad302577a5f6f75091fd7cdd3ef13ef3045"}, - {file = "cffi-1.14.5-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:a465da611f6fa124963b91bf432d960a555563efe4ed1cc403ba5077b15370aa"}, - {file = "cffi-1.14.5-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:d42b11d692e11b6634f7613ad8df5d6d5f8875f5d48939520d351007b3c13406"}, - {file = "cffi-1.14.5-cp35-cp35m-win32.whl", hash = "sha256:72d8d3ef52c208ee1c7b2e341f7d71c6fd3157138abf1a95166e6165dd5d4369"}, - {file = "cffi-1.14.5-cp35-cp35m-win_amd64.whl", hash = "sha256:29314480e958fd8aab22e4a58b355b629c59bf5f2ac2492b61e3dc06d8c7a315"}, - {file = "cffi-1.14.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:3d3dd4c9e559eb172ecf00a2a7517e97d1e96de2a5e610bd9b68cea3925b4892"}, - {file = "cffi-1.14.5-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:48e1c69bbacfc3d932221851b39d49e81567a4d4aac3b21258d9c24578280058"}, - {file = "cffi-1.14.5-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:69e395c24fc60aad6bb4fa7e583698ea6cc684648e1ffb7fe85e3c1ca131a7d5"}, - {file = "cffi-1.14.5-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:9e93e79c2551ff263400e1e4be085a1210e12073a31c2011dbbda14bda0c6132"}, - {file = 
"cffi-1.14.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24ec4ff2c5c0c8f9c6b87d5bb53555bf267e1e6f70e52e5a9740d32861d36b6f"}, - {file = "cffi-1.14.5-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c3f39fa737542161d8b0d680df2ec249334cd70a8f420f71c9304bd83c3cbed"}, - {file = "cffi-1.14.5-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:681d07b0d1e3c462dd15585ef5e33cb021321588bebd910124ef4f4fb71aef55"}, - {file = "cffi-1.14.5-cp36-cp36m-win32.whl", hash = "sha256:58e3f59d583d413809d60779492342801d6e82fefb89c86a38e040c16883be53"}, - {file = "cffi-1.14.5-cp36-cp36m-win_amd64.whl", hash = "sha256:005a36f41773e148deac64b08f233873a4d0c18b053d37da83f6af4d9087b813"}, - {file = "cffi-1.14.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2894f2df484ff56d717bead0a5c2abb6b9d2bf26d6960c4604d5c48bbc30ee73"}, - {file = "cffi-1.14.5-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:0857f0ae312d855239a55c81ef453ee8fd24136eaba8e87a2eceba644c0d4c06"}, - {file = "cffi-1.14.5-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:cd2868886d547469123fadc46eac7ea5253ea7fcb139f12e1dfc2bbd406427d1"}, - {file = "cffi-1.14.5-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:35f27e6eb43380fa080dccf676dece30bef72e4a67617ffda586641cd4508d49"}, - {file = "cffi-1.14.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06d7cd1abac2ffd92e65c0609661866709b4b2d82dd15f611e602b9b188b0b69"}, - {file = "cffi-1.14.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f861a89e0043afec2a51fd177a567005847973be86f709bbb044d7f42fc4e05"}, - {file = "cffi-1.14.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cc5a8e069b9ebfa22e26d0e6b97d6f9781302fe7f4f2b8776c3e1daea35f1adc"}, - {file = "cffi-1.14.5-cp37-cp37m-win32.whl", hash = "sha256:9ff227395193126d82e60319a673a037d5de84633f11279e336f9c0f189ecc62"}, - {file = "cffi-1.14.5-cp37-cp37m-win_amd64.whl", 
hash = "sha256:9cf8022fb8d07a97c178b02327b284521c7708d7c71a9c9c355c178ac4bbd3d4"}, - {file = "cffi-1.14.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8b198cec6c72df5289c05b05b8b0969819783f9418e0409865dac47288d2a053"}, - {file = "cffi-1.14.5-cp38-cp38-manylinux1_i686.whl", hash = "sha256:ad17025d226ee5beec591b52800c11680fca3df50b8b29fe51d882576e039ee0"}, - {file = "cffi-1.14.5-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:6c97d7350133666fbb5cf4abdc1178c812cb205dc6f41d174a7b0f18fb93337e"}, - {file = "cffi-1.14.5-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:8ae6299f6c68de06f136f1f9e69458eae58f1dacf10af5c17353eae03aa0d827"}, - {file = "cffi-1.14.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:04c468b622ed31d408fea2346bec5bbffba2cc44226302a0de1ade9f5ea3d373"}, - {file = "cffi-1.14.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:06db6321b7a68b2bd6df96d08a5adadc1fa0e8f419226e25b2a5fbf6ccc7350f"}, - {file = "cffi-1.14.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:293e7ea41280cb28c6fcaaa0b1aa1f533b8ce060b9e701d78511e1e6c4a1de76"}, - {file = "cffi-1.14.5-cp38-cp38-win32.whl", hash = "sha256:b85eb46a81787c50650f2392b9b4ef23e1f126313b9e0e9013b35c15e4288e2e"}, - {file = "cffi-1.14.5-cp38-cp38-win_amd64.whl", hash = "sha256:1f436816fc868b098b0d63b8920de7d208c90a67212546d02f84fe78a9c26396"}, - {file = "cffi-1.14.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1071534bbbf8cbb31b498d5d9db0f274f2f7a865adca4ae429e147ba40f73dea"}, - {file = "cffi-1.14.5-cp39-cp39-manylinux1_i686.whl", hash = "sha256:9de2e279153a443c656f2defd67769e6d1e4163952b3c622dcea5b08a6405322"}, - {file = "cffi-1.14.5-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:6e4714cc64f474e4d6e37cfff31a814b509a35cb17de4fb1999907575684479c"}, - {file = "cffi-1.14.5-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:158d0d15119b4b7ff6b926536763dc0714313aa59e320ddf787502c70c4d4bee"}, - {file = 
"cffi-1.14.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bf1ac1984eaa7675ca8d5745a8cb87ef7abecb5592178406e55858d411eadc0"}, - {file = "cffi-1.14.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:df5052c5d867c1ea0b311fb7c3cd28b19df469c056f7fdcfe88c7473aa63e333"}, - {file = "cffi-1.14.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:24a570cd11895b60829e941f2613a4f79df1a27344cbbb82164ef2e0116f09c7"}, - {file = "cffi-1.14.5-cp39-cp39-win32.whl", hash = "sha256:afb29c1ba2e5a3736f1c301d9d0abe3ec8b86957d04ddfa9d7a6a42b9367e396"}, - {file = "cffi-1.14.5-cp39-cp39-win_amd64.whl", hash = "sha256:f2d45f97ab6bb54753eab54fffe75aaf3de4ff2341c9daee1987ee1837636f1d"}, - {file = "cffi-1.14.5.tar.gz", hash = "sha256:fd78e5fee591709f32ef6edb9a015b4aa1a5022598e36227500c8f4e02328d9c"}, + {file = "cffi-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962"}, + {file = "cffi-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:23cfe892bd5dd8941608f93348c0737e369e51c100d03718f108bf1add7bd6d0"}, + {file = "cffi-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:41d45de54cd277a7878919867c0f08b0cf817605e4eb94093e7516505d3c8d14"}, + {file = "cffi-1.15.0-cp27-cp27m-win32.whl", hash = "sha256:4a306fa632e8f0928956a41fa8e1d6243c71e7eb59ffbd165fc0b41e316b2474"}, + {file = "cffi-1.15.0-cp27-cp27m-win_amd64.whl", hash = "sha256:e7022a66d9b55e93e1a845d8c9eba2a1bebd4966cd8bfc25d9cd07d515b33fa6"}, + {file = "cffi-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:14cd121ea63ecdae71efa69c15c5543a4b5fbcd0bbe2aad864baca0063cecf27"}, + {file = "cffi-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:d4d692a89c5cf08a8557fdeb329b82e7bf609aadfaed6c0d79f5a449a3c7c023"}, + {file = "cffi-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0104fb5ae2391d46a4cb082abdd5c69ea4eab79d8d44eaaf79f1b1fd806ee4c2"}, + {file = 
"cffi-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:91ec59c33514b7c7559a6acda53bbfe1b283949c34fe7440bcf917f96ac0723e"}, + {file = "cffi-1.15.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f5c7150ad32ba43a07c4479f40241756145a1f03b43480e058cfd862bf5041c7"}, + {file = "cffi-1.15.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:00c878c90cb53ccfaae6b8bc18ad05d2036553e6d9d1d9dbcf323bbe83854ca3"}, + {file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abb9a20a72ac4e0fdb50dae135ba5e77880518e742077ced47eb1499e29a443c"}, + {file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a5263e363c27b653a90078143adb3d076c1a748ec9ecc78ea2fb916f9b861962"}, + {file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f54a64f8b0c8ff0b64d18aa76675262e1700f3995182267998c31ae974fbc382"}, + {file = "cffi-1.15.0-cp310-cp310-win32.whl", hash = "sha256:c21c9e3896c23007803a875460fb786118f0cdd4434359577ea25eb556e34c55"}, + {file = "cffi-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:5e069f72d497312b24fcc02073d70cb989045d1c91cbd53979366077959933e0"}, + {file = "cffi-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:64d4ec9f448dfe041705426000cc13e34e6e5bb13736e9fd62e34a0b0c41566e"}, + {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2756c88cbb94231c7a147402476be2c4df2f6078099a6f4a480d239a8817ae39"}, + {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b96a311ac60a3f6be21d2572e46ce67f09abcf4d09344c49274eb9e0bf345fc"}, + {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75e4024375654472cc27e91cbe9eaa08567f7fbdf822638be2814ce059f58032"}, + {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:59888172256cac5629e60e72e86598027aca6bf01fa2465bdb676d37636573e8"}, + {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:27c219baf94952ae9d50ec19651a687b826792055353d07648a5695413e0c605"}, + {file = "cffi-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:4958391dbd6249d7ad855b9ca88fae690783a6be9e86df65865058ed81fc860e"}, + {file = "cffi-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:f6f824dc3bce0edab5f427efcfb1d63ee75b6fcb7282900ccaf925be84efb0fc"}, + {file = "cffi-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:06c48159c1abed75c2e721b1715c379fa3200c7784271b3c46df01383b593636"}, + {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c2051981a968d7de9dd2d7b87bcb9c939c74a34626a6e2f8181455dd49ed69e4"}, + {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:fd8a250edc26254fe5b33be00402e6d287f562b6a5b2152dec302fa15bb3e997"}, + {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91d77d2a782be4274da750752bb1650a97bfd8f291022b379bb8e01c66b4e96b"}, + {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:45db3a33139e9c8f7c09234b5784a5e33d31fd6907800b316decad50af323ff2"}, + {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:263cc3d821c4ab2213cbe8cd8b355a7f72a8324577dc865ef98487c1aeee2bc7"}, + {file = "cffi-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:17771976e82e9f94976180f76468546834d22a7cc404b17c22df2a2c81db0c66"}, + {file = "cffi-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:3415c89f9204ee60cd09b235810be700e993e343a408693e80ce7f6a40108029"}, + {file = "cffi-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4238e6dab5d6a8ba812de994bbb0a79bddbdf80994e4ce802b6f6f3142fcc880"}, + {file = "cffi-1.15.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = 
"sha256:0808014eb713677ec1292301ea4c81ad277b6cdf2fdd90fd540af98c0b101d20"}, + {file = "cffi-1.15.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:57e9ac9ccc3101fac9d6014fba037473e4358ef4e89f8e181f8951a2c0162024"}, + {file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b6c2ea03845c9f501ed1313e78de148cd3f6cad741a75d43a29b43da27f2e1e"}, + {file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10dffb601ccfb65262a27233ac273d552ddc4d8ae1bf93b21c94b8511bffe728"}, + {file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:786902fb9ba7433aae840e0ed609f45c7bcd4e225ebb9c753aa39725bb3e6ad6"}, + {file = "cffi-1.15.0-cp38-cp38-win32.whl", hash = "sha256:da5db4e883f1ce37f55c667e5c0de439df76ac4cb55964655906306918e7363c"}, + {file = "cffi-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:181dee03b1170ff1969489acf1c26533710231c58f95534e3edac87fff06c443"}, + {file = "cffi-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:45e8636704eacc432a206ac7345a5d3d2c62d95a507ec70d62f23cd91770482a"}, + {file = "cffi-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:31fb708d9d7c3f49a60f04cf5b119aeefe5644daba1cd2a0fe389b674fd1de37"}, + {file = "cffi-1.15.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6dc2737a3674b3e344847c8686cf29e500584ccad76204efea14f451d4cc669a"}, + {file = "cffi-1.15.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74fdfdbfdc48d3f47148976f49fab3251e550a8720bebc99bf1483f5bfb5db3e"}, + {file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffaa5c925128e29efbde7301d8ecaf35c8c60ffbcd6a1ffd3a552177c8e5e796"}, + {file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f7d084648d77af029acb79a0ff49a0ad7e9d09057a9bf46596dac9514dc07df"}, + {file = 
"cffi-1.15.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef1f279350da2c586a69d32fc8733092fd32cc8ac95139a00377841f59a3f8d8"}, + {file = "cffi-1.15.0-cp39-cp39-win32.whl", hash = "sha256:2a23af14f408d53d5e6cd4e3d9a24ff9e05906ad574822a10563efcef137979a"}, + {file = "cffi-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:3773c4d81e6e818df2efbc7dd77325ca0dcb688116050fb2b3011218eda36139"}, + {file = "cffi-1.15.0.tar.gz", hash = "sha256:920f0d66a896c2d99f0adbb391f990a84091179542c205fa53ce5787aff87954"}, ] chardet = [ {file = "chardet-4.0.0-py2.py3-none-any.whl", hash = "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5"}, @@ -1722,74 +1765,61 @@ coolname = [ {file = "coolname-1.1.0.tar.gz", hash = "sha256:410fe6ea9999bf96f2856ef0c726d5f38782bbefb7bb1aca0e91e0dc98ed09e3"}, ] coverage = [ - {file = "coverage-5.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:b6d534e4b2ab35c9f93f46229363e17f63c53ad01330df9f2d6bd1187e5eaacf"}, - {file = "coverage-5.5-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:b7895207b4c843c76a25ab8c1e866261bcfe27bfaa20c192de5190121770672b"}, - {file = "coverage-5.5-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:c2723d347ab06e7ddad1a58b2a821218239249a9e4365eaff6649d31180c1669"}, - {file = "coverage-5.5-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:900fbf7759501bc7807fd6638c947d7a831fc9fdf742dc10f02956ff7220fa90"}, - {file = "coverage-5.5-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:004d1880bed2d97151facef49f08e255a20ceb6f9432df75f4eef018fdd5a78c"}, - {file = "coverage-5.5-cp27-cp27m-win32.whl", hash = "sha256:06191eb60f8d8a5bc046f3799f8a07a2d7aefb9504b0209aff0b47298333302a"}, - {file = "coverage-5.5-cp27-cp27m-win_amd64.whl", hash = "sha256:7501140f755b725495941b43347ba8a2777407fc7f250d4f5a7d2a1050ba8e82"}, - {file = "coverage-5.5-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:372da284cfd642d8e08ef606917846fa2ee350f64994bebfbd3afb0040436905"}, - {file = 
"coverage-5.5-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:8963a499849a1fc54b35b1c9f162f4108017b2e6db2c46c1bed93a72262ed083"}, - {file = "coverage-5.5-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:869a64f53488f40fa5b5b9dcb9e9b2962a66a87dab37790f3fcfb5144b996ef5"}, - {file = "coverage-5.5-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:4a7697d8cb0f27399b0e393c0b90f0f1e40c82023ea4d45d22bce7032a5d7b81"}, - {file = "coverage-5.5-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:8d0a0725ad7c1a0bcd8d1b437e191107d457e2ec1084b9f190630a4fb1af78e6"}, - {file = "coverage-5.5-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:51cb9476a3987c8967ebab3f0fe144819781fca264f57f89760037a2ea191cb0"}, - {file = "coverage-5.5-cp310-cp310-win_amd64.whl", hash = "sha256:c0891a6a97b09c1f3e073a890514d5012eb256845c451bd48f7968ef939bf4ae"}, - {file = "coverage-5.5-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:3487286bc29a5aa4b93a072e9592f22254291ce96a9fbc5251f566b6b7343cdb"}, - {file = "coverage-5.5-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:deee1077aae10d8fa88cb02c845cfba9b62c55e1183f52f6ae6a2df6a2187160"}, - {file = "coverage-5.5-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:f11642dddbb0253cc8853254301b51390ba0081750a8ac03f20ea8103f0c56b6"}, - {file = "coverage-5.5-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:6c90e11318f0d3c436a42409f2749ee1a115cd8b067d7f14c148f1ce5574d701"}, - {file = "coverage-5.5-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:30c77c1dc9f253283e34c27935fded5015f7d1abe83bc7821680ac444eaf7793"}, - {file = "coverage-5.5-cp35-cp35m-win32.whl", hash = "sha256:9a1ef3b66e38ef8618ce5fdc7bea3d9f45f3624e2a66295eea5e57966c85909e"}, - {file = "coverage-5.5-cp35-cp35m-win_amd64.whl", hash = "sha256:972c85d205b51e30e59525694670de6a8a89691186012535f9d7dbaa230e42c3"}, - {file = "coverage-5.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:af0e781009aaf59e25c5a678122391cb0f345ac0ec272c7961dc5455e1c40066"}, - {file = 
"coverage-5.5-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:74d881fc777ebb11c63736622b60cb9e4aee5cace591ce274fb69e582a12a61a"}, - {file = "coverage-5.5-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:92b017ce34b68a7d67bd6d117e6d443a9bf63a2ecf8567bb3d8c6c7bc5014465"}, - {file = "coverage-5.5-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:d636598c8305e1f90b439dbf4f66437de4a5e3c31fdf47ad29542478c8508bbb"}, - {file = "coverage-5.5-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:41179b8a845742d1eb60449bdb2992196e211341818565abded11cfa90efb821"}, - {file = "coverage-5.5-cp36-cp36m-win32.whl", hash = "sha256:040af6c32813fa3eae5305d53f18875bedd079960822ef8ec067a66dd8afcd45"}, - {file = "coverage-5.5-cp36-cp36m-win_amd64.whl", hash = "sha256:5fec2d43a2cc6965edc0bb9e83e1e4b557f76f843a77a2496cbe719583ce8184"}, - {file = "coverage-5.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:18ba8bbede96a2c3dde7b868de9dcbd55670690af0988713f0603f037848418a"}, - {file = "coverage-5.5-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:2910f4d36a6a9b4214bb7038d537f015346f413a975d57ca6b43bf23d6563b53"}, - {file = "coverage-5.5-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:f0b278ce10936db1a37e6954e15a3730bea96a0997c26d7fee88e6c396c2086d"}, - {file = "coverage-5.5-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:796c9c3c79747146ebd278dbe1e5c5c05dd6b10cc3bcb8389dfdf844f3ead638"}, - {file = "coverage-5.5-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:53194af30d5bad77fcba80e23a1441c71abfb3e01192034f8246e0d8f99528f3"}, - {file = "coverage-5.5-cp37-cp37m-win32.whl", hash = "sha256:184a47bbe0aa6400ed2d41d8e9ed868b8205046518c52464fde713ea06e3a74a"}, - {file = "coverage-5.5-cp37-cp37m-win_amd64.whl", hash = "sha256:2949cad1c5208b8298d5686d5a85b66aae46d73eec2c3e08c817dd3513e5848a"}, - {file = "coverage-5.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:217658ec7187497e3f3ebd901afdca1af062b42cfe3e0dafea4cced3983739f6"}, - {file = "coverage-5.5-cp38-cp38-manylinux1_i686.whl", 
hash = "sha256:1aa846f56c3d49205c952d8318e76ccc2ae23303351d9270ab220004c580cfe2"}, - {file = "coverage-5.5-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:24d4a7de75446be83244eabbff746d66b9240ae020ced65d060815fac3423759"}, - {file = "coverage-5.5-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:d1f8bf7b90ba55699b3a5e44930e93ff0189aa27186e96071fac7dd0d06a1873"}, - {file = "coverage-5.5-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:970284a88b99673ccb2e4e334cfb38a10aab7cd44f7457564d11898a74b62d0a"}, - {file = "coverage-5.5-cp38-cp38-win32.whl", hash = "sha256:01d84219b5cdbfc8122223b39a954820929497a1cb1422824bb86b07b74594b6"}, - {file = "coverage-5.5-cp38-cp38-win_amd64.whl", hash = "sha256:2e0d881ad471768bf6e6c2bf905d183543f10098e3b3640fc029509530091502"}, - {file = "coverage-5.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d1f9ce122f83b2305592c11d64f181b87153fc2c2bbd3bb4a3dde8303cfb1a6b"}, - {file = "coverage-5.5-cp39-cp39-manylinux1_i686.whl", hash = "sha256:13c4ee887eca0f4c5a247b75398d4114c37882658300e153113dafb1d76de529"}, - {file = "coverage-5.5-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:52596d3d0e8bdf3af43db3e9ba8dcdaac724ba7b5ca3f6358529d56f7a166f8b"}, - {file = "coverage-5.5-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:2cafbbb3af0733db200c9b5f798d18953b1a304d3f86a938367de1567f4b5bff"}, - {file = "coverage-5.5-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:44d654437b8ddd9eee7d1eaee28b7219bec228520ff809af170488fd2fed3e2b"}, - {file = "coverage-5.5-cp39-cp39-win32.whl", hash = "sha256:d314ed732c25d29775e84a960c3c60808b682c08d86602ec2c3008e1202e3bb6"}, - {file = "coverage-5.5-cp39-cp39-win_amd64.whl", hash = "sha256:13034c4409db851670bc9acd836243aeee299949bd5673e11844befcb0149f03"}, - {file = "coverage-5.5-pp36-none-any.whl", hash = "sha256:f030f8873312a16414c0d8e1a1ddff2d3235655a2174e3648b4fa66b3f2f1079"}, - {file = "coverage-5.5-pp37-none-any.whl", hash = "sha256:2a3859cb82dcbda1cfd3e6f71c27081d18aa251d20a17d87d26d4cd216fb0af4"}, - 
{file = "coverage-5.5.tar.gz", hash = "sha256:ebe78fe9a0e874362175b02371bdfbee64d8edc42a044253ddf4ee7d3c15212c"}, + {file = "coverage-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1549e1d08ce38259de2bc3e9a0d5f3642ff4a8f500ffc1b2df73fd621a6cdfc0"}, + {file = "coverage-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bcae10fccb27ca2a5f456bf64d84110a5a74144be3136a5e598f9d9fb48c0caa"}, + {file = "coverage-6.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:53a294dc53cfb39c74758edaa6305193fb4258a30b1f6af24b360a6c8bd0ffa7"}, + {file = "coverage-6.0.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8251b37be1f2cd9c0e5ccd9ae0380909c24d2a5ed2162a41fcdbafaf59a85ebd"}, + {file = "coverage-6.0.2-cp310-cp310-win32.whl", hash = "sha256:db42baa892cba723326284490283a68d4de516bfb5aaba369b4e3b2787a778b7"}, + {file = "coverage-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:bbffde2a68398682623d9dd8c0ca3f46fda074709b26fcf08ae7a4c431a6ab2d"}, + {file = "coverage-6.0.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:60e51a3dd55540bec686d7fff61b05048ca31e804c1f32cbb44533e6372d9cc3"}, + {file = "coverage-6.0.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a6a9409223a27d5ef3cca57dd7cd4dfcb64aadf2fad5c3b787830ac9223e01a"}, + {file = "coverage-6.0.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4b34ae4f51bbfa5f96b758b55a163d502be3dcb24f505d0227858c2b3f94f5b9"}, + {file = "coverage-6.0.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3bbda1b550e70fa6ac40533d3f23acd4f4e9cb4e6e77251ce77fdf41b3309fb2"}, + {file = "coverage-6.0.2-cp36-cp36m-win32.whl", hash = "sha256:4e28d2a195c533b58fc94a12826f4431726d8eb029ac21d874345f943530c122"}, + {file = 
"coverage-6.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:a82d79586a0a4f5fd1cf153e647464ced402938fbccb3ffc358c7babd4da1dd9"}, + {file = "coverage-6.0.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3be1206dc09fb6298de3fce70593e27436862331a85daee36270b6d0e1c251c4"}, + {file = "coverage-6.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9cd3828bbe1a40070c11fe16a51df733fd2f0cb0d745fb83b7b5c1f05967df7"}, + {file = "coverage-6.0.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d036dc1ed8e1388e995833c62325df3f996675779541f682677efc6af71e96cc"}, + {file = "coverage-6.0.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:04560539c19ec26995ecfb3d9307ff154fbb9a172cb57e3b3cfc4ced673103d1"}, + {file = "coverage-6.0.2-cp37-cp37m-win32.whl", hash = "sha256:e4fb7ced4d9dec77d6cf533acfbf8e1415fe799430366affb18d69ee8a3c6330"}, + {file = "coverage-6.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:77b1da5767ed2f44611bc9bc019bc93c03fa495728ec389759b6e9e5039ac6b1"}, + {file = "coverage-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:61b598cbdbaae22d9e34e3f675997194342f866bb1d781da5d0be54783dce1ff"}, + {file = "coverage-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36e9040a43d2017f2787b28d365a4bb33fcd792c7ff46a047a04094dc0e2a30d"}, + {file = "coverage-6.0.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9f1627e162e3864a596486774876415a7410021f4b67fd2d9efdf93ade681afc"}, + {file = "coverage-6.0.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e7a0b42db2a47ecb488cde14e0f6c7679a2c5a9f44814393b162ff6397fcdfbb"}, + {file = "coverage-6.0.2-cp38-cp38-win32.whl", hash = "sha256:a1b73c7c4d2a42b9d37dd43199c5711d91424ff3c6c22681bc132db4a4afec6f"}, + {file = 
"coverage-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:1db67c497688fd4ba85b373b37cc52c50d437fd7267520ecd77bddbd89ea22c9"}, + {file = "coverage-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f2f184bf38e74f152eed7f87e345b51f3ab0b703842f447c22efe35e59942c24"}, + {file = "coverage-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd1cf1deb3d5544bd942356364a2fdc8959bad2b6cf6eb17f47d301ea34ae822"}, + {file = "coverage-6.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ad9b8c1206ae41d46ec7380b78ba735ebb77758a650643e841dd3894966c31d0"}, + {file = "coverage-6.0.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:381d773d896cc7f8ba4ff3b92dee4ed740fb88dfe33b6e42efc5e8ab6dfa1cfe"}, + {file = "coverage-6.0.2-cp39-cp39-win32.whl", hash = "sha256:424c44f65e8be58b54e2b0bd1515e434b940679624b1b72726147cfc6a9fc7ce"}, + {file = "coverage-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:abbff240f77347d17306d3201e14431519bf64495648ca5a49571f988f88dee9"}, + {file = "coverage-6.0.2-pp36-none-any.whl", hash = "sha256:7092eab374346121805fb637572483270324407bf150c30a3b161fc0c4ca5164"}, + {file = "coverage-6.0.2-pp37-none-any.whl", hash = "sha256:30922626ce6f7a5a30bdba984ad21021529d3d05a68b4f71ea3b16bda35b8895"}, + {file = "coverage-6.0.2.tar.gz", hash = "sha256:6807947a09510dc31fa86f43595bf3a14017cd60bf633cc746d52141bfa6b149"}, ] cryptography = [ - {file = "cryptography-3.4.7-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:3d8427734c781ea5f1b41d6589c293089704d4759e34597dce91014ac125aad1"}, - {file = "cryptography-3.4.7-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:8e56e16617872b0957d1c9742a3f94b43533447fd78321514abbe7db216aa250"}, - {file = "cryptography-3.4.7-cp36-abi3-manylinux2010_x86_64.whl", hash = "sha256:37340614f8a5d2fb9aeea67fd159bfe4f5f4ed535b1090ce8ec428b2f15a11f2"}, - {file = 
"cryptography-3.4.7-cp36-abi3-manylinux2014_aarch64.whl", hash = "sha256:240f5c21aef0b73f40bb9f78d2caff73186700bf1bc6b94285699aff98cc16c6"}, - {file = "cryptography-3.4.7-cp36-abi3-manylinux2014_x86_64.whl", hash = "sha256:1e056c28420c072c5e3cb36e2b23ee55e260cb04eee08f702e0edfec3fb51959"}, - {file = "cryptography-3.4.7-cp36-abi3-win32.whl", hash = "sha256:0f1212a66329c80d68aeeb39b8a16d54ef57071bf22ff4e521657b27372e327d"}, - {file = "cryptography-3.4.7-cp36-abi3-win_amd64.whl", hash = "sha256:de4e5f7f68220d92b7637fc99847475b59154b7a1b3868fb7385337af54ac9ca"}, - {file = "cryptography-3.4.7-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:26965837447f9c82f1855e0bc8bc4fb910240b6e0d16a664bb722df3b5b06873"}, - {file = "cryptography-3.4.7-pp36-pypy36_pp73-manylinux2014_x86_64.whl", hash = "sha256:eb8cc2afe8b05acbd84a43905832ec78e7b3873fb124ca190f574dca7389a87d"}, - {file = "cryptography-3.4.7-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b01fd6f2737816cb1e08ed4807ae194404790eac7ad030b34f2ce72b332f5586"}, - {file = "cryptography-3.4.7-pp37-pypy37_pp73-manylinux2010_x86_64.whl", hash = "sha256:7ec5d3b029f5fa2b179325908b9cd93db28ab7b85bb6c1db56b10e0b54235177"}, - {file = "cryptography-3.4.7-pp37-pypy37_pp73-manylinux2014_x86_64.whl", hash = "sha256:ee77aa129f481be46f8d92a1a7db57269a2f23052d5f2433b4621bb457081cc9"}, - {file = "cryptography-3.4.7-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:bf40af59ca2465b24e54f671b2de2c59257ddc4f7e5706dbd6930e26823668d3"}, - {file = "cryptography-3.4.7.tar.gz", hash = "sha256:3d10de8116d25649631977cb37da6cbdd2d6fa0e0281d014a5b7d337255ca713"}, + {file = "cryptography-35.0.0-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:d57e0cdc1b44b6cdf8af1d01807db06886f10177469312fbde8f44ccbb284bc9"}, + {file = "cryptography-35.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:ced40344e811d6abba00295ced98c01aecf0c2de39481792d87af4fa58b7b4d6"}, + {file = 
"cryptography-35.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:54b2605e5475944e2213258e0ab8696f4f357a31371e538ef21e8d61c843c28d"}, + {file = "cryptography-35.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:7b7ceeff114c31f285528ba8b390d3e9cfa2da17b56f11d366769a807f17cbaa"}, + {file = "cryptography-35.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d69645f535f4b2c722cfb07a8eab916265545b3475fdb34e0be2f4ee8b0b15e"}, + {file = "cryptography-35.0.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a2d0e0acc20ede0f06ef7aa58546eee96d2592c00f450c9acb89c5879b61992"}, + {file = "cryptography-35.0.0-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:07bb7fbfb5de0980590ddfc7f13081520def06dc9ed214000ad4372fb4e3c7f6"}, + {file = "cryptography-35.0.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:7eba2cebca600a7806b893cb1d541a6e910afa87e97acf2021a22b32da1df52d"}, + {file = "cryptography-35.0.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:18d90f4711bf63e2fb21e8c8e51ed8189438e6b35a6d996201ebd98a26abbbe6"}, + {file = "cryptography-35.0.0-cp36-abi3-win32.whl", hash = "sha256:c10c797ac89c746e488d2ee92bd4abd593615694ee17b2500578b63cad6b93a8"}, + {file = "cryptography-35.0.0-cp36-abi3-win_amd64.whl", hash = "sha256:7075b304cd567694dc692ffc9747f3e9cb393cc4aa4fb7b9f3abd6f5c4e43588"}, + {file = "cryptography-35.0.0-pp36-pypy36_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a688ebcd08250eab5bb5bca318cc05a8c66de5e4171a65ca51db6bd753ff8953"}, + {file = "cryptography-35.0.0-pp36-pypy36_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d99915d6ab265c22873f1b4d6ea5ef462ef797b4140be4c9d8b179915e0985c6"}, + {file = "cryptography-35.0.0-pp36-pypy36_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:928185a6d1ccdb816e883f56ebe92e975a262d31cc536429041921f8cb5a62fd"}, + {file = 
"cryptography-35.0.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ebeddd119f526bcf323a89f853afb12e225902a24d29b55fe18dd6fcb2838a76"}, + {file = "cryptography-35.0.0-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:22a38e96118a4ce3b97509443feace1d1011d0571fae81fc3ad35f25ba3ea999"}, + {file = "cryptography-35.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb80e8a1f91e4b7ef8b33041591e6d89b2b8e122d787e87eeb2b08da71bb16ad"}, + {file = "cryptography-35.0.0-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:abb5a361d2585bb95012a19ed9b2c8f412c5d723a9836418fab7aaa0243e67d2"}, + {file = "cryptography-35.0.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:1ed82abf16df40a60942a8c211251ae72858b25b7421ce2497c2eb7a1cee817c"}, + {file = "cryptography-35.0.0.tar.gz", hash = "sha256:9933f28f70d0517686bd7de36166dda42094eac49415459d9bdf5e7df3e0086d"}, ] cx-freeze = [] cx-logging = [ @@ -1812,13 +1842,13 @@ dnspython = [ {file = "dnspython-2.1.0.zip", hash = "sha256:e4a87f0b573201a0f3727fa18a516b055fd1107e0e5477cded4a2de497df1dd4"}, ] docutils = [ - {file = "docutils-0.16-py2.py3-none-any.whl", hash = "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af"}, - {file = "docutils-0.16.tar.gz", hash = "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc"}, + {file = "docutils-0.18-py2.py3-none-any.whl", hash = "sha256:a31688b2ea858517fa54293e5d5df06fbb875fb1f7e4c64529271b77781ca8fc"}, + {file = "docutils-0.18.tar.gz", hash = "sha256:c1d5dab2b11d16397406a282e53953fe495a46d69ae329f55aa98a5c4e3c5fbb"}, ] dropbox = [ - {file = "dropbox-11.20.0-py2-none-any.whl", hash = "sha256:0926aab25445fe78b0284e0b86f4126ec4e5e2bf6cd2ac8562002008a21073b8"}, - {file = "dropbox-11.20.0-py3-none-any.whl", hash = "sha256:f2106aa566f9e3c175879c226c60b7089a39099b228061acbb7258670f6b859c"}, - {file = "dropbox-11.20.0.tar.gz", hash = 
"sha256:1aa351ec8bbb11cf3560e731b81d25f39c7edcb5fa92c06c5d68866cb9f90d54"}, + {file = "dropbox-11.22.0-py2-none-any.whl", hash = "sha256:f2efc924529be2e2e2a1d6f49246b25966c201b23dda231dfb148a6f5ae1a149"}, + {file = "dropbox-11.22.0-py3-none-any.whl", hash = "sha256:0a9cc253391cae7fccf1954da75edf8459d6567ba764e21b471019f0fa001ab4"}, + {file = "dropbox-11.22.0.tar.gz", hash = "sha256:ab84c9c78606faa0dc94cdb95c6b2bdb579beb5f34fff42091c98a1e0fbeb16c"}, ] enlighten = [ {file = "enlighten-1.10.1-py2.py3-none-any.whl", hash = "sha256:3d6c3eec8cf3eb626ee7b65eddc1b3e904d01f4547a2b9fe7f1da8892a0297e8"}, @@ -1839,24 +1869,24 @@ future = [ {file = "future-0.18.2.tar.gz", hash = "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d"}, ] gitdb = [ - {file = "gitdb-4.0.7-py3-none-any.whl", hash = "sha256:6c4cc71933456991da20917998acbe6cf4fb41eeaab7d6d67fbc05ecd4c865b0"}, - {file = "gitdb-4.0.7.tar.gz", hash = "sha256:96bf5c08b157a666fec41129e6d327235284cca4c81e92109260f353ba138005"}, + {file = "gitdb-4.0.9-py3-none-any.whl", hash = "sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd"}, + {file = "gitdb-4.0.9.tar.gz", hash = "sha256:bac2fd45c0a1c9cf619e63a90d62bdc63892ef92387424b855792a6cabe789aa"}, ] gitpython = [ - {file = "GitPython-3.1.17-py3-none-any.whl", hash = "sha256:29fe82050709760081f588dd50ce83504feddbebdc4da6956d02351552b1c135"}, - {file = "GitPython-3.1.17.tar.gz", hash = "sha256:ee24bdc93dce357630764db659edaf6b8d664d4ff5447ccfeedd2dc5c253f41e"}, + {file = "GitPython-3.1.24-py3-none-any.whl", hash = "sha256:dc0a7f2f697657acc8d7f89033e8b1ea94dd90356b2983bca89dc8d2ab3cc647"}, + {file = "GitPython-3.1.24.tar.gz", hash = "sha256:df83fdf5e684fef7c6ee2c02fc68a5ceb7e7e759d08b694088d0cacb4eba59e5"}, ] google-api-core = [ - {file = "google-api-core-1.30.0.tar.gz", hash = "sha256:0724d354d394b3d763bc10dfee05807813c5210f0bd9b8e2ddf6b6925603411c"}, - {file = "google_api_core-1.30.0-py2.py3-none-any.whl", hash = 
"sha256:92cd9e9f366e84bfcf2524e34d2dc244906c645e731962617ba620da1620a1e0"}, + {file = "google-api-core-1.31.3.tar.gz", hash = "sha256:4b7ad965865aef22afa4aded3318b8fa09b20bcc7e8dbb639a3753cf60af08ea"}, + {file = "google_api_core-1.31.3-py2.py3-none-any.whl", hash = "sha256:f52c708ab9fd958862dea9ac94d9db1a065608073fe583c3b9c18537b177f59a"}, ] google-api-python-client = [ {file = "google-api-python-client-1.12.8.tar.gz", hash = "sha256:f3b9684442eec2cfe9f9bb48e796ef919456b82142c7528c5fd527e5224f08bb"}, {file = "google_api_python_client-1.12.8-py2.py3-none-any.whl", hash = "sha256:3c4c4ca46b5c21196bec7ee93453443e477d82cbfa79234d1ce0645f81170eaf"}, ] google-auth = [ - {file = "google-auth-1.31.0.tar.gz", hash = "sha256:154f7889c5d679a6f626f36adb12afbd4dbb0a9a04ec575d989d6ba79c4fd65e"}, - {file = "google_auth-1.31.0-py2.py3-none-any.whl", hash = "sha256:6d47c79b5d09fbc7e8355fd9594cc4cf65fdde5d401c63951eaac4baa1ba2ae1"}, + {file = "google-auth-1.35.0.tar.gz", hash = "sha256:b7033be9028c188ee30200b204ea00ed82ea1162e8ac1df4aa6ded19a191d88e"}, + {file = "google_auth-1.35.0-py2.py3-none-any.whl", hash = "sha256:997516b42ecb5b63e8d80f5632c1a61dddf41d2a4c2748057837e06e00014258"}, ] google-auth-httplib2 = [ {file = "google-auth-httplib2-0.1.0.tar.gz", hash = "sha256:a07c39fd632becacd3f07718dfd6021bf396978f03ad3ce4321d060015cc30ac"}, @@ -1867,8 +1897,8 @@ googleapis-common-protos = [ {file = "googleapis_common_protos-1.53.0-py2.py3-none-any.whl", hash = "sha256:f6d561ab8fb16b30020b940e2dd01cd80082f4762fa9f3ee670f4419b4b8dbd0"}, ] httplib2 = [ - {file = "httplib2-0.19.1-py3-none-any.whl", hash = "sha256:2ad195faf9faf079723f6714926e9a9061f694d07724b846658ce08d40f522b4"}, - {file = "httplib2-0.19.1.tar.gz", hash = "sha256:0b12617eeca7433d4c396a100eaecfa4b08ee99aa881e6df6e257a7aad5d533d"}, + {file = "httplib2-0.20.1-py3-none-any.whl", hash = "sha256:8fa4dbf2fbf839b71f8c7837a831e00fcdc860feca99b8bda58ceae4bc53d185"}, + {file = "httplib2-0.20.1.tar.gz", hash = 
"sha256:0efbcb8bfbfbc11578130d87d8afcc65c2274c6eb446e59fc674e4d7c972d327"}, ] idna = [ {file = "idna-2.10-py2.py3-none-any.whl", hash = "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0"}, @@ -1879,24 +1909,24 @@ imagesize = [ {file = "imagesize-1.2.0.tar.gz", hash = "sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1"}, ] importlib-metadata = [ - {file = "importlib_metadata-4.5.0-py3-none-any.whl", hash = "sha256:833b26fb89d5de469b24a390e9df088d4e52e4ba33b01dc5e0e4f41b81a16c00"}, - {file = "importlib_metadata-4.5.0.tar.gz", hash = "sha256:b142cc1dd1342f31ff04bb7d022492b09920cb64fed867cd3ea6f80fe3ebd139"}, + {file = "importlib_metadata-4.8.1-py3-none-any.whl", hash = "sha256:b618b6d2d5ffa2f16add5697cf57a46c76a56229b0ed1c438322e4e95645bd15"}, + {file = "importlib_metadata-4.8.1.tar.gz", hash = "sha256:f284b3e11256ad1e5d03ab86bb2ccd6f5339688ff17a4d797a0fe7df326f23b1"}, ] iniconfig = [ {file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"}, {file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"}, ] isort = [ - {file = "isort-5.8.0-py3-none-any.whl", hash = "sha256:2bb1680aad211e3c9944dbce1d4ba09a989f04e238296c87fe2139faa26d655d"}, - {file = "isort-5.8.0.tar.gz", hash = "sha256:0a943902919f65c5684ac4e0154b1ad4fac6dcaa5d9f3426b732f1c8b5419be6"}, + {file = "isort-5.9.3-py3-none-any.whl", hash = "sha256:e17d6e2b81095c9db0a03a8025a957f334d6ea30b26f9ec70805411e5c7c81f2"}, + {file = "isort-5.9.3.tar.gz", hash = "sha256:9c2ea1e62d871267b78307fe511c0838ba0da28698c5732d54e2790bf3ba9899"}, ] jedi = [ {file = "jedi-0.13.3-py2.py3-none-any.whl", hash = "sha256:2c6bcd9545c7d6440951b12b44d373479bf18123a401a52025cf98563fbd826c"}, {file = "jedi-0.13.3.tar.gz", hash = "sha256:2bb0603e3506f708e792c7f4ad8fc2a7a9d9c2d292a358fbbd58da531695595b"}, ] jeepney = [ - {file = "jeepney-0.6.0-py3-none-any.whl", hash = 
"sha256:aec56c0eb1691a841795111e184e13cad504f7703b9a64f63020816afa79a8ae"}, - {file = "jeepney-0.6.0.tar.gz", hash = "sha256:7d59b6622675ca9e993a6bd38de845051d315f8b0c72cca3aef733a20b648657"}, + {file = "jeepney-0.7.1-py3-none-any.whl", hash = "sha256:1b5a0ea5c0e7b166b2f5895b91a08c14de8915afda4407fb5022a195224958ac"}, + {file = "jeepney-0.7.1.tar.gz", hash = "sha256:fa9e232dfa0c498bd0b8a3a73b8d8a31978304dcef0515adc859d4e096f96f4f"}, ] jinja2 = [ {file = "Jinja2-2.11.3-py2.py3-none-any.whl", hash = "sha256:03e47ad063331dd6a3f04a43eddca8a966a26ba0c5b7207a9a9e4e08f1b29419"}, @@ -1942,22 +1972,12 @@ log4mongo = [ {file = "log4mongo-1.7.0.tar.gz", hash = "sha256:dc374617206162a0b14167fbb5feac01dbef587539a235dadba6200362984a68"}, ] markupsafe = [ - {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-win32.whl", hash = "sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = 
"sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-win32.whl", hash = "sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567"}, @@ -1966,21 +1986,14 @@ markupsafe = [ {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f"}, {file = 
"MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-win32.whl", hash = "sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9"}, {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_x86_64.whl", hash = 
"sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6"}, {file = "MarkupSafe-2.0.1-cp38-cp38-win32.whl", hash = "sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64"}, {file = "MarkupSafe-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833"}, {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26"}, @@ -1990,9 +2003,6 @@ markupsafe = [ {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135"}, {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902"}, {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = 
"sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6"}, {file = "MarkupSafe-2.0.1-cp39-cp39-win32.whl", hash = "sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74"}, {file = "MarkupSafe-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8"}, {file = "MarkupSafe-2.0.1.tar.gz", hash = "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a"}, @@ -2002,119 +2012,146 @@ mccabe = [ {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, ] multidict = [ - {file = "multidict-5.1.0-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f"}, - {file = "multidict-5.1.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf"}, - {file = "multidict-5.1.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281"}, - {file = "multidict-5.1.0-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d"}, - {file = "multidict-5.1.0-cp36-cp36m-manylinux2014_ppc64le.whl", hash = "sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d"}, - {file = "multidict-5.1.0-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da"}, - {file = "multidict-5.1.0-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224"}, - {file = "multidict-5.1.0-cp36-cp36m-win32.whl", hash = "sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26"}, - 
{file = "multidict-5.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6"}, - {file = "multidict-5.1.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76"}, - {file = "multidict-5.1.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a"}, - {file = "multidict-5.1.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f"}, - {file = "multidict-5.1.0-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348"}, - {file = "multidict-5.1.0-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93"}, - {file = "multidict-5.1.0-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9"}, - {file = "multidict-5.1.0-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37"}, - {file = "multidict-5.1.0-cp37-cp37m-win32.whl", hash = "sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5"}, - {file = "multidict-5.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632"}, - {file = "multidict-5.1.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952"}, - {file = "multidict-5.1.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79"}, - {file = "multidict-5.1.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456"}, - {file = "multidict-5.1.0-cp38-cp38-manylinux2014_i686.whl", hash = 
"sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7"}, - {file = "multidict-5.1.0-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635"}, - {file = "multidict-5.1.0-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a"}, - {file = "multidict-5.1.0-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea"}, - {file = "multidict-5.1.0-cp38-cp38-win32.whl", hash = "sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656"}, - {file = "multidict-5.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3"}, - {file = "multidict-5.1.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93"}, - {file = "multidict-5.1.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647"}, - {file = "multidict-5.1.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d"}, - {file = "multidict-5.1.0-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8"}, - {file = "multidict-5.1.0-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1"}, - {file = "multidict-5.1.0-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841"}, - {file = "multidict-5.1.0-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda"}, - {file = "multidict-5.1.0-cp39-cp39-win32.whl", hash = "sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80"}, - {file = "multidict-5.1.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359"}, - {file = "multidict-5.1.0.tar.gz", hash = "sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5"}, + {file = "multidict-5.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3822c5894c72e3b35aae9909bef66ec83e44522faf767c0ad39e0e2de11d3b55"}, + {file = "multidict-5.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:28e6d883acd8674887d7edc896b91751dc2d8e87fbdca8359591a13872799e4e"}, + {file = "multidict-5.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b61f85101ef08cbbc37846ac0e43f027f7844f3fade9b7f6dd087178caedeee7"}, + {file = "multidict-5.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9b668c065968c5979fe6b6fa6760bb6ab9aeb94b75b73c0a9c1acf6393ac3bf"}, + {file = "multidict-5.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:517d75522b7b18a3385726b54a081afd425d4f41144a5399e5abd97ccafdf36b"}, + {file = "multidict-5.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1b4ac3ba7a97b35a5ccf34f41b5a8642a01d1e55454b699e5e8e7a99b5a3acf5"}, + {file = "multidict-5.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:df23c83398715b26ab09574217ca21e14694917a0c857e356fd39e1c64f8283f"}, + {file = "multidict-5.2.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e58a9b5cc96e014ddf93c2227cbdeca94b56a7eb77300205d6e4001805391747"}, + {file = "multidict-5.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f76440e480c3b2ca7f843ff8a48dc82446b86ed4930552d736c0bac507498a52"}, + {file = "multidict-5.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:cfde464ca4af42a629648c0b0d79b8f295cf5b695412451716531d6916461628"}, + {file = "multidict-5.2.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = 
"sha256:0fed465af2e0eb6357ba95795d003ac0bdb546305cc2366b1fc8f0ad67cc3fda"}, + {file = "multidict-5.2.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:b70913cbf2e14275013be98a06ef4b412329fe7b4f83d64eb70dce8269ed1e1a"}, + {file = "multidict-5.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a5635bcf1b75f0f6ef3c8a1ad07b500104a971e38d3683167b9454cb6465ac86"}, + {file = "multidict-5.2.0-cp310-cp310-win32.whl", hash = "sha256:77f0fb7200cc7dedda7a60912f2059086e29ff67cefbc58d2506638c1a9132d7"}, + {file = "multidict-5.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:9416cf11bcd73c861267e88aea71e9fcc35302b3943e45e1dbb4317f91a4b34f"}, + {file = "multidict-5.2.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:fd77c8f3cba815aa69cb97ee2b2ef385c7c12ada9c734b0f3b32e26bb88bbf1d"}, + {file = "multidict-5.2.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ec9aea6223adf46999f22e2c0ab6cf33f5914be604a404f658386a8f1fba37"}, + {file = "multidict-5.2.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e5283c0a00f48e8cafcecadebfa0ed1dac8b39e295c7248c44c665c16dc1138b"}, + {file = "multidict-5.2.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5f79c19c6420962eb17c7e48878a03053b7ccd7b69f389d5831c0a4a7f1ac0a1"}, + {file = "multidict-5.2.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e4a67f1080123de76e4e97a18d10350df6a7182e243312426d508712e99988d4"}, + {file = "multidict-5.2.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:94b117e27efd8e08b4046c57461d5a114d26b40824995a2eb58372b94f9fca02"}, + {file = "multidict-5.2.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:2e77282fd1d677c313ffcaddfec236bf23f273c4fba7cdf198108f5940ae10f5"}, + {file = "multidict-5.2.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = 
"sha256:116347c63ba049c1ea56e157fa8aa6edaf5e92925c9b64f3da7769bdfa012858"}, + {file = "multidict-5.2.0-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:dc3a866cf6c13d59a01878cd806f219340f3e82eed514485e094321f24900677"}, + {file = "multidict-5.2.0-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:ac42181292099d91217a82e3fa3ce0e0ddf3a74fd891b7c2b347a7f5aa0edded"}, + {file = "multidict-5.2.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:f0bb0973f42ffcb5e3537548e0767079420aefd94ba990b61cf7bb8d47f4916d"}, + {file = "multidict-5.2.0-cp36-cp36m-win32.whl", hash = "sha256:ea21d4d5104b4f840b91d9dc8cbc832aba9612121eaba503e54eaab1ad140eb9"}, + {file = "multidict-5.2.0-cp36-cp36m-win_amd64.whl", hash = "sha256:e6453f3cbeb78440747096f239d282cc57a2997a16b5197c9bc839099e1633d0"}, + {file = "multidict-5.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d3def943bfd5f1c47d51fd324df1e806d8da1f8e105cc7f1c76a1daf0f7e17b0"}, + {file = "multidict-5.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35591729668a303a02b06e8dba0eb8140c4a1bfd4c4b3209a436a02a5ac1de11"}, + {file = "multidict-5.2.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8cacda0b679ebc25624d5de66c705bc53dcc7c6f02a7fb0f3ca5e227d80422"}, + {file = "multidict-5.2.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:baf1856fab8212bf35230c019cde7c641887e3fc08cadd39d32a421a30151ea3"}, + {file = "multidict-5.2.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a43616aec0f0d53c411582c451f5d3e1123a68cc7b3475d6f7d97a626f8ff90d"}, + {file = "multidict-5.2.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:25cbd39a9029b409167aa0a20d8a17f502d43f2efebfe9e3ac019fe6796c59ac"}, + {file = "multidict-5.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = 
"sha256:0a2cbcfbea6dc776782a444db819c8b78afe4db597211298dd8b2222f73e9cd0"}, + {file = "multidict-5.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3d2d7d1fff8e09d99354c04c3fd5b560fb04639fd45926b34e27cfdec678a704"}, + {file = "multidict-5.2.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:a37e9a68349f6abe24130846e2f1d2e38f7ddab30b81b754e5a1fde32f782b23"}, + {file = "multidict-5.2.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:637c1896497ff19e1ee27c1c2c2ddaa9f2d134bbb5e0c52254361ea20486418d"}, + {file = "multidict-5.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9815765f9dcda04921ba467957be543423e5ec6a1136135d84f2ae092c50d87b"}, + {file = "multidict-5.2.0-cp37-cp37m-win32.whl", hash = "sha256:8b911d74acdc1fe2941e59b4f1a278a330e9c34c6c8ca1ee21264c51ec9b67ef"}, + {file = "multidict-5.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:380b868f55f63d048a25931a1632818f90e4be71d2081c2338fcf656d299949a"}, + {file = "multidict-5.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e7d81ce5744757d2f05fc41896e3b2ae0458464b14b5a2c1e87a6a9d69aefaa8"}, + {file = "multidict-5.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2d1d55cdf706ddc62822d394d1df53573d32a7a07d4f099470d3cb9323b721b6"}, + {file = "multidict-5.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a4771d0d0ac9d9fe9e24e33bed482a13dfc1256d008d101485fe460359476065"}, + {file = "multidict-5.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da7d57ea65744d249427793c042094c4016789eb2562576fb831870f9c878d9e"}, + {file = "multidict-5.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cdd68778f96216596218b4e8882944d24a634d984ee1a5a049b300377878fa7c"}, + {file = "multidict-5.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ecc99bce8ee42dcad15848c7885197d26841cb24fa2ee6e89d23b8993c871c64"}, + {file = "multidict-5.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", 
hash = "sha256:067150fad08e6f2dd91a650c7a49ba65085303fcc3decbd64a57dc13a2733031"}, + {file = "multidict-5.2.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:78c106b2b506b4d895ddc801ff509f941119394b89c9115580014127414e6c2d"}, + {file = "multidict-5.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e6c4fa1ec16e01e292315ba76eb1d012c025b99d22896bd14a66628b245e3e01"}, + {file = "multidict-5.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b227345e4186809d31f22087d0265655114af7cda442ecaf72246275865bebe4"}, + {file = "multidict-5.2.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:06560fbdcf22c9387100979e65b26fba0816c162b888cb65b845d3def7a54c9b"}, + {file = "multidict-5.2.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:7878b61c867fb2df7a95e44b316f88d5a3742390c99dfba6c557a21b30180cac"}, + {file = "multidict-5.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:246145bff76cc4b19310f0ad28bd0769b940c2a49fc601b86bfd150cbd72bb22"}, + {file = "multidict-5.2.0-cp38-cp38-win32.whl", hash = "sha256:c30ac9f562106cd9e8071c23949a067b10211917fdcb75b4718cf5775356a940"}, + {file = "multidict-5.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:f19001e790013ed580abfde2a4465388950728861b52f0da73e8e8a9418533c0"}, + {file = "multidict-5.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c1ff762e2ee126e6f1258650ac641e2b8e1f3d927a925aafcfde943b77a36d24"}, + {file = "multidict-5.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bd6c9c50bf2ad3f0448edaa1a3b55b2e6866ef8feca5d8dbec10ec7c94371d21"}, + {file = "multidict-5.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fc66d4016f6e50ed36fb39cd287a3878ffcebfa90008535c62e0e90a7ab713ae"}, + {file = "multidict-5.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9acb76d5f3dd9421874923da2ed1e76041cb51b9337fd7f507edde1d86535d6"}, + {file = "multidict-5.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:dfc924a7e946dd3c6360e50e8f750d51e3ef5395c95dc054bc9eab0f70df4f9c"}, + {file = "multidict-5.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:32fdba7333eb2351fee2596b756d730d62b5827d5e1ab2f84e6cbb287cc67fe0"}, + {file = "multidict-5.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:b9aad49466b8d828b96b9e3630006234879c8d3e2b0a9d99219b3121bc5cdb17"}, + {file = "multidict-5.2.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:93de39267c4c676c9ebb2057e98a8138bade0d806aad4d864322eee0803140a0"}, + {file = "multidict-5.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f9bef5cff994ca3026fcc90680e326d1a19df9841c5e3d224076407cc21471a1"}, + {file = "multidict-5.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:5f841c4f14331fd1e36cbf3336ed7be2cb2a8f110ce40ea253e5573387db7621"}, + {file = "multidict-5.2.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:38ba256ee9b310da6a1a0f013ef4e422fca30a685bcbec86a969bd520504e341"}, + {file = "multidict-5.2.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:3bc3b1621b979621cee9f7b09f024ec76ec03cc365e638126a056317470bde1b"}, + {file = "multidict-5.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6ee908c070020d682e9b42c8f621e8bb10c767d04416e2ebe44e37d0f44d9ad5"}, + {file = "multidict-5.2.0-cp39-cp39-win32.whl", hash = "sha256:1c7976cd1c157fa7ba5456ae5d31ccdf1479680dc9b8d8aa28afabc370df42b8"}, + {file = "multidict-5.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:c9631c642e08b9fff1c6255487e62971d8b8e821808ddd013d8ac058087591ac"}, + {file = "multidict-5.2.0.tar.gz", hash = "sha256:0dd1c93edb444b33ba2274b66f63def8a327d607c6c790772f448a53b6ea59ce"}, ] opentimelineio = [] packaging = [ - {file = "packaging-20.9-py2.py3-none-any.whl", hash = "sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a"}, - {file = "packaging-20.9.tar.gz", hash = 
"sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5"}, + {file = "packaging-21.2-py3-none-any.whl", hash = "sha256:14317396d1e8cdb122989b916fa2c7e9ca8e2be9e8060a6eff75b6b7b4d8a7e0"}, + {file = "packaging-21.2.tar.gz", hash = "sha256:096d689d78ca690e4cd8a89568ba06d07ca097e3306a4381635073ca91479966"}, ] paramiko = [ - {file = "paramiko-2.7.2-py2.py3-none-any.whl", hash = "sha256:4f3e316fef2ac628b05097a637af35685183111d4bc1b5979bd397c2ab7b5898"}, - {file = "paramiko-2.7.2.tar.gz", hash = "sha256:7f36f4ba2c0d81d219f4595e35f70d56cc94f9ac40a6acdf51d6ca210ce65035"}, + {file = "paramiko-2.8.0-py2.py3-none-any.whl", hash = "sha256:def3ec612399bab4e9f5eb66b0ae5983980db9dd9120d9e9c6ea3ff673865d1c"}, + {file = "paramiko-2.8.0.tar.gz", hash = "sha256:e673b10ee0f1c80d46182d3af7751d033d9b573dd7054d2d0aa46be186c3c1d2"}, ] parso = [ {file = "parso-0.8.2-py2.py3-none-any.whl", hash = "sha256:a8c4922db71e4fdb90e0d0bc6e50f9b273d3397925e5e60a717e719201778d22"}, {file = "parso-0.8.2.tar.gz", hash = "sha256:12b83492c6239ce32ff5eed6d3639d6a536170723c6f3f1506869f1ace413398"}, ] pathlib2 = [ - {file = "pathlib2-2.3.5-py2.py3-none-any.whl", hash = "sha256:0ec8205a157c80d7acc301c0b18fbd5d44fe655968f5d947b6ecef5290fc35db"}, - {file = "pathlib2-2.3.5.tar.gz", hash = "sha256:6cd9a47b597b37cc57de1c05e56fb1a1c9cc9fab04fe78c29acd090418529868"}, + {file = "pathlib2-2.3.6-py2.py3-none-any.whl", hash = "sha256:3a130b266b3a36134dcc79c17b3c7ac9634f083825ca6ea9d8f557ee6195c9c8"}, + {file = "pathlib2-2.3.6.tar.gz", hash = "sha256:7d8bcb5555003cdf4a8d2872c538faa3a0f5d20630cb360e518ca3b981795e5f"}, ] pillow = [ - {file = "Pillow-8.3.2-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:c691b26283c3a31594683217d746f1dad59a7ae1d4cfc24626d7a064a11197d4"}, - {file = "Pillow-8.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f514c2717012859ccb349c97862568fdc0479aad85b0270d6b5a6509dbc142e2"}, - {file = "Pillow-8.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:be25cb93442c6d2f8702c599b51184bd3ccd83adebd08886b682173e09ef0c3f"}, - {file = "Pillow-8.3.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d675a876b295afa114ca8bf42d7f86b5fb1298e1b6bb9a24405a3f6c8338811c"}, - {file = "Pillow-8.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59697568a0455764a094585b2551fd76bfd6b959c9f92d4bdec9d0e14616303a"}, - {file = "Pillow-8.3.2-cp310-cp310-win32.whl", hash = "sha256:2d5e9dc0bf1b5d9048a94c48d0813b6c96fccfa4ccf276d9c36308840f40c228"}, - {file = "Pillow-8.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:11c27e74bab423eb3c9232d97553111cc0be81b74b47165f07ebfdd29d825875"}, - {file = "Pillow-8.3.2-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:11eb7f98165d56042545c9e6db3ce394ed8b45089a67124298f0473b29cb60b2"}, - {file = "Pillow-8.3.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f23b2d3079522fdf3c09de6517f625f7a964f916c956527bed805ac043799b8"}, - {file = "Pillow-8.3.2-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19ec4cfe4b961edc249b0e04b5618666c23a83bc35842dea2bfd5dfa0157f81b"}, - {file = "Pillow-8.3.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5a31c07cea5edbaeb4bdba6f2b87db7d3dc0f446f379d907e51cc70ea375629"}, - {file = "Pillow-8.3.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15ccb81a6ffc57ea0137f9f3ac2737ffa1d11f786244d719639df17476d399a7"}, - {file = "Pillow-8.3.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:8f284dc1695caf71a74f24993b7c7473d77bc760be45f776a2c2f4e04c170550"}, - {file = "Pillow-8.3.2-cp36-cp36m-win32.whl", hash = "sha256:4abc247b31a98f29e5224f2d31ef15f86a71f79c7f4d2ac345a5d551d6393073"}, - {file = "Pillow-8.3.2-cp36-cp36m-win_amd64.whl", hash = "sha256:a048dad5ed6ad1fad338c02c609b862dfaa921fcd065d747194a6805f91f2196"}, - {file = "Pillow-8.3.2-cp37-cp37m-macosx_10_10_x86_64.whl", hash = 
"sha256:06d1adaa284696785375fa80a6a8eb309be722cf4ef8949518beb34487a3df71"}, - {file = "Pillow-8.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd24054aaf21e70a51e2a2a5ed1183560d3a69e6f9594a4bfe360a46f94eba83"}, - {file = "Pillow-8.3.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27a330bf7014ee034046db43ccbb05c766aa9e70b8d6c5260bfc38d73103b0ba"}, - {file = "Pillow-8.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13654b521fb98abdecec105ea3fb5ba863d1548c9b58831dd5105bb3873569f1"}, - {file = "Pillow-8.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a1bd983c565f92779be456ece2479840ec39d386007cd4ae83382646293d681b"}, - {file = "Pillow-8.3.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:4326ea1e2722f3dc00ed77c36d3b5354b8fb7399fb59230249ea6d59cbed90da"}, - {file = "Pillow-8.3.2-cp37-cp37m-win32.whl", hash = "sha256:085a90a99404b859a4b6c3daa42afde17cb3ad3115e44a75f0d7b4a32f06a6c9"}, - {file = "Pillow-8.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:18a07a683805d32826c09acfce44a90bf474e6a66ce482b1c7fcd3757d588df3"}, - {file = "Pillow-8.3.2-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:4e59e99fd680e2b8b11bbd463f3c9450ab799305d5f2bafb74fefba6ac058616"}, - {file = "Pillow-8.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4d89a2e9219a526401015153c0e9dd48319ea6ab9fe3b066a20aa9aee23d9fd3"}, - {file = "Pillow-8.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56fd98c8294f57636084f4b076b75f86c57b2a63a8410c0cd172bc93695ee979"}, - {file = "Pillow-8.3.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b11c9d310a3522b0fd3c35667914271f570576a0e387701f370eb39d45f08a4"}, - {file = "Pillow-8.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0412516dcc9de9b0a1e0ae25a280015809de8270f134cc2c1e32c4eeb397cf30"}, - {file = 
"Pillow-8.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bcb04ff12e79b28be6c9988f275e7ab69f01cc2ba319fb3114f87817bb7c74b6"}, - {file = "Pillow-8.3.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:0b9911ec70731711c3b6ebcde26caea620cbdd9dcb73c67b0730c8817f24711b"}, - {file = "Pillow-8.3.2-cp38-cp38-win32.whl", hash = "sha256:ce2e5e04bb86da6187f96d7bab3f93a7877830981b37f0287dd6479e27a10341"}, - {file = "Pillow-8.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:35d27687f027ad25a8d0ef45dd5208ef044c588003cdcedf05afb00dbc5c2deb"}, - {file = "Pillow-8.3.2-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:04835e68ef12904bc3e1fd002b33eea0779320d4346082bd5b24bec12ad9c3e9"}, - {file = "Pillow-8.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:10e00f7336780ca7d3653cf3ac26f068fa11b5a96894ea29a64d3dc4b810d630"}, - {file = "Pillow-8.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cde7a4d3687f21cffdf5bb171172070bb95e02af448c4c8b2f223d783214056"}, - {file = "Pillow-8.3.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c3ff00110835bdda2b1e2b07f4a2548a39744bb7de5946dc8e95517c4fb2ca6"}, - {file = "Pillow-8.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35d409030bf3bd05fa66fb5fdedc39c521b397f61ad04309c90444e893d05f7d"}, - {file = "Pillow-8.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6bff50ba9891be0a004ef48828e012babaaf7da204d81ab9be37480b9020a82b"}, - {file = "Pillow-8.3.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7dbfbc0020aa1d9bc1b0b8bcf255a7d73f4ad0336f8fd2533fcc54a4ccfb9441"}, - {file = "Pillow-8.3.2-cp39-cp39-win32.whl", hash = "sha256:963ebdc5365d748185fdb06daf2ac758116deecb2277ec5ae98139f93844bc09"}, - {file = "Pillow-8.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:cc9d0dec711c914ed500f1d0d3822868760954dce98dfb0b7382a854aee55d19"}, - {file = 
"Pillow-8.3.2-pp36-pypy36_pp73-macosx_10_10_x86_64.whl", hash = "sha256:2c661542c6f71dfd9dc82d9d29a8386287e82813b0375b3a02983feac69ef864"}, - {file = "Pillow-8.3.2-pp36-pypy36_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:548794f99ff52a73a156771a0402f5e1c35285bd981046a502d7e4793e8facaa"}, - {file = "Pillow-8.3.2-pp36-pypy36_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8b68f565a4175e12e68ca900af8910e8fe48aaa48fd3ca853494f384e11c8bcd"}, - {file = "Pillow-8.3.2-pp36-pypy36_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:838eb85de6d9307c19c655c726f8d13b8b646f144ca6b3771fa62b711ebf7624"}, - {file = "Pillow-8.3.2-pp36-pypy36_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:feb5db446e96bfecfec078b943cc07744cc759893cef045aa8b8b6d6aaa8274e"}, - {file = "Pillow-8.3.2-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:fc0db32f7223b094964e71729c0361f93db43664dd1ec86d3df217853cedda87"}, - {file = "Pillow-8.3.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fd4fd83aa912d7b89b4b4a1580d30e2a4242f3936882a3f433586e5ab97ed0d5"}, - {file = "Pillow-8.3.2-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d0c8ebbfd439c37624db98f3877d9ed12c137cadd99dde2d2eae0dab0bbfc355"}, - {file = "Pillow-8.3.2-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6cb3dd7f23b044b0737317f892d399f9e2f0b3a02b22b2c692851fb8120d82c6"}, - {file = "Pillow-8.3.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a66566f8a22561fc1a88dc87606c69b84fa9ce724f99522cf922c801ec68f5c1"}, - {file = "Pillow-8.3.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ce651ca46d0202c302a535d3047c55a0131a720cf554a578fc1b8a2aff0e7d96"}, - {file = "Pillow-8.3.2.tar.gz", hash = "sha256:dde3f3ed8d00c72631bc19cbfff8ad3b6215062a5eed402381ad365f82f0c18c"}, + {file = "Pillow-8.4.0-cp310-cp310-macosx_10_10_universal2.whl", hash = 
"sha256:81f8d5c81e483a9442d72d182e1fb6dcb9723f289a57e8030811bac9ea3fef8d"}, + {file = "Pillow-8.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3f97cfb1e5a392d75dd8b9fd274d205404729923840ca94ca45a0af57e13dbe6"}, + {file = "Pillow-8.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb9fc393f3c61f9054e1ed26e6fe912c7321af2f41ff49d3f83d05bacf22cc78"}, + {file = "Pillow-8.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d82cdb63100ef5eedb8391732375e6d05993b765f72cb34311fab92103314649"}, + {file = "Pillow-8.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62cc1afda735a8d109007164714e73771b499768b9bb5afcbbee9d0ff374b43f"}, + {file = "Pillow-8.4.0-cp310-cp310-win32.whl", hash = "sha256:e3dacecfbeec9a33e932f00c6cd7996e62f53ad46fbe677577394aaa90ee419a"}, + {file = "Pillow-8.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:620582db2a85b2df5f8a82ddeb52116560d7e5e6b055095f04ad828d1b0baa39"}, + {file = "Pillow-8.4.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:1bc723b434fbc4ab50bb68e11e93ce5fb69866ad621e3c2c9bdb0cd70e345f55"}, + {file = "Pillow-8.4.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72cbcfd54df6caf85cc35264c77ede902452d6df41166010262374155947460c"}, + {file = "Pillow-8.4.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70ad9e5c6cb9b8487280a02c0ad8a51581dcbbe8484ce058477692a27c151c0a"}, + {file = "Pillow-8.4.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25a49dc2e2f74e65efaa32b153527fc5ac98508d502fa46e74fa4fd678ed6645"}, + {file = "Pillow-8.4.0-cp36-cp36m-win32.whl", hash = "sha256:93ce9e955cc95959df98505e4608ad98281fff037350d8c2671c9aa86bcf10a9"}, + {file = "Pillow-8.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:2e4440b8f00f504ee4b53fe30f4e381aae30b0568193be305256b1462216feff"}, + {file = "Pillow-8.4.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = 
"sha256:8c803ac3c28bbc53763e6825746f05cc407b20e4a69d0122e526a582e3b5e153"}, + {file = "Pillow-8.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8a17b5d948f4ceeceb66384727dde11b240736fddeda54ca740b9b8b1556b29"}, + {file = "Pillow-8.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1394a6ad5abc838c5cd8a92c5a07535648cdf6d09e8e2d6df916dfa9ea86ead8"}, + {file = "Pillow-8.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:792e5c12376594bfcb986ebf3855aa4b7c225754e9a9521298e460e92fb4a488"}, + {file = "Pillow-8.4.0-cp37-cp37m-win32.whl", hash = "sha256:d99ec152570e4196772e7a8e4ba5320d2d27bf22fdf11743dd882936ed64305b"}, + {file = "Pillow-8.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:7b7017b61bbcdd7f6363aeceb881e23c46583739cb69a3ab39cb384f6ec82e5b"}, + {file = "Pillow-8.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:d89363f02658e253dbd171f7c3716a5d340a24ee82d38aab9183f7fdf0cdca49"}, + {file = "Pillow-8.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0a0956fdc5defc34462bb1c765ee88d933239f9a94bc37d132004775241a7585"}, + {file = "Pillow-8.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b7bb9de00197fb4261825c15551adf7605cf14a80badf1761d61e59da347779"}, + {file = "Pillow-8.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72b9e656e340447f827885b8d7a15fc8c4e68d410dc2297ef6787eec0f0ea409"}, + {file = "Pillow-8.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5a4532a12314149d8b4e4ad8ff09dde7427731fcfa5917ff16d0291f13609df"}, + {file = "Pillow-8.4.0-cp38-cp38-win32.whl", hash = "sha256:82aafa8d5eb68c8463b6e9baeb4f19043bb31fefc03eb7b216b51e6a9981ae09"}, + {file = "Pillow-8.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:066f3999cb3b070a95c3652712cffa1a748cd02d60ad7b4e485c3748a04d9d76"}, + {file = "Pillow-8.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = 
"sha256:5503c86916d27c2e101b7f71c2ae2cddba01a2cf55b8395b0255fd33fa4d1f1a"}, + {file = "Pillow-8.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4acc0985ddf39d1bc969a9220b51d94ed51695d455c228d8ac29fcdb25810e6e"}, + {file = "Pillow-8.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b052a619a8bfcf26bd8b3f48f45283f9e977890263e4571f2393ed8898d331b"}, + {file = "Pillow-8.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:493cb4e415f44cd601fcec11c99836f707bb714ab03f5ed46ac25713baf0ff20"}, + {file = "Pillow-8.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8831cb7332eda5dc89b21a7bce7ef6ad305548820595033a4b03cf3091235ed"}, + {file = "Pillow-8.4.0-cp39-cp39-win32.whl", hash = "sha256:5e9ac5f66616b87d4da618a20ab0a38324dbe88d8a39b55be8964eb520021e02"}, + {file = "Pillow-8.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:3eb1ce5f65908556c2d8685a8f0a6e989d887ec4057326f6c22b24e8a172c66b"}, + {file = "Pillow-8.4.0-pp36-pypy36_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ddc4d832a0f0b4c52fff973a0d44b6c99839a9d016fe4e6a1cb8f3eea96479c2"}, + {file = "Pillow-8.4.0-pp36-pypy36_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a3e5ddc44c14042f0844b8cf7d2cd455f6cc80fd7f5eefbe657292cf601d9ad"}, + {file = "Pillow-8.4.0-pp36-pypy36_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c70e94281588ef053ae8998039610dbd71bc509e4acbc77ab59d7d2937b10698"}, + {file = "Pillow-8.4.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:3862b7256046fcd950618ed22d1d60b842e3a40a48236a5498746f21189afbbc"}, + {file = "Pillow-8.4.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4901622493f88b1a29bd30ec1a2f683782e57c3c16a2dbc7f2595ba01f639df"}, + {file = "Pillow-8.4.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84c471a734240653a0ec91dec0996696eea227eafe72a33bd06c92697728046b"}, + {file = 
"Pillow-8.4.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:244cf3b97802c34c41905d22810846802a3329ddcb93ccc432870243211c79fc"}, + {file = "Pillow-8.4.0.tar.gz", hash = "sha256:b8e2f83c56e141920c39464b852de3719dfbfb6e3c99a2d8da0edf4fb33176ed"}, +] +platformdirs = [ + {file = "platformdirs-2.4.0-py3-none-any.whl", hash = "sha256:8868bbe3c3c80d42f20156f22e7131d2fb321f5bc86a2a345375c6481a67021d"}, + {file = "platformdirs-2.4.0.tar.gz", hash = "sha256:367a5e80b3d04d2428ffa76d33f124cf11e8fff2acdaa9b43d545f5c7d661ef2"}, ] pluggy = [ - {file = "pluggy-0.13.1-py2.py3-none-any.whl", hash = "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"}, - {file = "pluggy-0.13.1.tar.gz", hash = "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0"}, + {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, + {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, ] ply = [ {file = "ply-3.11-py2.py3-none-any.whl", hash = "sha256:096f9b8350b65ebd2fd1346b12452efe5b9607f7482813ffca50c22722a807ce"}, @@ -2143,13 +2180,9 @@ protobuf = [ {file = "protobuf-3.17.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2ae692bb6d1992afb6b74348e7bb648a75bb0d3565a3f5eea5bec8f62bd06d87"}, {file = "protobuf-3.17.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:99938f2a2d7ca6563c0ade0c5ca8982264c484fdecf418bd68e880a7ab5730b1"}, {file = "protobuf-3.17.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6902a1e4b7a319ec611a7345ff81b6b004b36b0d2196ce7a748b3493da3d226d"}, - {file = "protobuf-3.17.3-cp38-cp38-win32.whl", hash = "sha256:59e5cf6b737c3a376932fbfb869043415f7c16a0cf176ab30a5bbc419cd709c1"}, - {file = "protobuf-3.17.3-cp38-cp38-win_amd64.whl", hash = "sha256:ebcb546f10069b56dc2e3da35e003a02076aaa377caf8530fe9789570984a8d2"}, {file = "protobuf-3.17.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:4ffbd23640bb7403574f7aff8368e2aeb2ec9a5c6306580be48ac59a6bac8bde"}, {file = "protobuf-3.17.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:26010f693b675ff5a1d0e1bdb17689b8b716a18709113288fead438703d45539"}, {file = "protobuf-3.17.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e76d9686e088fece2450dbc7ee905f9be904e427341d289acbe9ad00b78ebd47"}, - {file = "protobuf-3.17.3-cp39-cp39-win32.whl", hash = "sha256:a38bac25f51c93e4be4092c88b2568b9f407c27217d3dd23c7a57fa522a17554"}, - {file = "protobuf-3.17.3-cp39-cp39-win_amd64.whl", hash = "sha256:85d6303e4adade2827e43c2b54114d9a6ea547b671cb63fafd5011dc47d0e13d"}, {file = "protobuf-3.17.3-py2.py3-none-any.whl", hash = "sha256:2bfb815216a9cd9faec52b16fd2bfa68437a44b67c56bee59bc3926522ecb04e"}, {file = "protobuf-3.17.3.tar.gz", hash = "sha256:72804ea5eaa9c22a090d2803813e280fb273b62d5ae497aaf3553d141c4fdd7b"}, ] @@ -2212,78 +2245,121 @@ pyflakes = [ {file = "pyflakes-2.3.1.tar.gz", hash = "sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db"}, ] pygments = [ - {file = "Pygments-2.9.0-py3-none-any.whl", hash = "sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e"}, - {file = "Pygments-2.9.0.tar.gz", hash = "sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f"}, + {file = "Pygments-2.10.0-py3-none-any.whl", hash = "sha256:b8e67fe6af78f492b3c4b3e2970c0624cbf08beb1e493b2c99b9fa1b67a20380"}, + {file = "Pygments-2.10.0.tar.gz", hash = "sha256:f398865f7eb6874156579fdf36bc840a03cab64d1cde9e93d68f46a425ec52c6"}, ] pylint = [ - {file = "pylint-2.8.3-py3-none-any.whl", hash = "sha256:792b38ff30903884e4a9eab814ee3523731abd3c463f3ba48d7b627e87013484"}, - {file = "pylint-2.8.3.tar.gz", hash = "sha256:0a049c5d47b629d9070c3932d13bff482b12119b6a241a93bc460b0be16953c8"}, + {file = "pylint-2.11.1-py3-none-any.whl", hash = "sha256:0f358e221c45cbd4dad2a1e4b883e75d28acdcccd29d40c76eb72b307269b126"}, + {file = "pylint-2.11.1.tar.gz", hash = 
"sha256:2c9843fff1a88ca0ad98a256806c82c5a8f86086e7ccbdb93297d86c3f90c436"}, ] pymongo = [ - {file = "pymongo-3.11.4-cp27-cp27m-macosx_10_14_intel.whl", hash = "sha256:b7efc7e7049ef366777cfd35437c18a4166bb50a5606a1c840ee3b9624b54fc9"}, - {file = "pymongo-3.11.4-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:517ba47ca04a55b1f50ee8df9fd97f6c37df5537d118fb2718952b8623860466"}, - {file = "pymongo-3.11.4-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:225c61e08fe517aede7912937939e09adf086c8e6f7e40d4c85ad678c2c2aea3"}, - {file = "pymongo-3.11.4-cp27-cp27m-win32.whl", hash = "sha256:e4e9db78b71db2b1684ee4ecc3e32c4600f18cdf76e6b9ae03e338e52ee4b168"}, - {file = "pymongo-3.11.4-cp27-cp27m-win_amd64.whl", hash = "sha256:8e0004b0393d72d76de94b4792a006cb960c1c65c7659930fbf9a81ce4341982"}, - {file = "pymongo-3.11.4-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:fedf0dee7a412ca6d1d6d92c158fe9cbaa8ea0cae90d268f9ccc0744de7a97d0"}, - {file = "pymongo-3.11.4-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:f947b359cc4769af8b49be7e37af01f05fcf15b401da2528021148e4a54426d1"}, - {file = "pymongo-3.11.4-cp34-cp34m-macosx_10_6_intel.whl", hash = "sha256:3a3498a8326111221560e930f198b495ea6926937e249f475052ffc6893a6680"}, - {file = "pymongo-3.11.4-cp34-cp34m-manylinux1_i686.whl", hash = "sha256:9a4f6e0b01df820ba9ed0b4e618ca83a1c089e48d4f268d0e00dcd49893d4549"}, - {file = "pymongo-3.11.4-cp34-cp34m-manylinux1_x86_64.whl", hash = "sha256:d65bac5f6724d9ea6f0b5a0f0e4952fbbf209adcf6b5583b54c54bd2fcd74dc0"}, - {file = "pymongo-3.11.4-cp34-cp34m-win32.whl", hash = "sha256:15b083d1b789b230e5ac284442d9ecb113c93f3785a6824f748befaab803b812"}, - {file = "pymongo-3.11.4-cp34-cp34m-win_amd64.whl", hash = "sha256:f08665d3cc5abc2f770f472a9b5f720a9b3ab0b8b3bb97c7c1487515e5653d39"}, - {file = "pymongo-3.11.4-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:977b1d4f868986b4ba5d03c317fde4d3b66e687d74473130cd598e3103db34fa"}, - {file = "pymongo-3.11.4-cp35-cp35m-manylinux1_i686.whl", hash = 
"sha256:510cd3bfabb63a07405b7b79fae63127e34c118b7531a2cbbafc7a24fd878594"}, - {file = "pymongo-3.11.4-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:071552b065e809d24c5653fcc14968cfd6fde4e279408640d5ac58e3353a3c5f"}, - {file = "pymongo-3.11.4-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:f4ba58157e8ae33ee86fadf9062c506e535afd904f07f9be32731f4410a23b7f"}, - {file = "pymongo-3.11.4-cp35-cp35m-manylinux2014_i686.whl", hash = "sha256:b413117210fa6d92664c3d860571e8e8727c3e8f2ff197276c5d0cb365abd3ad"}, - {file = "pymongo-3.11.4-cp35-cp35m-manylinux2014_ppc64le.whl", hash = "sha256:08b8723248730599c9803ae4c97b8f3f76c55219104303c88cb962a31e3bb5ee"}, - {file = "pymongo-3.11.4-cp35-cp35m-manylinux2014_s390x.whl", hash = "sha256:8a41fdc751dc4707a4fafb111c442411816a7c225ebb5cadb57599534b5d5372"}, - {file = "pymongo-3.11.4-cp35-cp35m-manylinux2014_x86_64.whl", hash = "sha256:f664ed7613b8b18f0ce5696b146776266a038c19c5cd6efffa08ecc189b01b73"}, - {file = "pymongo-3.11.4-cp35-cp35m-win32.whl", hash = "sha256:5c36428cc4f7fae56354db7f46677fd21222fc3cb1e8829549b851172033e043"}, - {file = "pymongo-3.11.4-cp35-cp35m-win_amd64.whl", hash = "sha256:d0a70151d7de8a3194cdc906bcc1a42e14594787c64b0c1c9c975e5a2af3e251"}, - {file = "pymongo-3.11.4-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:9b9298964389c180a063a9e8bac8a80ed42de11d04166b20249bfa0a489e0e0f"}, - {file = "pymongo-3.11.4-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:b2f41261b648cf5dee425f37ff14f4ad151c2f24b827052b402637158fd056ef"}, - {file = "pymongo-3.11.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:e02beaab433fd1104b2804f909e694cfbdb6578020740a9051597adc1cd4e19f"}, - {file = "pymongo-3.11.4-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:8898f6699f740ca93a0879ed07d8e6db02d68af889d0ebb3d13ab017e6b1af1e"}, - {file = "pymongo-3.11.4-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:62c29bc36a6d9be68fe7b5aaf1e120b4aa66a958d1e146601fcd583eb12cae7b"}, - {file = 
"pymongo-3.11.4-cp36-cp36m-manylinux2014_ppc64le.whl", hash = "sha256:424799c71ff435094e5fb823c40eebb4500f0e048133311e9c026467e8ccebac"}, - {file = "pymongo-3.11.4-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:3551912f5c34d8dd7c32c6bb00ae04192af47f7b9f653608f107d19c1a21a194"}, - {file = "pymongo-3.11.4-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:5db59223ed1e634d842a053325f85f908359c6dac9c8ddce8ef145061fae7df8"}, - {file = "pymongo-3.11.4-cp36-cp36m-win32.whl", hash = "sha256:fea5cb1c63efe1399f0812532c7cf65458d38fd011be350bc5021dfcac39fba8"}, - {file = "pymongo-3.11.4-cp36-cp36m-win_amd64.whl", hash = "sha256:d4e62417e89b717a7bcd8576ac3108cd063225942cc91c5b37ff5465fdccd386"}, - {file = "pymongo-3.11.4-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:4c7e8c8e1e1918dcf6a652ac4b9d87164587c26fd2ce5dd81e73a5ab3b3d492f"}, - {file = "pymongo-3.11.4-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:38a7b5140a48fc91681cdb5cb95b7cd64640b43d19259fdd707fa9d5a715f2b2"}, - {file = "pymongo-3.11.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:aff3656af2add93f290731a6b8930b23b35c0c09569150130a58192b3ec6fc61"}, - {file = "pymongo-3.11.4-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:03be7ad107d252bb7325d4af6309fdd2c025d08854d35f0e7abc8bf048f4245e"}, - {file = "pymongo-3.11.4-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:6060794aac9f7b0644b299f46a9c6cbc0bc470bd01572f4134df140afd41ded6"}, - {file = "pymongo-3.11.4-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:73326b211e7410c8bd6a74500b1e3f392f39cf10862e243d00937e924f112c01"}, - {file = "pymongo-3.11.4-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:20d75ea11527331a2980ab04762a9d960bcfea9475c54bbeab777af880de61cd"}, - {file = "pymongo-3.11.4-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:3135dd574ef1286189f3f04a36c8b7a256376914f8cbbce66b94f13125ded858"}, - {file = "pymongo-3.11.4-cp37-cp37m-win32.whl", hash = "sha256:7c97554ea521f898753d9773891d0347ebfaddcc1dee2ad94850b163171bf1f1"}, - 
{file = "pymongo-3.11.4-cp37-cp37m-win_amd64.whl", hash = "sha256:a08c8b322b671857c81f4c30cd3c8df2895fd3c0e9358714f39e0ef8fb327702"}, - {file = "pymongo-3.11.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f3d851af3852f16ad4adc7ee054fd9c90a7a5063de94d815b7f6a88477b9f4c6"}, - {file = "pymongo-3.11.4-cp38-cp38-manylinux1_i686.whl", hash = "sha256:3bfc7689a1bacb9bcd2f2d5185d99507aa29f667a58dd8adaa43b5a348139e46"}, - {file = "pymongo-3.11.4-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:b8f94acd52e530a38f25e4d5bf7ddfdd4bea9193e718f58419def0d4406b58d3"}, - {file = "pymongo-3.11.4-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:e4b631688dfbdd61b5610e20b64b99d25771c6d52d9da73349342d2a0f11c46a"}, - {file = "pymongo-3.11.4-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:474e21d0e07cd09679e357d1dac76e570dab86665e79a9d3354b10a279ac6fb3"}, - {file = "pymongo-3.11.4-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:421d13523d11c57f57f257152bc4a6bb463aadf7a3918e9c96fefdd6be8dbfb8"}, - {file = "pymongo-3.11.4-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:0cabfc297f4cf921f15bc789a8fbfd7115eb9f813d3f47a74b609894bc66ab0d"}, - {file = "pymongo-3.11.4-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:fe4189846448df013cd9df11bba38ddf78043f8c290a9f06430732a7a8601cce"}, - {file = "pymongo-3.11.4-cp38-cp38-win32.whl", hash = "sha256:eb4d176394c37a76e8b0afe54b12d58614a67a60a7f8c0dd3a5afbb013c01092"}, - {file = "pymongo-3.11.4-cp38-cp38-win_amd64.whl", hash = "sha256:fffff7bfb6799a763d3742c59c6ee7ffadda21abed557637bc44ed1080876484"}, - {file = "pymongo-3.11.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:13acf6164ead81c9fc2afa0e1ea6d6134352973ce2bb35496834fee057063c04"}, - {file = "pymongo-3.11.4-cp39-cp39-manylinux1_i686.whl", hash = "sha256:d360e5d5dd3d55bf5d1776964625018d85b937d1032bae1926dd52253decd0db"}, - {file = "pymongo-3.11.4-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:0aaf4d44f1f819360f9432df538d54bbf850f18152f34e20337c01b828479171"}, - {file = 
"pymongo-3.11.4-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:08bda7b2c522ff9f1e554570da16298271ebb0c56ab9699446aacba249008988"}, - {file = "pymongo-3.11.4-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:1a994a42f49dab5b6287e499be7d3d2751776486229980d8857ad53b8333d469"}, - {file = "pymongo-3.11.4-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:161fcd3281c42f644aa8dec7753cca2af03ce654e17d76da4f0dab34a12480ca"}, - {file = "pymongo-3.11.4-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:78f07961f4f214ea8e80be63cffd5cc158eb06cd922ffbf6c7155b11728f28f9"}, - {file = "pymongo-3.11.4-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:ad31f184dcd3271de26ab1f9c51574afb99e1b0e484ab1da3641256b723e4994"}, - {file = "pymongo-3.11.4-cp39-cp39-win32.whl", hash = "sha256:5e606846c049ed40940524057bfdf1105af6066688c0e6a1a3ce2038589bae70"}, - {file = "pymongo-3.11.4-cp39-cp39-win_amd64.whl", hash = "sha256:3491c7de09e44eded16824cb58cf9b5cc1dc6f066a0bb7aa69929d02aa53b828"}, - {file = "pymongo-3.11.4-py2.7-macosx-10.14-intel.egg", hash = "sha256:506a6dab4c7ffdcacdf0b8e70bd20eb2e77fa994519547c9d88d676400fcad58"}, - {file = "pymongo-3.11.4.tar.gz", hash = "sha256:539d4cb1b16b57026999c53e5aab857fe706e70ae5310cc8c232479923f932e6"}, + {file = "pymongo-3.12.1-cp27-cp27m-macosx_10_14_intel.whl", hash = "sha256:c4653830375ab019b86d218c749ad38908b74182b2863d09936aa8d7f990d30e"}, + {file = "pymongo-3.12.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:2462a68f6675da548e333fa299d8e9807e00f95a4d198cfe9194d7be69f40c9b"}, + {file = "pymongo-3.12.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:4168b6c425d783e81723fc3dc382d374a228ff29530436a472a36d9f27593e73"}, + {file = "pymongo-3.12.1-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:36806ee53a85c3ba73939652f2ced2961e6a77cfbae385cd83f2e24cd97964b7"}, + {file = "pymongo-3.12.1-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:bf2d9d62178bb5c05e77d40becf89c309b1966fbcfb5c306238f81bf1ec2d6a2"}, 
+ {file = "pymongo-3.12.1-cp27-cp27m-win32.whl", hash = "sha256:75c7ef67b4b8ec070e7a4740764f6c03ec9246b59d95e2ae45c029d41cb9efa1"}, + {file = "pymongo-3.12.1-cp27-cp27m-win_amd64.whl", hash = "sha256:49b0d92724d3fce1174fd30b0b428595072d5c6b14d6203e46a9ea347ae7b439"}, + {file = "pymongo-3.12.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:cef2675004d85d85a4ccc24730b73a99931547368d18ceeed1259a2d9fcddbc1"}, + {file = "pymongo-3.12.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:5e3833c001a04aa06a28c6fd9628256862a654c09b0f81c07734b5629bc014ab"}, + {file = "pymongo-3.12.1-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6a96c04ce39d66df60d9ce89f4c254c4967bc7d9e2e2c52adc58f47be826ee96"}, + {file = "pymongo-3.12.1-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:c2a17752f97a942bdb4ff4a0516a67c5ade1658ebe1ab2edacdec0b42e39fa75"}, + {file = "pymongo-3.12.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:02e0c088f189ca69fac094cb5f851b43bbbd7cec42114495777d4d8f297f7f8a"}, + {file = "pymongo-3.12.1-cp310-cp310-manylinux1_i686.whl", hash = "sha256:45d6b47d70ed44e3c40bef618ed61866c48176e7e5dff80d06d8b1a6192e8584"}, + {file = "pymongo-3.12.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:891f541c7ed29b95799da0cd249ae1db1842777b564e8205a197b038c5df6135"}, + {file = "pymongo-3.12.1-cp310-cp310-manylinux2014_i686.whl", hash = "sha256:dc4749c230a71b34db50ac2481d9008bb17b67c92671c443c3b40e192fbea78e"}, + {file = "pymongo-3.12.1-cp310-cp310-manylinux2014_ppc64le.whl", hash = "sha256:aa434534cc91f51a85e3099dc257ee8034b3d2be77f2ca58fb335a686e3a681f"}, + {file = "pymongo-3.12.1-cp310-cp310-manylinux2014_s390x.whl", hash = "sha256:180b405e17b90a877ea5dbc5efe7f4c171af4c89323148e100c0f12cedb86f12"}, + {file = "pymongo-3.12.1-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:a472ca3d43d33e596ff5836c6cc71c3e61be33f44fe1cfdab4a1100f4af60333"}, + {file = 
"pymongo-3.12.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe16517b275031d61261a4e3941c411fb7c46a9cd012f02381b56e7907cc9e06"}, + {file = "pymongo-3.12.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c0947d7be30335cb4c3d5d0983d8ebc8294ae52503cf1d596c926f7e7183900b"}, + {file = "pymongo-3.12.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:adb37bf22d25a51b84d989a2a5c770d4514ac590201eea1cb50ce8c9c5257f1d"}, + {file = "pymongo-3.12.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f5fe59328838fa28958cc06ecf94be585726b97d637012f168bc3c7abe4fd81"}, + {file = "pymongo-3.12.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87114b995506e7584cf3daf891e419b5f6e7e383e7df6267494da3a76312aa22"}, + {file = "pymongo-3.12.1-cp310-cp310-win32.whl", hash = "sha256:4f4bc64fe9cbd70d46f519f1e88c9e4677f7af18ab9cd4942abce2bcfa7549c3"}, + {file = "pymongo-3.12.1-cp310-cp310-win_amd64.whl", hash = "sha256:8f87f53c9cd89010ae45490ec2c963ff18b31f5f290dc08b04151709589fe8d9"}, + {file = "pymongo-3.12.1-cp34-cp34m-macosx_10_6_intel.whl", hash = "sha256:37a63da5ee623acdf98e6d511171c8a5827a6106b0712c18af4441ef4f11e6be"}, + {file = "pymongo-3.12.1-cp34-cp34m-manylinux1_i686.whl", hash = "sha256:b1b06038c9940a49c73db0aeb0f6809b308e198da1326171768cf68d843af521"}, + {file = "pymongo-3.12.1-cp34-cp34m-manylinux1_x86_64.whl", hash = "sha256:ab27d6d7d41a66d9e54269a290d27cd5c74f08e9add0054a754b4821026c4f42"}, + {file = "pymongo-3.12.1-cp34-cp34m-win32.whl", hash = "sha256:63be03f7ae1e15e72a234637ec7941ef229c7ab252c9ff6af48bba1e5418961c"}, + {file = "pymongo-3.12.1-cp34-cp34m-win_amd64.whl", hash = "sha256:56feb80ea1f5334ccab9bd16a5161571ab70392e51fcc752fb8a1dc67125f663"}, + {file = "pymongo-3.12.1-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:a81e52dbf95f236a0c89a5abcd2b6e1331da0c0312f471c73fae76c79d2acf6b"}, + {file = 
"pymongo-3.12.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:712de1876608fd5d76abc3fc8ec55077278dd5044073fbe9492631c9a2c58351"}, + {file = "pymongo-3.12.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:47ed77f62c8417a86f9ad158b803f3459a636386cb9d3d4e9e7d6a82d051f907"}, + {file = "pymongo-3.12.1-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1fa6f08ddb6975371777f97592d35c771e713ee2250e55618148a5e57e260aff"}, + {file = "pymongo-3.12.1-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:3a2fcbd04273a509fa85285d9eccf17ab65ce440bd4f5e5a58c978e563cd9e9a"}, + {file = "pymongo-3.12.1-cp35-cp35m-win32.whl", hash = "sha256:d1b98539b0de822b6f717498e59ae3e5ae2e7f564370ab513e6d0c060753e447"}, + {file = "pymongo-3.12.1-cp35-cp35m-win_amd64.whl", hash = "sha256:c660fd1e4a4b52f79f7d134a3d31d452948477b7f46ff5061074a534c5805ba6"}, + {file = "pymongo-3.12.1-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:460bdaa3f65ddb5b7474ae08589a1763b5da1a78b8348351b9ba1c63b459d67d"}, + {file = "pymongo-3.12.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:1d55982e5335925c55e2b87467043866ce72bd30ea7e7e3eeed6ec3d95a806d4"}, + {file = "pymongo-3.12.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:67e0b2ad3692f6d0335ae231a40de55ec395b6c2e971ad6f55b162244d1ec542"}, + {file = "pymongo-3.12.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:515e4708d6567901ffc06476a38abe2c9093733f52638235d9f149579c1d3de0"}, + {file = "pymongo-3.12.1-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:ed20ec5a01c43254f6047c5d8124b70d28e39f128c8ad960b437644fe94e1827"}, + {file = "pymongo-3.12.1-cp36-cp36m-manylinux2014_ppc64le.whl", hash = "sha256:e2bccadbe313b11704160aaba5eec95d2da1aa663f02f41d2d1520d02bbbdcd5"}, + {file = "pymongo-3.12.1-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:ef8b927813c27c3bdfc82c55682d7767403bcdadfd9f9c0fc49f4be4553a877b"}, + {file = "pymongo-3.12.1-cp36-cp36m-manylinux2014_x86_64.whl", hash = 
"sha256:2d3abe548a280b49269c7907d5b71199882510c484d680a5ea7860f30c4a695f"}, + {file = "pymongo-3.12.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e30cce3cc86d6082c8596b3fbee0d4f54bc4d337a4fa1bf536920e2e319e24f0"}, + {file = "pymongo-3.12.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fe3ae4294d593da54862f0140fdcc89d1aeeb94258ca97f094119ed7f0e5882d"}, + {file = "pymongo-3.12.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9641be893ccce7d192a0094efd0a0d9f1783a1ebf314b4128f8a27bfadb8a77c"}, + {file = "pymongo-3.12.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13a7c6d055af58a1e9c505e736da8b6a2e95ccc8cec10b008143f7a536e5de8a"}, + {file = "pymongo-3.12.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25fd76deabe9ea37c8360c362b32f702cc095a208dd1c5328189938ca7685847"}, + {file = "pymongo-3.12.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e841695b5dbea38909ab2dbf17e91e9a823412d8d88d1ef77f1b94a7bc551c0f"}, + {file = "pymongo-3.12.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6ead0126fb4424c6c6a4fdc603d699a9db7c03cdb8eac374c352a75fec8a820a"}, + {file = "pymongo-3.12.1-cp36-cp36m-win32.whl", hash = "sha256:a5dbeeea6a375fbd79448b48a54c46fc9351611a03ef8398d2a40b684ce46194"}, + {file = "pymongo-3.12.1-cp36-cp36m-win_amd64.whl", hash = "sha256:87db421c9eb915b8d9a9a13c5b2ee338350e36ee83e26ff0adfc48abc5db3ac3"}, + {file = "pymongo-3.12.1-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:8851544168703fb519e95556e3b463fca4beeef7ed3f731d81a68c8268515d9d"}, + {file = "pymongo-3.12.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:7d8cdd2f070c71366e64990653522cce84b08dc26ab0d1fa19aa8d14ee0cf9ba"}, + {file = "pymongo-3.12.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:51437c77030bed72d57d8a61e22758e3c389b13fea7787c808030002bb05ca39"}, + {file = 
"pymongo-3.12.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:f43cacda46fc188f998e6d308afe1c61ff41dcb300949f4cbf731e9a0a5eb2d3"}, + {file = "pymongo-3.12.1-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:1a7b138a04fdd17849930dc8bf664002e17db38448850bfb96d200c9c5a8b3a1"}, + {file = "pymongo-3.12.1-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:444c00ebc20f2f9dc62e34f7dc9453dc2f5f5a72419c8dccad6e26d546c35712"}, + {file = "pymongo-3.12.1-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:81ce5f871f5d8e82615c8bd0b34b68a9650204c8b1a04ce7890d58c98eb66e39"}, + {file = "pymongo-3.12.1-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:837cdef094f39c6f4a2967abc646a412999c2540fbf5d3cce1dd3b671f4b876c"}, + {file = "pymongo-3.12.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2174d3279b8e2b6d7613b338f684cd78ff7adf1e7ec5b7b7bde5609a129c9898"}, + {file = "pymongo-3.12.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:303531649fa45f96b694054c1aa02f79bda32ef57affe42c5c339336717eed74"}, + {file = "pymongo-3.12.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1821ce4e5a293313947fd017bbd2d2535aa6309680fa29b33d0442d15da296ec"}, + {file = "pymongo-3.12.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15dae01341571d0af51526b7a21648ca575e9375e16ba045c9860848dfa8952f"}, + {file = "pymongo-3.12.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fcc021530b7c71069132fe4846d95a3cdd74d143adc2f7e398d5fabf610f111c"}, + {file = "pymongo-3.12.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f44bea60fd2178d7153deef9621c4b526a93939da30010bba24d3408a98b0f79"}, + {file = "pymongo-3.12.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6f0f0a10f128ea0898e607d351ebfabf70941494fc94e87f12c76e2894d8e6c4"}, + {file = "pymongo-3.12.1-cp37-cp37m-win32.whl", hash = 
"sha256:afb16330ab6efbbf995375ad94e970fa2f89bb46bd10d854b7047620fdb0d67d"}, + {file = "pymongo-3.12.1-cp37-cp37m-win_amd64.whl", hash = "sha256:dcf906c1f7a33e4222e4bff18da1554d69323bc4dd95fe867a6fa80709ee5f93"}, + {file = "pymongo-3.12.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9b62d84478f471fdb0dcea3876acff38f146bd23cbdbed15074fb4622064ec2e"}, + {file = "pymongo-3.12.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:48722e91981bb22a16b0431ea01da3e1cc5b96805634d3b8d3c2a5315c1ce7f1"}, + {file = "pymongo-3.12.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:d6c6989c10008ac70c2bb2ad2b940fcfe883712746c89f7e3308c14c213a70d7"}, + {file = "pymongo-3.12.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:573e2387d0686976642142c50740dfc4d3494cc627e2a7d22782b99f70879055"}, + {file = "pymongo-3.12.1-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:7117bfd8827cfe550f65a3c399dcd6e02226197a91c6d11a3540c3e8efc686d6"}, + {file = "pymongo-3.12.1-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:6eb6789f26c398c383225e1313c8e75a7d290d323b8eaf65f3f3ddd0eb8a5a3c"}, + {file = "pymongo-3.12.1-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:138248c542051eb462f88b50b0267bd5286d6661064bab06faa0ef6ac30cdb4b"}, + {file = "pymongo-3.12.1-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:7abc87e45b572eb6d17a50422e69a9e5d6f13e691e821fe2312df512500faa50"}, + {file = "pymongo-3.12.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7430f3987d232e782304c109be1d0e6fff46ca6405cb2479e4d8d08cd29541e"}, + {file = "pymongo-3.12.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cb48ff6cc6109190e1ccf8ea1fc71cc244c9185813ce7d1c415dce991cfb8709"}, + {file = "pymongo-3.12.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:68409171ab2aa7ccd6e8e839233e4b8ddeec246383c9a3698614e814739356f9"}, + {file = "pymongo-3.12.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:13d74bf3435c1e58d8fafccc0d5e87f246ae2c6e9cbef4b35e32a1c3759e354f"}, + {file = "pymongo-3.12.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:849e641cfed05c75d772f9e9018f42c5fbd00655d43d52da1b9c56346fd3e4cc"}, + {file = "pymongo-3.12.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5183b698d6542219e4135de583b57bc6286bd37df7f645b688278eb919bfa785"}, + {file = "pymongo-3.12.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:65f159c445761cab04b665fc448b3fc008aebc98e54fdcbfd1aff195ef1b1408"}, + {file = "pymongo-3.12.1-cp38-cp38-win32.whl", hash = "sha256:3b40e36d3036bfe69ba63ec8e746a390721f75467085a0384b528e1dda532c69"}, + {file = "pymongo-3.12.1-cp38-cp38-win_amd64.whl", hash = "sha256:58a67b3800476232f9989e533d0244060309451b436d46670a53e6d189f1a7e7"}, + {file = "pymongo-3.12.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db3efec9dcecd96555d752215797816da40315d61878f90ca39c8e269791bf17"}, + {file = "pymongo-3.12.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:bfd073fea04061019a103a288847846b5ef40dfa2f73b940ed61e399ca95314f"}, + {file = "pymongo-3.12.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:5067c04d3b19c820faac6342854d887ade58e8d38c3db79b68c2a102bbb100e7"}, + {file = "pymongo-3.12.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:1c4e51a3b69789b6f468a8e881a13f2d1e8f5e99e41f80fd44845e6ec0f701e1"}, + {file = "pymongo-3.12.1-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:2fa101bb23619120673899694a65b094364269e597f551a87c4bdae3a474d726"}, + {file = "pymongo-3.12.1-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:eb65ec0255a0fccc47c87d44e505ef5180bfd71690bd5f84161b1f23949fb209"}, + {file = "pymongo-3.12.1-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:ed751a20840a31242e7bea566fcf93ba75bc11b33afe2777bbf46069c1af5094"}, + {file = "pymongo-3.12.1-cp39-cp39-manylinux2014_x86_64.whl", hash = 
"sha256:17238115e6d37f5423b046cb829f1ca02c4ea7edb163f5b8b88e0c975dc3fec9"}, + {file = "pymongo-3.12.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fda3b3fb5c0d159195ab834b322a23808f1b059bcc7e475765abeddee6a2529"}, + {file = "pymongo-3.12.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6235bf2157aa46e53568ed79b70603aa8874baa202d5d1de82fa0eb917696e73"}, + {file = "pymongo-3.12.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5d6428b8b422ba5205140e8be11722fa7292a0bedaa8bc80fb34c92eb19ba45"}, + {file = "pymongo-3.12.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e6d1cf4bd6552b5f519432cce1530c09e6b0aab98d44803b991f7e880bd332"}, + {file = "pymongo-3.12.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:287c2a0063267c1458c4ddf528b44063ce7f376a6436eea5bccd7f625bbc3b5e"}, + {file = "pymongo-3.12.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4a2d73a9281faefb273a5448f6d25f44ebd311ada9eb79b6801ae890508fe231"}, + {file = "pymongo-3.12.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6f07888e3b73c0dfa46f12d098760494f5f23fd66923a6615edfe486e6a7649c"}, + {file = "pymongo-3.12.1-cp39-cp39-win32.whl", hash = "sha256:77dddf596fb065de29fb39992fbc81301f7fd0003be649b7fa7448c77ca53bed"}, + {file = "pymongo-3.12.1-cp39-cp39-win_amd64.whl", hash = "sha256:979e34db4f3dc5710c18db437aaf282f691092b352e708cb2afd4df287698c76"}, + {file = "pymongo-3.12.1-py2.7-macosx-10.14-intel.egg", hash = "sha256:c04e84ccf590933a266180286d8b6a5fc844078a5d934432628301bd8b5f9ca7"}, + {file = "pymongo-3.12.1.tar.gz", hash = "sha256:704879b6a54c45ad76cea7c6789c1ae7185050acea7afd15b58318fa1932ed45"}, ] pynacl = [ {file = "PyNaCl-1.4.0-cp27-cp27m-macosx_10_10_x86_64.whl", hash = "sha256:ea6841bc3a76fa4942ce00f3bda7d436fda21e2d91602b9e21b7ca9ecab8f3ff"}, @@ -2306,24 +2382,30 @@ pynacl = [ {file = 
"PyNaCl-1.4.0.tar.gz", hash = "sha256:54e9a2c849c742006516ad56a88f5c74bf2ce92c9f67435187c3c5953b346505"}, ] pynput = [ - {file = "pynput-1.7.3-py2.py3-none-any.whl", hash = "sha256:fea5777454f896bd79d35393088cd29a089f3b2da166f0848a922b1d5a807d4f"}, - {file = "pynput-1.7.3-py3.8.egg", hash = "sha256:6626e8ea9ca482bb5628a7169e1193824e382c4ad3053e40f4f24f41ee7b41c9"}, - {file = "pynput-1.7.3.tar.gz", hash = "sha256:4e50b1a0ab86847e87e58f6d1993688b9a44f9f4c88d4712315ea8eb552ef828"}, + {file = "pynput-1.7.4-py2.py3-none-any.whl", hash = "sha256:f78502cb2abd101721d867451bf315a4e1334666372f8682651393f16e1d2d9b"}, + {file = "pynput-1.7.4-py3.9.egg", hash = "sha256:225926bf5e98d36738911112c72e19e0cba830aafee3882ef8661c8d9cfb3b63"}, + {file = "pynput-1.7.4.tar.gz", hash = "sha256:16fecc4d1e53a28fb7c669c79e189c3f2cde14a08d6b457c3da07075c82f3b4c"}, ] pyobjc-core = [ {file = "pyobjc-core-7.3.tar.gz", hash = "sha256:5081aedf8bb40aac1a8ad95adac9e44e148a882686ded614adf46bb67fd67574"}, - {file = "pyobjc_core-7.3-1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a1f1e6b457127cbf2b5bd2b94520a7c89fb590b739911eadb2b0499a3a5b0e6f"}, - {file = "pyobjc_core-7.3-1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:ed708cc47bae8b711f81f252af09898a5f986c7a38cec5ad5623d571d328bff8"}, {file = "pyobjc_core-7.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4e93ad769a20b908778fe950f62a843a6d8f0fa71996e5f3cc9fab5ae7d17771"}, {file = "pyobjc_core-7.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:9f63fd37bbf3785af4ddb2f86cad5ca81c62cfc7d1c0099637ca18343c3656c1"}, {file = "pyobjc_core-7.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e9b1311f72f2e170742a7ee3a8149f52c35158dc024a21e88d6f1e52ba5d718b"}, {file = "pyobjc_core-7.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8d5e12a0729dfd1d998a861998b422d0a3e41923d75ea229bacf31372c831d7b"}, {file = "pyobjc_core-7.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:efdee8c4884405e0c0186c57f87d7bfaa0abc1f50b18e865db3caea3a1f329b9"}, ] +pyobjc-framework-applicationservices = [ + {file = "pyobjc-framework-ApplicationServices-7.3.tar.gz", hash = "sha256:1925ac30a817e557d1c08450005103bbf76ebd3ff473631fe9875070377b0b4d"}, + {file = "pyobjc_framework_ApplicationServices-7.3-1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:484e5b5e9f1757ad7e28799bb5d5d59ce861a3e5449f06fc3a0d05b998e9e6bb"}, + {file = "pyobjc_framework_ApplicationServices-7.3-1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:ec0c07775ff7034751306fa382117d12ae8e383b696cda1b2815dfd334c36ff7"}, + {file = "pyobjc_framework_ApplicationServices-7.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:daa4a9c51a927630fdd3d3f627e03ebc370aee3c397305db85a0a8ba4c28ae93"}, + {file = "pyobjc_framework_ApplicationServices-7.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:167aa21ee47b0ee6e4e399915371d183ae84880dc3813c27519e759acb9d20c9"}, + {file = "pyobjc_framework_ApplicationServices-7.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7a98f0f1e21465868f9dd32588ae71e5e6a4cb5c434d4158c9e12273fd7b8f27"}, + {file = "pyobjc_framework_ApplicationServices-7.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2d55796610e6293e83cc40183347e7f75a7c0682775cc19e5986945efa9cac1b"}, + {file = "pyobjc_framework_ApplicationServices-7.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:afd1ef147447fe7b06a271458eabb37ece6436705abf86265d7fb57310eca45f"}, +] pyobjc-framework-cocoa = [ {file = "pyobjc-framework-Cocoa-7.3.tar.gz", hash = "sha256:b18d05e7a795a3455ad191c3e43d6bfa673c2a4fd480bb1ccf57191051b80b7e"}, - {file = "pyobjc_framework_Cocoa-7.3-1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1e31376806e5de883a1d7c7c87d9ff2a8b09fc05d267e0dfce6e42409fb70c67"}, - {file = "pyobjc_framework_Cocoa-7.3-1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:d999387927284346035cb63ebb51f86331abc41f9376f9a6970e7f18207db392"}, {file = 
"pyobjc_framework_Cocoa-7.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:9edffdfa6dd1f71f21b531c3e61fdd3e4d5d3bf6c5a528c98e88828cd60bac11"}, {file = "pyobjc_framework_Cocoa-7.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:35a6340437a4e0109a302150b7d1f6baf57004ccf74834f9e6062fcafe2fd8d7"}, {file = "pyobjc_framework_Cocoa-7.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7c3886f2608ab3ed02482f8b2ebf9f782b324c559e84b52cfd92dba8a1109872"}, @@ -2332,8 +2414,6 @@ pyobjc-framework-cocoa = [ ] pyobjc-framework-quartz = [ {file = "pyobjc-framework-Quartz-7.3.tar.gz", hash = "sha256:98812844c34262def980bdf60923a875cd43428a8375b6fd53bd2cd800eccf0b"}, - {file = "pyobjc_framework_Quartz-7.3-1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1139bc6874c0f8b58f0b8602015e0994198bc506a6bcec1071208de32b55ed26"}, - {file = "pyobjc_framework_Quartz-7.3-1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:d94a3ed7051266c52392ec07d3b5adbf28d4be83341a24df0d88639344dcd84f"}, {file = "pyobjc_framework_Quartz-7.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1ef18f5a16511ded65980bf4f5983ea5d35c88224dbad1b3112abd29c60413ea"}, {file = "pyobjc_framework_Quartz-7.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3b41eec8d4b10c7c7e011e2f9051367f5499ef315ba52dfbae573c3a2e05469c"}, {file = "pyobjc_framework_Quartz-7.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c65456ed045dfe1711d0298734e5a3ad670f8c770f7eb3b19979256c388bdd2"}, @@ -2345,37 +2425,57 @@ pyparsing = [ {file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"}, ] pyrsistent = [ - {file = "pyrsistent-0.17.3.tar.gz", hash = "sha256:2e636185d9eb976a18a8a8e96efce62f2905fea90041958d8cc2a189756ebf3e"}, + {file = "pyrsistent-0.18.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f4c8cabb46ff8e5d61f56a037974228e978f26bfefce4f61a4b1ac0ba7a2ab72"}, + {file = "pyrsistent-0.18.0-cp36-cp36m-manylinux1_i686.whl", hash = 
"sha256:da6e5e818d18459fa46fac0a4a4e543507fe1110e808101277c5a2b5bab0cd2d"}, + {file = "pyrsistent-0.18.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:5e4395bbf841693eaebaa5bb5c8f5cdbb1d139e07c975c682ec4e4f8126e03d2"}, + {file = "pyrsistent-0.18.0-cp36-cp36m-win32.whl", hash = "sha256:527be2bfa8dc80f6f8ddd65242ba476a6c4fb4e3aedbf281dfbac1b1ed4165b1"}, + {file = "pyrsistent-0.18.0-cp36-cp36m-win_amd64.whl", hash = "sha256:2aaf19dc8ce517a8653746d98e962ef480ff34b6bc563fc067be6401ffb457c7"}, + {file = "pyrsistent-0.18.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:58a70d93fb79dc585b21f9d72487b929a6fe58da0754fa4cb9f279bb92369396"}, + {file = "pyrsistent-0.18.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:4916c10896721e472ee12c95cdc2891ce5890898d2f9907b1b4ae0f53588b710"}, + {file = "pyrsistent-0.18.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:73ff61b1411e3fb0ba144b8f08d6749749775fe89688093e1efef9839d2dcc35"}, + {file = "pyrsistent-0.18.0-cp37-cp37m-win32.whl", hash = "sha256:b29b869cf58412ca5738d23691e96d8aff535e17390128a1a52717c9a109da4f"}, + {file = "pyrsistent-0.18.0-cp37-cp37m-win_amd64.whl", hash = "sha256:097b96f129dd36a8c9e33594e7ebb151b1515eb52cceb08474c10a5479e799f2"}, + {file = "pyrsistent-0.18.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:772e94c2c6864f2cd2ffbe58bb3bdefbe2a32afa0acb1a77e472aac831f83427"}, + {file = "pyrsistent-0.18.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:c1a9ff320fa699337e05edcaae79ef8c2880b52720bc031b219e5b5008ebbdef"}, + {file = "pyrsistent-0.18.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:cd3caef37a415fd0dae6148a1b6957a8c5f275a62cca02e18474608cb263640c"}, + {file = "pyrsistent-0.18.0-cp38-cp38-win32.whl", hash = "sha256:e79d94ca58fcafef6395f6352383fa1a76922268fa02caa2272fff501c2fdc78"}, + {file = "pyrsistent-0.18.0-cp38-cp38-win_amd64.whl", hash = "sha256:a0c772d791c38bbc77be659af29bb14c38ced151433592e326361610250c605b"}, + {file = "pyrsistent-0.18.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:d5ec194c9c573aafaceebf05fc400656722793dac57f254cd4741f3c27ae57b4"}, + {file = "pyrsistent-0.18.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:6b5eed00e597b5b5773b4ca30bd48a5774ef1e96f2a45d105db5b4ebb4bca680"}, + {file = "pyrsistent-0.18.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:48578680353f41dca1ca3dc48629fb77dfc745128b56fc01096b2530c13fd426"}, + {file = "pyrsistent-0.18.0-cp39-cp39-win32.whl", hash = "sha256:f3ef98d7b76da5eb19c37fda834d50262ff9167c65658d1d8f974d2e4d90676b"}, + {file = "pyrsistent-0.18.0-cp39-cp39-win_amd64.whl", hash = "sha256:404e1f1d254d314d55adb8d87f4f465c8693d6f902f67eb6ef5b4526dc58e6ea"}, + {file = "pyrsistent-0.18.0.tar.gz", hash = "sha256:773c781216f8c2900b42a7b638d5b517bb134ae1acbebe4d1e8f1f41ea60eb4b"}, ] pysftp = [ {file = "pysftp-0.2.9.tar.gz", hash = "sha256:fbf55a802e74d663673400acd92d5373c1c7ee94d765b428d9f977567ac4854a"}, ] pytest = [ - {file = "pytest-6.2.4-py3-none-any.whl", hash = "sha256:91ef2131a9bd6be8f76f1f08eac5c5317221d6ad1e143ae03894b862e8976890"}, - {file = "pytest-6.2.4.tar.gz", hash = "sha256:50bcad0a0b9c5a72c8e4e7c9855a3ad496ca6a881a3641b4260605450772c54b"}, + {file = "pytest-6.2.5-py3-none-any.whl", hash = "sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134"}, + {file = "pytest-6.2.5.tar.gz", hash = "sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89"}, ] pytest-cov = [ - {file = "pytest-cov-2.12.1.tar.gz", hash = "sha256:261ceeb8c227b726249b376b8526b600f38667ee314f910353fa318caa01f4d7"}, - {file = "pytest_cov-2.12.1-py2.py3-none-any.whl", hash = "sha256:261bb9e47e65bd099c89c3edf92972865210c36813f80ede5277dceb77a4a62a"}, + {file = "pytest-cov-3.0.0.tar.gz", hash = "sha256:e7f0f5b1617d2210a2cabc266dfe2f4c75a8d32fb89eafb7ad9d06f6d076d470"}, + {file = "pytest_cov-3.0.0-py3-none-any.whl", hash = "sha256:578d5d15ac4a25e5f961c938b85a05b09fdaae9deef3bb6de9a6e766622ca7a6"}, ] pytest-print = [ - {file = "pytest_print-0.2.1-py2.py3-none-any.whl", hash = 
"sha256:2cfcdeee8b398457d3e3488f1fde5f8303b404c30187be5fcb4c7818df5f4529"}, - {file = "pytest_print-0.2.1.tar.gz", hash = "sha256:8f61e5bb2d031ee88d19a5a7695a0c863caee7b1478f1a82d080c2128b76ad83"}, + {file = "pytest_print-0.3.0-py2.py3-none-any.whl", hash = "sha256:53fb0f71d371f137ac2e7171d92f204eb45055580e8c7920df619d9b2ee45359"}, + {file = "pytest_print-0.3.0.tar.gz", hash = "sha256:769f1b1b0943b2941dbeeaac6985766e76b341130ed538f88c23ebcd7087b90d"}, ] python-dateutil = [ - {file = "python-dateutil-2.8.1.tar.gz", hash = "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c"}, - {file = "python_dateutil-2.8.1-py2.py3-none-any.whl", hash = "sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a"}, + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, ] python-xlib = [ - {file = "python-xlib-0.30.tar.gz", hash = "sha256:74131418faf9e7b83178c71d9d80297fbbd678abe99ae9258f5a20cd027acb5f"}, - {file = "python_xlib-0.30-py2.py3-none-any.whl", hash = "sha256:c4c92cd47e07588b2cbc7d52de18407b2902c3812d7cdec39cd2177b060828e2"}, + {file = "python-xlib-0.31.tar.gz", hash = "sha256:74d83a081f532bc07f6d7afcd6416ec38403d68f68b9b9dc9e1f28fbf2d799e9"}, + {file = "python_xlib-0.31-py2.py3-none-any.whl", hash = "sha256:1ec6ce0de73d9e6592ead666779a5732b384e5b8fb1f1886bd0a81cafa477759"}, ] python3-xlib = [ {file = "python3-xlib-0.15.tar.gz", hash = "sha256:dc4245f3ae4aa5949c1d112ee4723901ade37a96721ba9645f2bfa56e5b383f8"}, ] pytz = [ - {file = "pytz-2021.1-py2.py3-none-any.whl", hash = "sha256:eb10ce3e7736052ed3623d49975ce333bcd712c7bb19a58b9e2089d4057d0798"}, - {file = "pytz-2021.1.tar.gz", hash = "sha256:83a4a90894bf38e243cf052c8b58f381bfe9a7a483f6a9cab140bc7f702ac4da"}, + {file = "pytz-2021.3-py2.py3-none-any.whl", hash = 
"sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c"}, + {file = "pytz-2021.3.tar.gz", hash = "sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326"}, ] pywin32 = [ {file = "pywin32-301-cp35-cp35m-win32.whl", hash = "sha256:93367c96e3a76dfe5003d8291ae16454ca7d84bb24d721e0b74a07610b7be4a7"}, @@ -2394,8 +2494,8 @@ pywin32-ctypes = [ {file = "pywin32_ctypes-0.2.0-py2.py3-none-any.whl", hash = "sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98"}, ] "qt.py" = [ - {file = "Qt.py-1.3.3-py2.py3-none-any.whl", hash = "sha256:9e3f5417187c98d246918a9b27a9e1f8055e089bdb2b063a2739986bc19a3d2e"}, - {file = "Qt.py-1.3.3.tar.gz", hash = "sha256:601606127f70be9adc82c248d209d696cccbd1df242c24d3fb1a9e399f3ecaf1"}, + {file = "Qt.py-1.3.6-py2.py3-none-any.whl", hash = "sha256:7edf6048d07a6924707506b5ba34a6e05d66dde9a3f4e3a62f9996ccab0b91c7"}, + {file = "Qt.py-1.3.6.tar.gz", hash = "sha256:0d78656a2f814602eee304521c7bf5da0cec414818b3833712c77524294c404a"}, ] recommonmark = [ {file = "recommonmark-0.7.1-py2.py3-none-any.whl", hash = "sha256:1b1db69af0231efce3fa21b94ff627ea33dee7079a01dd0a7f8482c3da148b3f"}, @@ -2422,12 +2522,12 @@ six = [ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] slack-sdk = [ - {file = "slack_sdk-3.6.0-py2.py3-none-any.whl", hash = "sha256:e1b257923a1ef88b8620dd3abff94dc5b3eee16ef37975d101ba9e60123ac3af"}, - {file = "slack_sdk-3.6.0.tar.gz", hash = "sha256:195f044e02a2844579a7a26818ce323e85dde8de224730c859644918d793399e"}, + {file = "slack_sdk-3.11.2-py2.py3-none-any.whl", hash = "sha256:35245ec34c8549fbb5c43ccc17101afd725b3508bb784da46530b214f496bf93"}, + {file = "slack_sdk-3.11.2.tar.gz", hash = "sha256:131bf605894525c2d66da064677eabc19f53f02ce0f82a3f2fa130d4ec3bc1b0"}, ] smmap = [ - {file = "smmap-4.0.0-py2.py3-none-any.whl", hash = "sha256:a9a7479e4c572e2e775c404dcd3080c8dc49f39918c2cf74913d30c4c478e3c2"}, - {file = 
"smmap-4.0.0.tar.gz", hash = "sha256:7e65386bd122d45405ddf795637b7f7d2b532e7e401d46bbe3fb49b9986d5182"}, + {file = "smmap-5.0.0-py3-none-any.whl", hash = "sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94"}, + {file = "smmap-5.0.0.tar.gz", hash = "sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936"}, ] snowballstemmer = [ {file = "snowballstemmer-2.1.0-py2.py3-none-any.whl", hash = "sha256:b51b447bea85f9968c13b650126a888aabd4cb4463fca868ec596826325dedc2"}, @@ -2438,16 +2538,16 @@ speedcopy = [ {file = "speedcopy-2.1.0.tar.gz", hash = "sha256:8bb1a6c735900b83901a7be84ba2175ed3887c13c6786f97dea48f2ea7d504c2"}, ] sphinx = [ - {file = "Sphinx-4.0.2-py3-none-any.whl", hash = "sha256:d1cb10bee9c4231f1700ec2e24a91be3f3a3aba066ea4ca9f3bbe47e59d5a1d4"}, - {file = "Sphinx-4.0.2.tar.gz", hash = "sha256:b5c2ae4120bf00c799ba9b3699bc895816d272d120080fbc967292f29b52b48c"}, + {file = "Sphinx-3.5.3-py3-none-any.whl", hash = "sha256:3f01732296465648da43dec8fb40dc451ba79eb3e2cc5c6d79005fd98197107d"}, + {file = "Sphinx-3.5.3.tar.gz", hash = "sha256:ce9c228456131bab09a3d7d10ae58474de562a6f79abb3dc811ae401cf8c1abc"}, ] sphinx-qt-documentation = [ {file = "sphinx_qt_documentation-0.3-py3-none-any.whl", hash = "sha256:bee247cb9e4fc03fc496d07adfdb943100e1103320c3e5e820e0cfa7c790d9b6"}, {file = "sphinx_qt_documentation-0.3.tar.gz", hash = "sha256:f09a0c9d9e989172ba3e282b92bf55613bb23ad47315ec5b0d38536b343ac6c8"}, ] sphinx-rtd-theme = [ - {file = "sphinx_rtd_theme-0.5.2-py2.py3-none-any.whl", hash = "sha256:4a05bdbe8b1446d77a01e20a23ebc6777c74f43237035e76be89699308987d6f"}, - {file = "sphinx_rtd_theme-0.5.2.tar.gz", hash = "sha256:32bd3b5d13dc8186d7a42fc816a23d32e83a4827d7d9882948e7b837c232da5a"}, + {file = "sphinx_rtd_theme-0.5.1-py2.py3-none-any.whl", hash = "sha256:fa6bebd5ab9a73da8e102509a86f3fcc36dec04a0b52ea80e5a033b2aba00113"}, + {file = "sphinx_rtd_theme-0.5.1.tar.gz", hash = 
"sha256:eda689eda0c7301a80cf122dad28b1861e5605cbf455558f3775e1e8200e83a5"}, ] sphinxcontrib-applehelp = [ {file = "sphinxcontrib-applehelp-1.0.2.tar.gz", hash = "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58"}, @@ -2489,6 +2589,10 @@ toml = [ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, ] +tomli = [ + {file = "tomli-1.2.2-py3-none-any.whl", hash = "sha256:f04066f68f5554911363063a30b108d2b5a5b1a010aa8b6132af78489fe3aade"}, + {file = "tomli-1.2.2.tar.gz", hash = "sha256:c6ce0015eb38820eaf32b5db832dbc26deb3dd427bd5f6556cf0acac2c214fee"}, +] typed-ast = [ {file = "typed_ast-1.4.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6"}, {file = "typed_ast-1.4.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075"}, @@ -2522,17 +2626,17 @@ typed-ast = [ {file = "typed_ast-1.4.3.tar.gz", hash = "sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65"}, ] typing-extensions = [ - {file = "typing_extensions-3.10.0.0-py2-none-any.whl", hash = "sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497"}, - {file = "typing_extensions-3.10.0.0-py3-none-any.whl", hash = "sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84"}, - {file = "typing_extensions-3.10.0.0.tar.gz", hash = "sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342"}, + {file = "typing_extensions-3.10.0.2-py2-none-any.whl", hash = "sha256:d8226d10bc02a29bcc81df19a26e56a9647f8b0a6d4a83924139f4a8b01f17b7"}, + {file = "typing_extensions-3.10.0.2-py3-none-any.whl", hash = "sha256:f1d25edafde516b146ecd0613dabcc61409817af4766fbbcfb8d1ad4ec441a34"}, + {file = "typing_extensions-3.10.0.2.tar.gz", hash = 
"sha256:49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e"}, ] uritemplate = [ {file = "uritemplate-3.0.1-py2.py3-none-any.whl", hash = "sha256:07620c3f3f8eed1f12600845892b0e036a2420acf513c53f7de0abd911a5894f"}, {file = "uritemplate-3.0.1.tar.gz", hash = "sha256:5af8ad10cec94f215e3f48112de2022e1d5a37ed427fbd88652fa908f2ab7cae"}, ] urllib3 = [ - {file = "urllib3-1.26.5-py2.py3-none-any.whl", hash = "sha256:753a0374df26658f99d826cfe40394a686d05985786d946fbe4165b5148f5a7c"}, - {file = "urllib3-1.26.5.tar.gz", hash = "sha256:a7acd0977125325f516bda9735fa7142b909a8d01e8b2e4c8108d0984e6e0098"}, + {file = "urllib3-1.26.7-py2.py3-none-any.whl", hash = "sha256:c4fdf4019605b6e5423637e01bc9fe4daef873709a7973e195ceba0a62bbc844"}, + {file = "urllib3-1.26.7.tar.gz", hash = "sha256:4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece"}, ] wcwidth = [ {file = "wcwidth-0.2.5-py2.py3-none-any.whl", hash = "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784"}, @@ -2543,52 +2647,130 @@ websocket-client = [ {file = "websocket_client-0.59.0-py2.py3-none-any.whl", hash = "sha256:2e50d26ca593f70aba7b13a489435ef88b8fc3b5c5643c1ce8808ff9b40f0b32"}, ] wrapt = [ - {file = "wrapt-1.12.1.tar.gz", hash = "sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7"}, + {file = "wrapt-1.13.2-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:3de7b4d3066cc610054e7aa2c005645e308df2f92be730aae3a47d42e910566a"}, + {file = "wrapt-1.13.2-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:8164069f775c698d15582bf6320a4f308c50d048c1c10cf7d7a341feaccf5df7"}, + {file = "wrapt-1.13.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9adee1891253670575028279de8365c3a02d3489a74a66d774c321472939a0b1"}, + {file = "wrapt-1.13.2-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:a70d876c9aba12d3bd7f8f1b05b419322c6789beb717044eea2c8690d35cb91b"}, + {file = "wrapt-1.13.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = 
"sha256:3f87042623530bcffea038f824b63084180513c21e2e977291a9a7e65a66f13b"}, + {file = "wrapt-1.13.2-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:e634136f700a21e1fcead0c137f433dde928979538c14907640607d43537d468"}, + {file = "wrapt-1.13.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:3e33c138d1e3620b1e0cc6fd21e46c266393ed5dae0d595b7ed5a6b73ed57aa0"}, + {file = "wrapt-1.13.2-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:283e402e5357e104ac1e3fba5791220648e9af6fb14ad7d9cc059091af2b31d2"}, + {file = "wrapt-1.13.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:ccb34ce599cab7f36a4c90318697ead18312c67a9a76327b3f4f902af8f68ea1"}, + {file = "wrapt-1.13.2-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:fbad5ba74c46517e6488149514b2e2348d40df88cd6b52a83855b7a8bf04723f"}, + {file = "wrapt-1.13.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:724ed2bc9c91a2b9026e5adce310fa60c6e7c8760b03391445730b9789b9d108"}, + {file = "wrapt-1.13.2-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:83f2793ec6f3ef513ad8d5b9586f5ee6081cad132e6eae2ecb7eac1cc3decae0"}, + {file = "wrapt-1.13.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:0473d1558b93e314e84313cc611f6c86be779369f9d3734302bf185a4d2625b1"}, + {file = "wrapt-1.13.2-cp35-cp35m-win32.whl", hash = "sha256:15eee0e6fd07f48af2f66d0e6f2ff1916ffe9732d464d5e2390695296872cad9"}, + {file = "wrapt-1.13.2-cp35-cp35m-win_amd64.whl", hash = "sha256:bc85d17d90201afd88e3d25421da805e4e135012b5d1f149e4de2981394b2a52"}, + {file = "wrapt-1.13.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c6ee5f8734820c21b9b8bf705e99faba87f21566d20626568eeb0d62cbeaf23c"}, + {file = "wrapt-1.13.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:53c6706a1bcfb6436f1625511b95b812798a6d2ccc51359cd791e33722b5ea32"}, + {file = "wrapt-1.13.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:fbe6aebc9559fed7ea27de51c2bf5c25ba2a4156cf0017556f72883f2496ee9a"}, + {file = "wrapt-1.13.2-cp36-cp36m-manylinux2010_i686.whl", hash = 
"sha256:0582180566e7a13030f896c2f1ac6a56134ab5f3c3f4c5538086f758b1caf3f2"}, + {file = "wrapt-1.13.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:bff0a59387a0a2951cb869251257b6553663329a1b5525b5226cab8c88dcbe7e"}, + {file = "wrapt-1.13.2-cp36-cp36m-win32.whl", hash = "sha256:df3eae297a5f1594d1feb790338120f717dac1fa7d6feed7b411f87e0f2401c7"}, + {file = "wrapt-1.13.2-cp36-cp36m-win_amd64.whl", hash = "sha256:1eb657ed84f4d3e6ad648483c8a80a0cf0a78922ef94caa87d327e2e1ad49b48"}, + {file = "wrapt-1.13.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0cdedf681db878416c05e1831ec69691b0e6577ac7dca9d4f815632e3549580"}, + {file = "wrapt-1.13.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:87ee3c73bdfb4367b26c57259995935501829f00c7b3eed373e2ad19ec21e4e4"}, + {file = "wrapt-1.13.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:3e0d16eedc242d01a6f8cf0623e9cdc3b869329da3f97a15961d8864111d8cf0"}, + {file = "wrapt-1.13.2-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:8318088860968c07e741537030b1abdd8908ee2c71fbe4facdaade624a09e006"}, + {file = "wrapt-1.13.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:d90520616fce71c05dedeac3a0fe9991605f0acacd276e5f821842e454485a70"}, + {file = "wrapt-1.13.2-cp37-cp37m-win32.whl", hash = "sha256:22142afab65daffc95863d78effcbd31c19a8003eca73de59f321ee77f73cadb"}, + {file = "wrapt-1.13.2-cp37-cp37m-win_amd64.whl", hash = "sha256:d0d717e10f952df7ea41200c507cc7e24458f4c45b56c36ad418d2e79dacd1d4"}, + {file = "wrapt-1.13.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:593cb049ce1c391e0288523b30426c4430b26e74c7e6f6e2844bd99ac7ecc831"}, + {file = "wrapt-1.13.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:8860c8011a6961a651b1b9f46fdbc589ab63b0a50d645f7d92659618a3655867"}, + {file = "wrapt-1.13.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:ada5e29e59e2feb710589ca1c79fd989b1dd94d27079dc1d199ec954a6ecc724"}, + {file = "wrapt-1.13.2-cp38-cp38-manylinux2010_i686.whl", hash = 
"sha256:fdede980273aeca591ad354608778365a3a310e0ecdd7a3587b38bc5be9b1808"}, + {file = "wrapt-1.13.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:af9480de8e63c5f959a092047aaf3d7077422ded84695b3398f5d49254af3e90"}, + {file = "wrapt-1.13.2-cp38-cp38-win32.whl", hash = "sha256:c65e623ea7556e39c4f0818200a046cbba7575a6b570ff36122c276fdd30ab0a"}, + {file = "wrapt-1.13.2-cp38-cp38-win_amd64.whl", hash = "sha256:b20703356cae1799080d0ad15085dc3213c1ac3f45e95afb9f12769b98231528"}, + {file = "wrapt-1.13.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1c5c4cf188b5643a97e87e2110bbd4f5bc491d54a5b90633837b34d5df6a03fe"}, + {file = "wrapt-1.13.2-cp39-cp39-manylinux1_i686.whl", hash = "sha256:82223f72eba6f63eafca87a0f614495ae5aa0126fe54947e2b8c023969e9f2d7"}, + {file = "wrapt-1.13.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:81a4cf257263b299263472d669692785f9c647e7dca01c18286b8f116dbf6b38"}, + {file = "wrapt-1.13.2-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:728e2d9b7a99dd955d3426f237b940fc74017c4a39b125fec913f575619ddfe9"}, + {file = "wrapt-1.13.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:7574de567dcd4858a2ffdf403088d6df8738b0e1eabea220553abf7c9048f59e"}, + {file = "wrapt-1.13.2-cp39-cp39-win32.whl", hash = "sha256:c7ac2c7a8e34bd06710605b21dd1f3576764443d68e069d2afba9b116014d072"}, + {file = "wrapt-1.13.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e6d1a8eeef415d7fb29fe017de0e48f45e45efd2d1bfda28fc50b7b330859ef"}, + {file = "wrapt-1.13.2.tar.gz", hash = "sha256:dca56cc5963a5fd7c2aa8607017753f534ee514e09103a6c55d2db70b50e7447"}, ] wsrpc-aiohttp = [ {file = "wsrpc-aiohttp-3.2.0.tar.gz", hash = "sha256:f467abc51bcdc760fc5aeb7041abdeef46eeca3928dc43dd6e7fa7a533563818"}, {file = "wsrpc_aiohttp-3.2.0-py3-none-any.whl", hash = "sha256:fa9b0bf5cb056898cb5c9f64cbc5eacb8a5dd18ab1b7f0cd4a2208b4a7fde282"}, ] yarl = [ - {file = "yarl-1.6.3-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434"}, - 
{file = "yarl-1.6.3-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478"}, - {file = "yarl-1.6.3-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6"}, - {file = "yarl-1.6.3-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e"}, - {file = "yarl-1.6.3-cp36-cp36m-manylinux2014_ppc64le.whl", hash = "sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406"}, - {file = "yarl-1.6.3-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76"}, - {file = "yarl-1.6.3-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366"}, - {file = "yarl-1.6.3-cp36-cp36m-win32.whl", hash = "sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721"}, - {file = "yarl-1.6.3-cp36-cp36m-win_amd64.whl", hash = "sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643"}, - {file = "yarl-1.6.3-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e"}, - {file = "yarl-1.6.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3"}, - {file = "yarl-1.6.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8"}, - {file = "yarl-1.6.3-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a"}, - {file = "yarl-1.6.3-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c"}, - {file = "yarl-1.6.3-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f"}, - {file = 
"yarl-1.6.3-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970"}, - {file = "yarl-1.6.3-cp37-cp37m-win32.whl", hash = "sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e"}, - {file = "yarl-1.6.3-cp37-cp37m-win_amd64.whl", hash = "sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50"}, - {file = "yarl-1.6.3-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2"}, - {file = "yarl-1.6.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec"}, - {file = "yarl-1.6.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71"}, - {file = "yarl-1.6.3-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc"}, - {file = "yarl-1.6.3-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959"}, - {file = "yarl-1.6.3-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2"}, - {file = "yarl-1.6.3-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2"}, - {file = "yarl-1.6.3-cp38-cp38-win32.whl", hash = "sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896"}, - {file = "yarl-1.6.3-cp38-cp38-win_amd64.whl", hash = "sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a"}, - {file = "yarl-1.6.3-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e"}, - {file = "yarl-1.6.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724"}, - {file = "yarl-1.6.3-cp39-cp39-manylinux2014_aarch64.whl", hash = 
"sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c"}, - {file = "yarl-1.6.3-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25"}, - {file = "yarl-1.6.3-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96"}, - {file = "yarl-1.6.3-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0"}, - {file = "yarl-1.6.3-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4"}, - {file = "yarl-1.6.3-cp39-cp39-win32.whl", hash = "sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424"}, - {file = "yarl-1.6.3-cp39-cp39-win_amd64.whl", hash = "sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6"}, - {file = "yarl-1.6.3.tar.gz", hash = "sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10"}, + {file = "yarl-1.7.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e35d8230e4b08d86ea65c32450533b906a8267a87b873f2954adeaecede85169"}, + {file = "yarl-1.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eb4b3f277880c314e47720b4b6bb2c85114ab3c04c5442c9bc7006b3787904d8"}, + {file = "yarl-1.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c7015dcedb91d90a138eebdc7e432aec8966e0147ab2a55f2df27b1904fa7291"}, + {file = "yarl-1.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb3e478175e15e00d659fb0354a6a8db71a7811a2a5052aed98048bc972e5d2b"}, + {file = "yarl-1.7.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8b8c409aa3a7966647e7c1c524846b362a6bcbbe120bf8a176431f940d2b9a2e"}, + {file = "yarl-1.7.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b22ea41c7e98170474a01e3eded1377d46b2dfaef45888a0005c683eaaa49285"}, + {file = 
"yarl-1.7.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a7dfc46add4cfe5578013dbc4127893edc69fe19132d2836ff2f6e49edc5ecd6"}, + {file = "yarl-1.7.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:82ff6f85f67500a4f74885d81659cd270eb24dfe692fe44e622b8a2fd57e7279"}, + {file = "yarl-1.7.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f3cd2158b2ed0fb25c6811adfdcc47224efe075f2d68a750071dacc03a7a66e4"}, + {file = "yarl-1.7.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:59c0f13f9592820c51280d1cf811294d753e4a18baf90f0139d1dc93d4b6fc5f"}, + {file = "yarl-1.7.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7f7655ad83d1a8afa48435a449bf2f3009293da1604f5dd95b5ddcf5f673bd69"}, + {file = "yarl-1.7.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:aa9f0d9b62d15182341b3e9816582f46182cab91c1a57b2d308b9a3c4e2c4f78"}, + {file = "yarl-1.7.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fdd1b90c225a653b1bd1c0cae8edf1957892b9a09c8bf7ee6321eeb8208eac0f"}, + {file = "yarl-1.7.0-cp310-cp310-win32.whl", hash = "sha256:7c8d0bb76eabc5299db203e952ec55f8f4c53f08e0df4285aac8c92bd9e12675"}, + {file = "yarl-1.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:622a36fa779efb4ff9eff5fe52730ff17521431379851a31e040958fc251670c"}, + {file = "yarl-1.7.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:3d461b7a8e139b9e4b41f62eb417ffa0b98d1c46d4caf14c845e6a3b349c0bb1"}, + {file = "yarl-1.7.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81cfacdd1e40bc931b5519499342efa388d24d262c30a3d31187bfa04f4a7001"}, + {file = "yarl-1.7.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:821b978f2152be7695d4331ef0621d207aedf9bbd591ba23a63412a3efc29a01"}, + {file = "yarl-1.7.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:b64bd24c8c9a487f4a12260dc26732bf41028816dbf0c458f17864fbebdb3131"}, + {file = "yarl-1.7.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:98c9ddb92b60a83c21be42c776d3d9d5ec632a762a094c41bda37b7dfbd2cd83"}, + {file = "yarl-1.7.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a532d75ca74431c053a88a802e161fb3d651b8bf5821a3440bc3616e38754583"}, + {file = "yarl-1.7.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:053e09817eafb892e94e172d05406c1b3a22a93bc68f6eff5198363a3d764459"}, + {file = "yarl-1.7.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:98c51f02d542945d306c8e934aa2c1e66ba5e9c1c86b5bf37f3a51c8a747067e"}, + {file = "yarl-1.7.0-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:15ec41a5a5fdb7bace6d7b16701f9440007a82734f69127c0fbf6d87e10f4a1e"}, + {file = "yarl-1.7.0-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:a7f08819dba1e1255d6991ed37448a1bf4b1352c004bcd899b9da0c47958513d"}, + {file = "yarl-1.7.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:8e3ffab21db0542ffd1887f3b9575ddd58961f2cf61429cb6458afc00c4581e0"}, + {file = "yarl-1.7.0-cp36-cp36m-win32.whl", hash = "sha256:50127634f519b2956005891507e3aa4ac345f66a7ea7bbc2d7dcba7401f41898"}, + {file = "yarl-1.7.0-cp36-cp36m-win_amd64.whl", hash = "sha256:36ec44f15193f6d5288d42ebb8e751b967ebdfb72d6830983838d45ab18edb4f"}, + {file = "yarl-1.7.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ec1b5a25a25c880c976d0bb3d107def085bb08dbb3db7f4442e0a2b980359d24"}, + {file = "yarl-1.7.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b36f5a63c891f813c6f04ef19675b382efc190fd5ce7e10ab19386d2548bca06"}, + {file = "yarl-1.7.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38173b8c3a29945e7ecade9a3f6ff39581eee8201338ee6a2c8882db5df3e806"}, + {file = 
"yarl-1.7.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ba402f32184f0b405fb281b93bd0d8ab7e3257735b57b62a6ed2e94cdf4fe50"}, + {file = "yarl-1.7.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:be52bc5208d767cdd8308a9e93059b3b36d1e048fecbea0e0346d0d24a76adc0"}, + {file = "yarl-1.7.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:08c2044a956f4ef30405f2f433ce77f1f57c2c773bf81ae43201917831044d5a"}, + {file = "yarl-1.7.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:484d61c047c45670ef5967653a1d0783e232c54bf9dd786a7737036828fa8d54"}, + {file = "yarl-1.7.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b7de92a4af85cfcaf4081f8aa6165b1d63ee5de150af3ee85f954145f93105a7"}, + {file = "yarl-1.7.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:376e41775aab79c5575534924a386c8e0f1a5d91db69fc6133fd27a489bcaf10"}, + {file = "yarl-1.7.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:8a8b10d0e7bac154f959b709fcea593cda527b234119311eb950096653816a86"}, + {file = "yarl-1.7.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:f46cd4c43e6175030e2a56def8f1d83b64e6706eeb2bb9ab0ef4756f65eab23f"}, + {file = "yarl-1.7.0-cp37-cp37m-win32.whl", hash = "sha256:b28cfb46140efe1a6092b8c5c4994a1fe70dc83c38fbcea4992401e0c6fb9cce"}, + {file = "yarl-1.7.0-cp37-cp37m-win_amd64.whl", hash = "sha256:9624154ec9c02a776802da1086eed7f5034bd1971977f5146233869c2ac80297"}, + {file = "yarl-1.7.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:69945d13e1bbf81784a9bc48824feb9cd66491e6a503d4e83f6cd7c7cc861361"}, + {file = "yarl-1.7.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:46a742ed9e363bd01be64160ce7520e92e11989bd4cb224403cfd31c101cc83d"}, + {file = "yarl-1.7.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb4ff1ac7cb4500f43581b3f4cbd627d702143aa6be1fdc1fa3ebffaf4dc1be5"}, + {file = 
"yarl-1.7.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ad51e17cd65ea3debb0e10f0120cf8dd987c741fe423ed2285087368090b33d"}, + {file = "yarl-1.7.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7e37786ea89a5d3ffbbf318ea9790926f8dfda83858544f128553c347ad143c6"}, + {file = "yarl-1.7.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c63c1e208f800daad71715786bfeb1cecdc595d87e2e9b1cd234fd6e597fd71d"}, + {file = "yarl-1.7.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:91cbe24300c11835ef186436363352b3257db7af165e0a767f4f17aa25761388"}, + {file = "yarl-1.7.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e510dbec7c59d32eaa61ffa48173d5e3d7170a67f4a03e8f5e2e9e3971aca622"}, + {file = "yarl-1.7.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3def6e681cc02397e5d8141ee97b41d02932b2bcf0fb34532ad62855eab7c60e"}, + {file = "yarl-1.7.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:263c81b94e6431942b27f6f671fa62f430a0a5c14bb255f2ab69eeb9b2b66ff7"}, + {file = "yarl-1.7.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:e78c91faefe88d601ddd16e3882918dbde20577a2438e2320f8239c8b7507b8f"}, + {file = "yarl-1.7.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:22b2430c49713bfb2f0a0dd4a8d7aab218b28476ba86fd1c78ad8899462cbcf2"}, + {file = "yarl-1.7.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2e7ad9db939082f5d0b9269cfd92c025cb8f2fbbb1f1b9dc5a393c639db5bd92"}, + {file = "yarl-1.7.0-cp38-cp38-win32.whl", hash = "sha256:3a31e4a8dcb1beaf167b7e7af61b88cb961b220db8d3ba1c839723630e57eef7"}, + {file = "yarl-1.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:d579957439933d752358c6a300c93110f84aae67b63dd0c19dde6ecbf4056f6b"}, + {file = "yarl-1.7.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:87721b549505a546eb003252185103b5ec8147de6d3ad3714d148a5a67b6fe53"}, + 
{file = "yarl-1.7.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a1fa866fa24d9f4108f9e58ea8a2135655419885cdb443e36b39a346e1181532"}, + {file = "yarl-1.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1d3b8449dfedfe94eaff2b77954258b09b24949f6818dfa444b05dbb05ae1b7e"}, + {file = "yarl-1.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db2372e350794ce8b9f810feb094c606b7e0e4aa6807141ac4fadfe5ddd75bb0"}, + {file = "yarl-1.7.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a06d9d0b9a97fa99b84fee71d9dd11e69e21ac8a27229089f07b5e5e50e8d63c"}, + {file = "yarl-1.7.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a3455c2456d6307bcfa80bc1157b8603f7d93573291f5bdc7144489ca0df4628"}, + {file = "yarl-1.7.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d30d67e3486aea61bb2cbf7cf81385364c2e4f7ce7469a76ed72af76a5cdfe6b"}, + {file = "yarl-1.7.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c18a4b286e8d780c3a40c31d7b79836aa93b720f71d5743f20c08b7e049ca073"}, + {file = "yarl-1.7.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d54c925396e7891666cabc0199366ca55b27d003393465acef63fd29b8b7aa92"}, + {file = "yarl-1.7.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:64773840952de17851a1c7346ad7f71688c77e74248d1f0bc230e96680f84028"}, + {file = "yarl-1.7.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:acbf1756d9dc7cd0ae943d883be72e84e04396f6c2ff93a6ddeca929d562039f"}, + {file = "yarl-1.7.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:2e48f27936aa838939c798f466c851ba4ae79e347e8dfce43b009c64b930df12"}, + {file = "yarl-1.7.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1beef4734ca1ad40a9d8c6b20a76ab46e3a2ed09f38561f01e4aa2ea82cafcef"}, + {file = "yarl-1.7.0-cp39-cp39-win32.whl", hash = 
"sha256:8ee78c9a5f3c642219d4607680a4693b59239c27a3aa608b64ef79ddc9698039"}, + {file = "yarl-1.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:d750503682605088a14d29a4701548c15c510da4f13c8b17409c4097d5b04c52"}, + {file = "yarl-1.7.0.tar.gz", hash = "sha256:8e7ebaf62e19c2feb097ffb7c94deb0f0c9fab52590784c8cd679d30ab009162"}, ] zipp = [ - {file = "zipp-3.4.1-py3-none-any.whl", hash = "sha256:51cb66cc54621609dd593d1787f286ee42a5c0adbb4b29abea5a63edc3e03098"}, - {file = "zipp-3.4.1.tar.gz", hash = "sha256:3607921face881ba3e026887d8150cca609d517579abe052ac81fc5aeffdbd76"}, + {file = "zipp-3.6.0-py3-none-any.whl", hash = "sha256:9fe5ea21568a0a70e50f273397638d39b03353731e6cbbb3fd8502a33fec40bc"}, + {file = "zipp-3.6.0.tar.gz", hash = "sha256:71c644c5369f4a6e07636f0aa966270449561fcea2e3d6747b8d23efaa9d7832"}, ] diff --git a/pyproject.toml b/pyproject.toml index 8dd8664eae..ac1d133561 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "OpenPype" -version = "3.6.0-nightly.5" # OpenPype +version = "3.7.0-nightly.3" # OpenPype description = "Open VFX and Animation pipeline with support." 
authors = ["OpenPype Team "] license = "MIT License" diff --git a/start.py b/start.py index 61d8d6a64a..0f7e82071d 100644 --- a/start.py +++ b/start.py @@ -100,6 +100,7 @@ import platform import traceback import subprocess import site +import distutils.spawn from pathlib import Path # OPENPYPE_ROOT is variable pointing to build (or code) directory @@ -384,23 +385,6 @@ def set_modules_environments(): os.environ.update(env) -def is_tool(name): - try: - import os.errno as errno - except ImportError: - import errno - - try: - devnull = open(os.devnull, "w") - subprocess.Popen( - [name], stdout=devnull, stderr=devnull - ).communicate() - except OSError as exc: - if exc.errno == errno.ENOENT: - return False - return True - - def _startup_validations(): """Validations before OpenPype starts.""" try: @@ -443,7 +427,8 @@ def _validate_thirdparty_binaries(): if low_platform == "windows": ffmpeg_dir = os.path.join(ffmpeg_dir, "bin") ffmpeg_executable = os.path.join(ffmpeg_dir, "ffmpeg") - if not is_tool(ffmpeg_executable): + ffmpeg_result = distutils.spawn.find_executable(ffmpeg_executable) + if ffmpeg_result is None: raise RuntimeError(error_msg.format("FFmpeg")) # Validate existence of OpenImageIO (not on MacOs) @@ -463,8 +448,11 @@ def _validate_thirdparty_binaries(): low_platform, "oiiotool" ) - if oiio_tool_path is not None and not is_tool(oiio_tool_path): - raise RuntimeError(error_msg.format("OpenImageIO")) + oiio_result = None + if oiio_tool_path is not None: + oiio_result = distutils.spawn.find_executable(oiio_tool_path) + if oiio_result is None: + raise RuntimeError(error_msg.format("OpenImageIO")) def _process_arguments() -> tuple: diff --git a/tests/openpype/modules/default_modules/royal_render/test_rr_job.py b/tests/openpype/modules/default_modules/royal_render/test_rr_job.py new file mode 100644 index 0000000000..ab8b1bfd50 --- /dev/null +++ b/tests/openpype/modules/default_modules/royal_render/test_rr_job.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +"""Test 
suite for User Settings.""" +# import pytest +# from openpype.modules import ModulesManager + + +def test_rr_job(): + # manager = ModulesManager() + # rr_module = manager.modules_by_name["royalrender"] + ... diff --git a/tests/unit/igniter/test_bootstrap_repos.py b/tests/unit/igniter/test_bootstrap_repos.py index 740a71a5ce..d6e861c262 100644 --- a/tests/unit/igniter/test_bootstrap_repos.py +++ b/tests/unit/igniter/test_bootstrap_repos.py @@ -140,7 +140,7 @@ def test_search_string_for_openpype_version(printer): ] for ver_string in strings: printer(f"testing {ver_string[0]} should be {ver_string[1]}") - assert OpenPypeVersion.version_in_str(ver_string[0])[0] == \ + assert OpenPypeVersion.version_in_str(ver_string[0]) == \ ver_string[1] diff --git a/tools/run_mongo.ps1 b/tools/run_mongo.ps1 index 32f6cfed17..f6fa37207d 100644 --- a/tools/run_mongo.ps1 +++ b/tools/run_mongo.ps1 @@ -113,7 +113,7 @@ $port = 2707 # path to database $dbpath = (Get-Item $openpype_root).parent.FullName + "\mongo_db_data" -$preferred_version = "4.0" +$preferred_version = "5.0" $mongoPath = Find-Mongo $preferred_version Write-Host ">>> " -NoNewLine -ForegroundColor Green diff --git a/vendor/deadline/custom/plugins/GlobalJobPreLoad.py b/vendor/deadline/custom/plugins/GlobalJobPreLoad.py index 8631b035cf..0aa5adaa20 100644 --- a/vendor/deadline/custom/plugins/GlobalJobPreLoad.py +++ b/vendor/deadline/custom/plugins/GlobalJobPreLoad.py @@ -16,8 +16,9 @@ def inject_openpype_environment(deadlinePlugin): job = deadlinePlugin.GetJob() job = RepositoryUtils.GetJob(job.JobId, True) # invalidates cache - print("inject_openpype_environment start") + print(">>> Injecting OpenPype environments ...") try: + print(">>> Getting OpenPype executable ...") exe_list = job.GetJobExtraInfoKeyValue("openpype_executables") openpype_app = FileUtils.SearchFileList(exe_list) if openpype_app == "": @@ -27,11 +28,13 @@ def inject_openpype_environment(deadlinePlugin): "The path to the render executable can be configured " 
+ "from the Plugin Configuration in the Deadline Monitor.") + print("--- OpenPype executable: {}".format(openpype_app)) + # tempfile.TemporaryFile cannot be used because of locking export_url = os.path.join(tempfile.gettempdir(), time.strftime('%Y%m%d%H%M%S'), 'env.json') # add HHMMSS + delete later - print("export_url {}".format(export_url)) + print(">>> Temporary path: {}".format(export_url)) args = [ openpype_app, @@ -55,41 +58,52 @@ def inject_openpype_environment(deadlinePlugin): "AVALON_TASK, AVALON_APP_NAME" raise RuntimeError(msg) - print("args:::{}".format(args)) + if not os.environ.get("OPENPYPE_MONGO"): + print(">>> Missing OPENPYPE_MONGO env var, process won't work") - exit_code = subprocess.call(args, cwd=os.path.dirname(openpype_app)) - if exit_code != 0: - raise RuntimeError("Publishing failed, check worker's log") + env = os.environ + env["OPENPYPE_HEADLESS_MODE"] = "1" + env["AVALON_TIMEOUT"] = "5000" + print(">>> Executing: {}".format(args)) + std_output = subprocess.check_output(args, + cwd=os.path.dirname(openpype_app), + env=env) + print(">>> Process result {}".format(std_output)) + + print(">>> Loading file ...") with open(export_url) as fp: contents = json.load(fp) for key, value in contents.items(): deadlinePlugin.SetProcessEnvironmentVariable(key, value) + print(">>> Removing temporary file") os.remove(export_url) - print("inject_openpype_environment end") - except Exception: + print(">> Injection end.") + except Exception as e: + if hasattr(e, "output"): + print(">>> Exception {}".format(e.output)) import traceback print(traceback.format_exc()) - print("inject_openpype_environment failed") + print("!!! 
Injection failed.") RepositoryUtils.FailJob(job) raise def inject_render_job_id(deadlinePlugin): """Inject dependency ids to publish process as env var for validation.""" - print("inject_render_job_id start") + print(">>> Injecting render job id ...") job = deadlinePlugin.GetJob() job = RepositoryUtils.GetJob(job.JobId, True) # invalidates cache dependency_ids = job.JobDependencyIDs - print("dependency_ids {}".format(dependency_ids)) + print(">>> Dependency IDs: {}".format(dependency_ids)) render_job_ids = ",".join(dependency_ids) deadlinePlugin.SetProcessEnvironmentVariable("RENDER_JOB_IDS", render_job_ids) - print("inject_render_job_id end") + print(">>> Injection end.") def pype_command_line(executable, arguments, workingDirectory): @@ -133,10 +147,13 @@ def pype(deadlinePlugin): deadlinePlugin: Deadline job plugin passed by Deadline """ + print(">>> Getting job ...") job = deadlinePlugin.GetJob() # PYPE should be here, not OPENPYPE - backward compatibility!! pype_metadata = job.GetJobEnvironmentKeyValue("PYPE_METADATA_FILE") pype_python = job.GetJobEnvironmentKeyValue("PYPE_PYTHON_EXE") + print(">>> Having backward compatible env vars {}/{}".format(pype_metadata, + pype_python)) # test if it is pype publish job. if pype_metadata: pype_metadata = RepositoryUtils.CheckPathMapping(pype_metadata) @@ -162,6 +179,8 @@ def pype(deadlinePlugin): def __main__(deadlinePlugin): + print("*** GlobalJobPreload start ...") + print(">>> Getting job ...") job = deadlinePlugin.GetJob() job = RepositoryUtils.GetJob(job.JobId, True) # invalidates cache @@ -170,6 +189,8 @@ def __main__(deadlinePlugin): openpype_publish_job = \ job.GetJobEnvironmentKeyValue('OPENPYPE_PUBLISH_JOB') or '0' + print("--- Job type - render {}".format(openpype_render_job)) + print("--- Job type - publish {}".format(openpype_publish_job)) if openpype_publish_job == '1' and openpype_render_job == '1': raise RuntimeError("Misconfiguration. 
Job couldn't be both " + "render and publish.") diff --git a/website/docs/admin_openpype_commands.md b/website/docs/admin_openpype_commands.md index 7a46ee7906..0831cf4f5a 100644 --- a/website/docs/admin_openpype_commands.md +++ b/website/docs/admin_openpype_commands.md @@ -32,7 +32,10 @@ For more information [see here](admin_use#run-openpype). | Command | Description | Arguments | | --- | --- |: --- :| -| tray | Launch OpenPype Tray. | [📑](#tray-arguments) +| contextselection | Open Context selection dialog. | | +| module | Run command line arguments for modules. | | +| repack-version | Tool to re-create version zip. | [📑](#repack-version-arguments) | +| tray | Launch OpenPype Tray. | [📑](#tray-arguments) | eventserver | This should be ideally used by system service (such as systemd or upstart on linux and window service). | [📑](#eventserver-arguments) | | launch | Launch application in Pype environment. | [📑](#launch-arguments) | | publish | Pype takes JSON from provided path and use it to publish data in it. | [📑](#publish-arguments) | @@ -156,4 +159,10 @@ openpypeconsole settings `standalonepublisher` has no command-line arguments. ```shell openpype_console standalonepublisher -``` \ No newline at end of file +``` + +### `repack-version` arguments {#repack-version-arguments} +Takes path to unzipped and possibly modified OpenPype version. Files will be +zipped, checksums recalculated and version will be determined by folder name +(and written to `version.py`). 
+ diff --git a/website/docs/admin_settings_project_anatomy.md b/website/docs/admin_settings_project_anatomy.md index 54023d468f..30784686e2 100644 --- a/website/docs/admin_settings_project_anatomy.md +++ b/website/docs/admin_settings_project_anatomy.md @@ -57,7 +57,9 @@ We have a few required anatomy templates for OpenPype to work properly, however | `project[code]` | Project's code | | `hierarchy` | All hierarchical parents as subfolders | | `asset` | Name of asset or shot | -| `task` | Name of task | +| `task[name]` | Name of task | +| `task[type]` | Type of task | +| `task[short]` | Shortname of task | | `version` | Version number | | `subset` | Subset name | | `family` | Main family name | diff --git a/website/docs/assets/site_sync_always_on.png b/website/docs/assets/site_sync_always_on.png new file mode 100644 index 0000000000..712adf173b Binary files /dev/null and b/website/docs/assets/site_sync_always_on.png differ diff --git a/website/docs/assets/site_sync_system_sites.png b/website/docs/assets/site_sync_system_sites.png new file mode 100644 index 0000000000..e9f895c743 Binary files /dev/null and b/website/docs/assets/site_sync_system_sites.png differ diff --git a/website/docs/module_site_sync.md b/website/docs/module_site_sync.md index b0604ed3cf..571da60ceb 100644 --- a/website/docs/module_site_sync.md +++ b/website/docs/module_site_sync.md @@ -27,6 +27,38 @@ To use synchronization, *Site Sync* needs to be enabled globally in **OpenPype S ![Configure module](assets/site_sync_system.png) +### Sites + +By default there are two sites created for each OpenPype installation: +- **studio** - default site - usually a centralized mounted disk accessible to all artists. Studio site is used if Site Sync is disabled. +- **local** - each workstation or server running OpenPype Tray receives its own with unique site name. Workstation refers to itself as "local"however all other sites will see it under it's unique ID. 
+ +Artists can explore their site ID by opening OpenPype Info tool by clicking on a version number in the tray app. + +Many different sites can be created and configured on the system level, and some or all can be assigned to each project. + +Each OpenPype Tray app works with two sites at one time. (Sites can be the same, and no synching is done in this setup). + +Sites could be configured differently per project basis. + +Each new site needs to be created first in `System Settings`. Most important feature of site is its Provider, select one from already prepared Providers. + +#### Alternative sites + +This attribute is meant for special use cases only. + +One of the use cases is sftp site vendoring (exposing) same data as regular site (studio). Each site is accessible for different audience. 'studio' for artists in a studio via shared disk, 'sftp' for externals via sftp server with mounted 'studio' drive. + +Change of file status on one site actually means same change on 'alternate' site occured too. (eg. artists publish to 'studio', 'sftp' is using +same location >> file is accessible on 'sftp' site right away, no need to sync it anyhow.) + +##### Example +![Configure module](assets/site_sync_system_sites.png) +Admin created new `sftp` site which is handled by `SFTP` provider. Somewhere in the studio SFTP server is deployed on a machine that has access to `studio` drive. + +Alternative sites work both way: +- everything published to `studio` is accessible on a `sftp` site too +- everything published to `sftp` (most probably via artist's local disk - artists publishes locally, representation is marked to be synced to `sftp`. Immediately after it is synced, it is marked to be available on `studio` too for artists in the studio to use.) ## Project Settings @@ -45,21 +77,6 @@ Artists can also override which site they use as active and remote if need be. 
![Local overrides](assets/site_sync_local_setting.png) -## Sites - -By default there are two sites created for each OpenPype installation: -- **studio** - default site - usually a centralized mounted disk accessible to all artists. Studio site is used if Site Sync is disabled. -- **local** - each workstation or server running OpenPype Tray receives its own with unique site name. Workstation refers to itself as "local"however all other sites will see it under it's unique ID. - -Artists can explore their site ID by opening OpenPype Info tool by clicking on a version number in the tray app. - -Many different sites can be created and configured on the system level, and some or all can be assigned to each project. - -Each OpenPype Tray app works with two sites at one time. (Sites can be the same, and no synching is done in this setup). - -Sites could be configured differently per project basis. - - ## Providers Each site implements a so called `provider` which handles most common operations (list files, copy files etc.) and provides interface with a particular type of storage. (disk, gdrive, aws, etc.) @@ -140,3 +157,42 @@ Beware that ssh key expects OpenSSH format (`.pem`) not a Putty format (`.ppk`)! If a studio needs to use other services for cloud storage, or want to implement totally different storage providers, they can do so by writing their own provider plugin. We're working on a developer documentation, however, for now we recommend looking at `abstract_provider.py`and `gdrive.py` inside `openpype/modules/sync_server/providers` and using it as a template. +### Running Site Sync in background + +Site Sync server synchronizes new published files from artist machine into configured remote location by default. + +There might be a use case where you need to synchronize between "non-artist" sites, for example between studio site and cloud. In this case +you need to run Site Sync as a background process from a command line (via service etc) 24/7. 
+ +To configure all sites where all published files should be synced eventually you need to configure `project_settings/global/sync_server/config/always_accessible_on` property in Settins (per project) first. + +![Set another non artist remote site](assets/site_sync_always_on.png) + +This is an example of: +- Site Sync is enabled for a project +- default active and remote sites are set to `studio` - eg. standard process: everyone is working in a studio, publishing to shared location etc. +- (but this also allows any of the artists to work remotely, they would change their active site in their own Local Settings to `local` and configure local root. + This would result in everything artist publishes is saved first onto his local folder AND synchronized to `studio` site eventually.) +- everything exported must also be eventually uploaded to `sftp` site + +This eventual synchronization between `studio` and `sftp` sites must be physically handled by background process. + +As current implementation relies heavily on Settings and Local Settings, background process for a specific site ('studio' for example) must be configured via Tray first to `syncserver` command to work. + +To do this: + +- run OP `Tray` with environment variable OPENPYPE_LOCAL_ID set to name of active (source) site. In most use cases it would be studio (for cases of backups of everything published to studio site to different cloud site etc.) +- start `Tray` +- check `Local ID` in information dialog after clicking on version number in the Tray +- open `Local Settings` in the `Tray` +- configure for each project necessary active site and remote site +- close `Tray` +- run OP from a command line with `syncserver` and `--active_site` arguments + + +This is an example how to trigger background synching process where active (source) site is `studio`. +(It is expected that OP is installed on a machine, `openpype_console` is on PATH. If not, add full path to executable. 
+) +```shell +openpype_console syncserver --active_site studio +``` \ No newline at end of file diff --git a/website/src/css/custom.css b/website/src/css/custom.css index 4f7f8396f6..0a72dc0f23 100644 --- a/website/src/css/custom.css +++ b/website/src/css/custom.css @@ -197,7 +197,7 @@ h5, h6 { font-weight: var(--ifm-font-weight-semibold); } } .showcase .client img { - max-height: 80px; + max-height: 70px; padding: 20px; max-width: 120px; align-self: center; @@ -215,10 +215,10 @@ h5, h6 { font-weight: var(--ifm-font-weight-semibold); } } .showcase .collab img { - max-height: 60px; + max-height: 70px; padding: 20px; align-self: center; - max-width: 200px; + max-width: 160px; } .showcase .pype_logo img{ diff --git a/website/src/pages/index.js b/website/src/pages/index.js index 00cf002aec..29b81e973f 100644 --- a/website/src/pages/index.js +++ b/website/src/pages/index.js @@ -64,6 +64,10 @@ const collab = [ title: 'Clothcat Animation', image: '/img/clothcat.png', infoLink: 'https://www.clothcatanimation.com/' + }, { + title: 'Ellipse Studio', + image: '/img/ellipse-studio.png', + infoLink: 'http://www.dargaudmedia.com' } ]; @@ -125,7 +129,7 @@ const studios = [ title: "Moonrock Animation Studio", image: "/img/moonrock_logo.png", infoLink: "https://www.moonrock.eu/", - } + } ]; function Service({imageUrl, title, description}) { diff --git a/website/static/img/ellipse-studio.png b/website/static/img/ellipse-studio.png new file mode 100644 index 0000000000..c6fd62a6d5 Binary files /dev/null and b/website/static/img/ellipse-studio.png differ