diff --git a/.github/workflows/prerelease.yml b/.github/workflows/prerelease.yml
index 258458e2d4..d9b4d8089c 100644
--- a/.github/workflows/prerelease.yml
+++ b/.github/workflows/prerelease.yml
@@ -43,7 +43,7 @@ jobs:
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
- addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}}'
+ addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
issues: false
issuesWoLabels: false
sinceTag: "3.0.0"
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 3f85525c26..917e6c884c 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -39,7 +39,7 @@ jobs:
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
- addSections: '{"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]},"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]}}'
+ addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
issues: false
issuesWoLabels: false
sinceTag: "3.0.0"
@@ -81,7 +81,7 @@ jobs:
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
- addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}}'
+ addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
issues: false
issuesWoLabels: false
sinceTag: ${{ steps.version.outputs.last_release }}
diff --git a/.gitmodules b/.gitmodules
index e1b0917e9d..9920ceaad6 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,12 +1,3 @@
[submodule "repos/avalon-core"]
path = repos/avalon-core
- url = https://github.com/pypeclub/avalon-core.git
-[submodule "repos/avalon-unreal-integration"]
- path = repos/avalon-unreal-integration
- url = https://github.com/pypeclub/avalon-unreal-integration.git
-[submodule "openpype/modules/default_modules/ftrack/python2_vendor/arrow"]
- path = openpype/modules/default_modules/ftrack/python2_vendor/arrow
- url = https://github.com/arrow-py/arrow.git
-[submodule "openpype/modules/default_modules/ftrack/python2_vendor/ftrack-python-api"]
- path = openpype/modules/default_modules/ftrack/python2_vendor/ftrack-python-api
- url = https://bitbucket.org/ftrack/ftrack-python-api.git
\ No newline at end of file
+ url = https://github.com/pypeclub/avalon-core.git
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3babdceafb..7790894b7f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,121 +1,89 @@
# Changelog
-## [3.9.0-nightly.3](https://github.com/pypeclub/OpenPype/tree/HEAD)
+## [3.9.1-nightly.1](https://github.com/pypeclub/OpenPype/tree/HEAD)
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.2...HEAD)
-
-**Deprecated:**
-
-- Loader: Remove default family states for hosts from code [\#2706](https://github.com/pypeclub/OpenPype/pull/2706)
-
-### 📖 Documentation
-
-- Update docusaurus to latest version [\#2760](https://github.com/pypeclub/OpenPype/pull/2760)
-- documentation: add example to `repack-version` command [\#2669](https://github.com/pypeclub/OpenPype/pull/2669)
-
-**🆕 New features**
-
-- Flame: loading clips to reels [\#2622](https://github.com/pypeclub/OpenPype/pull/2622)
-
-**🚀 Enhancements**
-
-- Pyblish Pype: Remove redundant new line in installed fonts printing [\#2758](https://github.com/pypeclub/OpenPype/pull/2758)
-- Flame: adding validator source clip [\#2746](https://github.com/pypeclub/OpenPype/pull/2746)
-- Work Files: Preserve subversion comment of current filename by default [\#2734](https://github.com/pypeclub/OpenPype/pull/2734)
-- Ftrack: Disable ftrack module by default [\#2732](https://github.com/pypeclub/OpenPype/pull/2732)
-- Project Manager: Disable add task, add asset and save button when not in a project [\#2727](https://github.com/pypeclub/OpenPype/pull/2727)
-- dropbox handle big file [\#2718](https://github.com/pypeclub/OpenPype/pull/2718)
-- Fusion Move PR: Minor tweaks to Fusion integration [\#2716](https://github.com/pypeclub/OpenPype/pull/2716)
-- Nuke: prerender with review knob [\#2691](https://github.com/pypeclub/OpenPype/pull/2691)
-- Maya configurable unit validator [\#2680](https://github.com/pypeclub/OpenPype/pull/2680)
-- General: Add settings for CleanUpFarm and disable the plugin by default [\#2679](https://github.com/pypeclub/OpenPype/pull/2679)
-- Project Manager: Only allow scroll wheel edits when spinbox is active [\#2678](https://github.com/pypeclub/OpenPype/pull/2678)
-- Ftrack: Sync description to assets [\#2670](https://github.com/pypeclub/OpenPype/pull/2670)
-- General: FFmpeg conversion also check attribute string length [\#2635](https://github.com/pypeclub/OpenPype/pull/2635)
-- Global: adding studio name/code to anatomy template formatting data [\#2630](https://github.com/pypeclub/OpenPype/pull/2630)
-- Houdini: Load Arnold .ass procedurals into Houdini [\#2606](https://github.com/pypeclub/OpenPype/pull/2606)
-- Deadline: Simplify GlobalJobPreLoad logic [\#2605](https://github.com/pypeclub/OpenPype/pull/2605)
-- Houdini: Implement Arnold .ass standin extraction from Houdini \(also support .ass.gz\) [\#2603](https://github.com/pypeclub/OpenPype/pull/2603)
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.0...HEAD)
**🐛 Bug fixes**
-- Loader UI: Fix right click in representation widget [\#2757](https://github.com/pypeclub/OpenPype/pull/2757)
-- Aftereffects 2022 and Deadline [\#2748](https://github.com/pypeclub/OpenPype/pull/2748)
-- Flame: bunch of bugs [\#2745](https://github.com/pypeclub/OpenPype/pull/2745)
-- Maya: Save current scene on workfile publish [\#2744](https://github.com/pypeclub/OpenPype/pull/2744)
-- Version Up: Preserve parts of filename after version number \(like subversion\) on version\_up [\#2741](https://github.com/pypeclub/OpenPype/pull/2741)
-- Loader UI: Multiple asset selection and underline colors fixed [\#2731](https://github.com/pypeclub/OpenPype/pull/2731)
-- General: Fix loading of unused chars in xml format [\#2729](https://github.com/pypeclub/OpenPype/pull/2729)
-- TVPaint: Set objectName with members [\#2725](https://github.com/pypeclub/OpenPype/pull/2725)
-- General: Don't use 'objectName' from loaded references [\#2715](https://github.com/pypeclub/OpenPype/pull/2715)
-- Settings: Studio Project anatomy is queried using right keys [\#2711](https://github.com/pypeclub/OpenPype/pull/2711)
-- Local Settings: Additional applications don't break UI [\#2710](https://github.com/pypeclub/OpenPype/pull/2710)
-- Houdini: Fix refactor of Houdini host move for CreateArnoldAss [\#2704](https://github.com/pypeclub/OpenPype/pull/2704)
-- LookAssigner: Fix imports after moving code to OpenPype repository [\#2701](https://github.com/pypeclub/OpenPype/pull/2701)
-- Multiple hosts: unify menu style across hosts [\#2693](https://github.com/pypeclub/OpenPype/pull/2693)
-- Maya Redshift fixes [\#2692](https://github.com/pypeclub/OpenPype/pull/2692)
-- Maya: fix fps validation popup [\#2685](https://github.com/pypeclub/OpenPype/pull/2685)
-- Houdini Explicitly collect correct frame name even in case of single frame render when `frameStart` is provided [\#2676](https://github.com/pypeclub/OpenPype/pull/2676)
-- hiero: fix effect collector name and order [\#2673](https://github.com/pypeclub/OpenPype/pull/2673)
-- Maya: Fix menu callbacks [\#2671](https://github.com/pypeclub/OpenPype/pull/2671)
-- Launcher: Fix access to 'data' attribute on actions [\#2659](https://github.com/pypeclub/OpenPype/pull/2659)
-- Houdini: fix usd family in loader and integrators [\#2631](https://github.com/pypeclub/OpenPype/pull/2631)
+- Harmony - fixed creator issue [\#2891](https://github.com/pypeclub/OpenPype/pull/2891)
+- General: Remove forgotten use of avalon Creator [\#2885](https://github.com/pypeclub/OpenPype/pull/2885)
+- General: Avoid circular import [\#2884](https://github.com/pypeclub/OpenPype/pull/2884)
+- Fixes for attaching loaded containers \(\#2837\) [\#2874](https://github.com/pypeclub/OpenPype/pull/2874)
-**Merged pull requests:**
+**🔀 Refactored code**
-- Harmony: Rendering in Deadline didn't work in other machines than submitter [\#2754](https://github.com/pypeclub/OpenPype/pull/2754)
-- Maya: set Deadline job/batch name to original source workfile name instead of published workfile [\#2733](https://github.com/pypeclub/OpenPype/pull/2733)
-- Fusion: Moved implementation into OpenPype [\#2713](https://github.com/pypeclub/OpenPype/pull/2713)
-- TVPaint: Plugin build without dependencies [\#2705](https://github.com/pypeclub/OpenPype/pull/2705)
-- Webpublisher: Photoshop create a beauty png [\#2689](https://github.com/pypeclub/OpenPype/pull/2689)
-- Ftrack: Hierarchical attributes are queried properly [\#2682](https://github.com/pypeclub/OpenPype/pull/2682)
-- Maya: Add Validate Frame Range settings [\#2661](https://github.com/pypeclub/OpenPype/pull/2661)
-- Harmony: move to Openpype [\#2657](https://github.com/pypeclub/OpenPype/pull/2657)
-- General: Show applications without integration in project [\#2656](https://github.com/pypeclub/OpenPype/pull/2656)
-- Maya: cleanup duplicate rendersetup code [\#2642](https://github.com/pypeclub/OpenPype/pull/2642)
+- General: Reduce style usage to OpenPype repository [\#2889](https://github.com/pypeclub/OpenPype/pull/2889)
+
+## [3.9.0](https://github.com/pypeclub/OpenPype/tree/3.9.0) (2022-03-14)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.0-nightly.9...3.9.0)
+
+**Deprecated:**
+
+- AssetCreator: Remove the tool [\#2845](https://github.com/pypeclub/OpenPype/pull/2845)
+
+**🚀 Enhancements**
+
+- General: Subset name filtering in ExtractReview outpus [\#2872](https://github.com/pypeclub/OpenPype/pull/2872)
+- NewPublisher: Descriptions and Icons in creator dialog [\#2867](https://github.com/pypeclub/OpenPype/pull/2867)
+- NewPublisher: Changing task on publishing instance [\#2863](https://github.com/pypeclub/OpenPype/pull/2863)
+- TrayPublisher: Choose project widget is more clear [\#2859](https://github.com/pypeclub/OpenPype/pull/2859)
+- New: Validation exceptions [\#2841](https://github.com/pypeclub/OpenPype/pull/2841)
+- Maya: add loaded containers to published instance [\#2837](https://github.com/pypeclub/OpenPype/pull/2837)
+- Ftrack: Can sync fps as string [\#2836](https://github.com/pypeclub/OpenPype/pull/2836)
+- General: Custom function for find executable [\#2822](https://github.com/pypeclub/OpenPype/pull/2822)
+- General: Color dialog UI fixes [\#2817](https://github.com/pypeclub/OpenPype/pull/2817)
+- global: letter box calculated on output as last process [\#2812](https://github.com/pypeclub/OpenPype/pull/2812)
+- Nuke: adding Reformat to baking mov plugin [\#2811](https://github.com/pypeclub/OpenPype/pull/2811)
+- Manager: Update all to latest button [\#2805](https://github.com/pypeclub/OpenPype/pull/2805)
+
+**🐛 Bug fixes**
+
+- General: Missing time function [\#2877](https://github.com/pypeclub/OpenPype/pull/2877)
+- Deadline: Fix plugin name for tile assemble [\#2868](https://github.com/pypeclub/OpenPype/pull/2868)
+- Nuke: gizmo precollect fix [\#2866](https://github.com/pypeclub/OpenPype/pull/2866)
+- General: Fix hardlink for windows [\#2864](https://github.com/pypeclub/OpenPype/pull/2864)
+- General: ffmpeg was crashing on slate merge [\#2860](https://github.com/pypeclub/OpenPype/pull/2860)
+- WebPublisher: Video file was published with one too many frame [\#2858](https://github.com/pypeclub/OpenPype/pull/2858)
+- New Publisher: Error dialog got right styles [\#2857](https://github.com/pypeclub/OpenPype/pull/2857)
+- General: Fix getattr clalback on dynamic modules [\#2855](https://github.com/pypeclub/OpenPype/pull/2855)
+- Nuke: slate resolution to input video resolution [\#2853](https://github.com/pypeclub/OpenPype/pull/2853)
+- WebPublisher: Fix username stored in DB [\#2852](https://github.com/pypeclub/OpenPype/pull/2852)
+- WebPublisher: Fix wrong number of frames for video file [\#2851](https://github.com/pypeclub/OpenPype/pull/2851)
+- Nuke: Fix family test in validate\_write\_legacy to work with stillImage [\#2847](https://github.com/pypeclub/OpenPype/pull/2847)
+- Nuke: fix multiple baking profile farm publishing [\#2842](https://github.com/pypeclub/OpenPype/pull/2842)
+- Blender: Fixed parameters for FBX export of the camera [\#2840](https://github.com/pypeclub/OpenPype/pull/2840)
+- Maya: Stop creation of reviews for Cryptomattes [\#2832](https://github.com/pypeclub/OpenPype/pull/2832)
+- Deadline: Remove recreated event [\#2828](https://github.com/pypeclub/OpenPype/pull/2828)
+- Deadline: Added missing events folder [\#2827](https://github.com/pypeclub/OpenPype/pull/2827)
+- Settings: Missing document with OP versions may break start of OpenPype [\#2825](https://github.com/pypeclub/OpenPype/pull/2825)
+- Deadline: more detailed temp file name for environment json [\#2824](https://github.com/pypeclub/OpenPype/pull/2824)
+- General: Host name was formed from obsolete code [\#2821](https://github.com/pypeclub/OpenPype/pull/2821)
+- Settings UI: Fix "Apply from" action [\#2820](https://github.com/pypeclub/OpenPype/pull/2820)
+- Ftrack: Job killer with missing user [\#2819](https://github.com/pypeclub/OpenPype/pull/2819)
+- Nuke: Use AVALON\_APP to get value for "app" key [\#2818](https://github.com/pypeclub/OpenPype/pull/2818)
+- StandalonePublisher: use dynamic groups in subset names [\#2816](https://github.com/pypeclub/OpenPype/pull/2816)
+
+**🔀 Refactored code**
+
+- Refactor: move webserver tool to openpype [\#2876](https://github.com/pypeclub/OpenPype/pull/2876)
+- General: Move create logic from avalon to OpenPype [\#2854](https://github.com/pypeclub/OpenPype/pull/2854)
+- General: Add vendors from avalon [\#2848](https://github.com/pypeclub/OpenPype/pull/2848)
+- General: Basic event system [\#2846](https://github.com/pypeclub/OpenPype/pull/2846)
+- General: Move change context functions [\#2839](https://github.com/pypeclub/OpenPype/pull/2839)
+- Tools: Don't use avalon tools code [\#2829](https://github.com/pypeclub/OpenPype/pull/2829)
+- Move Unreal Implementation to OpenPype [\#2823](https://github.com/pypeclub/OpenPype/pull/2823)
+- General: Extract template formatting from anatomy [\#2766](https://github.com/pypeclub/OpenPype/pull/2766)
## [3.8.2](https://github.com/pypeclub/OpenPype/tree/3.8.2) (2022-02-07)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.8.2-nightly.3...3.8.2)
-### 📖 Documentation
-
-- Cosmetics: Fix common typos in openpype/website [\#2617](https://github.com/pypeclub/OpenPype/pull/2617)
-
-**🚀 Enhancements**
-
-- General: Project backup tools [\#2629](https://github.com/pypeclub/OpenPype/pull/2629)
-- nuke: adding clear button to write nodes [\#2627](https://github.com/pypeclub/OpenPype/pull/2627)
-- Ftrack: Family to Asset type mapping is in settings [\#2602](https://github.com/pypeclub/OpenPype/pull/2602)
-
-**🐛 Bug fixes**
-
-- Fix pulling of cx\_freeze 6.10 [\#2628](https://github.com/pypeclub/OpenPype/pull/2628)
-
-**Merged pull requests:**
-
-- Docker: enhance dockerfiles with metadata, fix pyenv initialization [\#2647](https://github.com/pypeclub/OpenPype/pull/2647)
-- WebPublisher: fix instance duplicates [\#2641](https://github.com/pypeclub/OpenPype/pull/2641)
-- Fix - safer pulling of task name for webpublishing from PS [\#2613](https://github.com/pypeclub/OpenPype/pull/2613)
-
## [3.8.1](https://github.com/pypeclub/OpenPype/tree/3.8.1) (2022-02-01)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.8.1-nightly.3...3.8.1)
-**🚀 Enhancements**
-
-- Webpublisher: Thumbnail extractor [\#2600](https://github.com/pypeclub/OpenPype/pull/2600)
-
-**🐛 Bug fixes**
-
-- Release/3.8.0 [\#2619](https://github.com/pypeclub/OpenPype/pull/2619)
-- hotfix: OIIO tool path - add extension on windows [\#2618](https://github.com/pypeclub/OpenPype/pull/2618)
-- Settings: Enum does not store empty string if has single item to select [\#2615](https://github.com/pypeclub/OpenPype/pull/2615)
-
-**Merged pull requests:**
-
-- Bump pillow from 8.4.0 to 9.0.0 [\#2595](https://github.com/pypeclub/OpenPype/pull/2595)
-
## [3.8.0](https://github.com/pypeclub/OpenPype/tree/3.8.0) (2022-01-24)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.8.0-nightly.7...3.8.0)
diff --git a/openpype/__init__.py b/openpype/__init__.py
index 11b563ebfe..99629a4257 100644
--- a/openpype/__init__.py
+++ b/openpype/__init__.py
@@ -10,7 +10,8 @@ from .lib import (
Anatomy,
filter_pyblish_plugins,
set_plugin_attributes_from_settings,
- change_timer_to_current_context
+ change_timer_to_current_context,
+ register_event_callback,
)
pyblish = avalon = _original_discover = None
@@ -58,10 +59,15 @@ def patched_discover(superclass):
"""
# run original discover and get plugins
plugins = _original_discover(superclass)
+ filtered_plugins = [
+ plugin
+ for plugin in plugins
+ if issubclass(plugin, superclass)
+ ]
- set_plugin_attributes_from_settings(plugins, superclass)
+ set_plugin_attributes_from_settings(filtered_plugins, superclass)
- return plugins
+ return filtered_plugins
@import_wrapper
@@ -69,6 +75,10 @@ def install():
"""Install Pype to Avalon."""
from pyblish.lib import MessageHandler
from openpype.modules import load_modules
+ from openpype.pipeline import (
+ LegacyCreator,
+ register_loader_plugin_path,
+ )
from avalon import pipeline
# Make sure modules are loaded
@@ -84,7 +94,7 @@ def install():
log.info("Registering global plug-ins..")
pyblish.register_plugin_path(PUBLISH_PATH)
pyblish.register_discovery_filter(filter_pyblish_plugins)
- avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
+ register_loader_plugin_path(LOAD_PATH)
project_name = os.environ.get("AVALON_PROJECT")
@@ -112,8 +122,8 @@ def install():
continue
pyblish.register_plugin_path(path)
- avalon.register_plugin_path(avalon.Loader, path)
- avalon.register_plugin_path(avalon.Creator, path)
+ register_loader_plugin_path(path)
+ avalon.register_plugin_path(LegacyCreator, path)
avalon.register_plugin_path(avalon.InventoryAction, path)
# apply monkey patched discover to original one
@@ -122,20 +132,22 @@ def install():
avalon.discover = patched_discover
pipeline.discover = patched_discover
- avalon.on("taskChanged", _on_task_change)
+ register_event_callback("taskChanged", _on_task_change)
-def _on_task_change(*args):
+def _on_task_change():
change_timer_to_current_context()
@import_wrapper
def uninstall():
"""Uninstall Pype from Avalon."""
+ from openpype.pipeline import deregister_loader_plugin_path
+
log.info("Deregistering global plug-ins..")
pyblish.deregister_plugin_path(PUBLISH_PATH)
pyblish.deregister_discovery_filter(filter_pyblish_plugins)
- avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
+ deregister_loader_plugin_path(LOAD_PATH)
log.info("Global plug-ins unregistred")
# restore original discover
diff --git a/openpype/api.py b/openpype/api.py
index 51854492ab..b692b36065 100644
--- a/openpype/api.py
+++ b/openpype/api.py
@@ -45,9 +45,6 @@ from .lib.avalon_context import (
from . import resources
from .plugin import (
- PypeCreatorMixin,
- Creator,
-
Extractor,
ValidatePipelineOrder,
@@ -89,9 +86,6 @@ __all__ = [
# Resources
"resources",
- # Pype creator mixin
- "PypeCreatorMixin",
- "Creator",
# plugin classes
"Extractor",
# ordering
diff --git a/openpype/cli.py b/openpype/cli.py
index 0597c387d0..155e07dea3 100644
--- a/openpype/cli.py
+++ b/openpype/cli.py
@@ -42,6 +42,12 @@ def standalonepublisher():
PypeCommands().launch_standalone_publisher()
+@main.command()
+def traypublisher():
+ """Show new OpenPype Standalone publisher UI."""
+ PypeCommands().launch_traypublisher()
+
+
@main.command()
@click.option("-d", "--debug",
is_flag=True, help=("Run pype tray in debug mode"))
@@ -371,10 +377,15 @@ def run(script):
"--app_variant",
help="Provide specific app variant for test, empty for latest",
default=None)
-def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant):
+@click.option("-t",
+ "--timeout",
+ help="Provide specific timeout value for test case",
+ default=None)
+def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant,
+ timeout):
"""Run all automatic tests after proper initialization via start.py"""
PypeCommands().run_tests(folder, mark, pyargs, test_data_folder,
- persist, app_variant)
+ persist, app_variant, timeout)
@main.command()
diff --git a/openpype/hooks/pre_add_last_workfile_arg.py b/openpype/hooks/pre_add_last_workfile_arg.py
index eb9e6a6b1c..3609620917 100644
--- a/openpype/hooks/pre_add_last_workfile_arg.py
+++ b/openpype/hooks/pre_add_last_workfile_arg.py
@@ -17,12 +17,13 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
"nuke",
"nukex",
"hiero",
+ "houdini",
"nukestudio",
"fusion",
"blender",
"photoshop",
"tvpaint",
- "afftereffects"
+ "aftereffects"
]
def execute(self):
diff --git a/openpype/hooks/pre_copy_template_workfile.py b/openpype/hooks/pre_copy_template_workfile.py
index 5c56d721e8..dffac22ee2 100644
--- a/openpype/hooks/pre_copy_template_workfile.py
+++ b/openpype/hooks/pre_copy_template_workfile.py
@@ -19,7 +19,7 @@ class CopyTemplateWorkfile(PreLaunchHook):
# Before `AddLastWorkfileToLaunchArgs`
order = 0
- app_groups = ["blender", "photoshop", "tvpaint", "afftereffects"]
+ app_groups = ["blender", "photoshop", "tvpaint", "aftereffects"]
def execute(self):
"""Check if can copy template for context and do it if possible.
@@ -44,7 +44,7 @@ class CopyTemplateWorkfile(PreLaunchHook):
return
if os.path.exists(last_workfile):
- self.log.debug("Last workfile exits. Skipping {} process.".format(
+ self.log.debug("Last workfile exists. Skipping {} process.".format(
self.__class__.__name__
))
return
@@ -120,7 +120,7 @@ class CopyTemplateWorkfile(PreLaunchHook):
f"Creating workfile from template: \"{template_path}\""
)
- # Copy template workfile to new destinantion
+ # Copy template workfile to new destination
shutil.copy2(
os.path.normpath(template_path),
os.path.normpath(last_workfile)
diff --git a/openpype/hooks/pre_global_host_data.py b/openpype/hooks/pre_global_host_data.py
index bae967e25f..4c85a511ed 100644
--- a/openpype/hooks/pre_global_host_data.py
+++ b/openpype/hooks/pre_global_host_data.py
@@ -2,7 +2,7 @@ from openpype.api import Anatomy
from openpype.lib import (
PreLaunchHook,
EnvironmentPrepData,
- prepare_host_environments,
+ prepare_app_environments,
prepare_context_environments
)
@@ -14,14 +14,6 @@ class GlobalHostDataHook(PreLaunchHook):
def execute(self):
"""Prepare global objects to `data` that will be used for sure."""
- if not self.application.is_host:
- self.log.info(
- "Skipped hook {}. Application is not marked as host.".format(
- self.__class__.__name__
- )
- )
- return
-
self.prepare_global_data()
if not self.data.get("asset_doc"):
@@ -49,7 +41,7 @@ class GlobalHostDataHook(PreLaunchHook):
"log": self.log
})
- prepare_host_environments(temp_data, self.launch_context.env_group)
+ prepare_app_environments(temp_data, self.launch_context.env_group)
prepare_context_environments(temp_data)
temp_data.pop("log")
diff --git a/openpype/hosts/aftereffects/api/launch_logic.py b/openpype/hosts/aftereffects/api/launch_logic.py
index 97f14c9332..c549268978 100644
--- a/openpype/hosts/aftereffects/api/launch_logic.py
+++ b/openpype/hosts/aftereffects/api/launch_logic.py
@@ -15,7 +15,7 @@ from Qt import QtCore
from openpype.tools.utils import host_tools
from avalon import api
-from avalon.tools.webserver.app import WebServerTool
+from openpype.tools.adobe_webserver.app import WebServerTool
from .ws_stub import AfterEffectsServerStub
diff --git a/openpype/hosts/aftereffects/api/pipeline.py b/openpype/hosts/aftereffects/api/pipeline.py
index 94f1e3d105..681f1c51a7 100644
--- a/openpype/hosts/aftereffects/api/pipeline.py
+++ b/openpype/hosts/aftereffects/api/pipeline.py
@@ -9,7 +9,13 @@ from avalon import io, pipeline
from openpype import lib
from openpype.api import Logger
+from openpype.pipeline import (
+ LegacyCreator,
+ register_loader_plugin_path,
+ deregister_loader_plugin_path,
+)
import openpype.hosts.aftereffects
+from openpype.lib import register_event_callback
from .launch_logic import get_stub
@@ -65,21 +71,21 @@ def install():
pyblish.api.register_host("aftereffects")
pyblish.api.register_plugin_path(PUBLISH_PATH)
- avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH)
- avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH)
+ register_loader_plugin_path(LOAD_PATH)
+ avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH)
log.info(PUBLISH_PATH)
pyblish.api.register_callback(
"instanceToggled", on_pyblish_instance_toggled
)
- avalon.api.on("application.launched", application_launch)
+ register_event_callback("application.launched", application_launch)
def uninstall():
pyblish.api.deregister_plugin_path(PUBLISH_PATH)
- avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH)
- avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH)
+ deregister_loader_plugin_path(LOAD_PATH)
+ avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH)
def on_pyblish_instance_toggled(instance, old_value, new_value):
diff --git a/openpype/hosts/aftereffects/api/plugin.py b/openpype/hosts/aftereffects/api/plugin.py
index fbe07663dd..29705cc5be 100644
--- a/openpype/hosts/aftereffects/api/plugin.py
+++ b/openpype/hosts/aftereffects/api/plugin.py
@@ -1,9 +1,8 @@
-import avalon.api
+from openpype.pipeline import LoaderPlugin
from .launch_logic import get_stub
-class AfterEffectsLoader(avalon.api.Loader):
+class AfterEffectsLoader(LoaderPlugin):
@staticmethod
def get_stub():
return get_stub()
-
diff --git a/openpype/hosts/aftereffects/api/ws_stub.py b/openpype/hosts/aftereffects/api/ws_stub.py
index 5a0600e92e..b0893310c1 100644
--- a/openpype/hosts/aftereffects/api/ws_stub.py
+++ b/openpype/hosts/aftereffects/api/ws_stub.py
@@ -8,7 +8,7 @@ import logging
import attr
from wsrpc_aiohttp import WebSocketAsync
-from avalon.tools.webserver.app import WebServerTool
+from openpype.tools.adobe_webserver.app import WebServerTool
@attr.s
diff --git a/openpype/hosts/aftereffects/plugins/create/create_render.py b/openpype/hosts/aftereffects/plugins/create/create_render.py
index 8dfc85cdc8..41efb4b0ba 100644
--- a/openpype/hosts/aftereffects/plugins/create/create_render.py
+++ b/openpype/hosts/aftereffects/plugins/create/create_render.py
@@ -1,13 +1,12 @@
-from avalon.api import CreatorError
-
-import openpype.api
+from openpype.pipeline import create
+from openpype.pipeline import CreatorError
from openpype.hosts.aftereffects.api import (
get_stub,
list_instances
)
-class CreateRender(openpype.api.Creator):
+class CreateRender(create.LegacyCreator):
"""Render folder for publish.
Creates subsets in format 'familyTaskSubsetname',
diff --git a/openpype/hosts/aftereffects/plugins/load/load_background.py b/openpype/hosts/aftereffects/plugins/load/load_background.py
index 1a2d6fc432..be43cae44e 100644
--- a/openpype/hosts/aftereffects/plugins/load/load_background.py
+++ b/openpype/hosts/aftereffects/plugins/load/load_background.py
@@ -1,11 +1,10 @@
import re
-import avalon.api
-
from openpype.lib import (
get_background_layers,
get_unique_layer_name
)
+from openpype.pipeline import get_representation_path
from openpype.hosts.aftereffects.api import (
AfterEffectsLoader,
containerise
@@ -78,7 +77,7 @@ class BackgroundLoader(AfterEffectsLoader):
else: # switching version - keep same name
comp_name = container["namespace"]
- path = avalon.api.get_representation_path(representation)
+ path = get_representation_path(representation)
layers = get_background_layers(path)
comp = stub.reload_background(container["members"][1],
diff --git a/openpype/hosts/aftereffects/plugins/load/load_file.py b/openpype/hosts/aftereffects/plugins/load/load_file.py
index 9dbbf7aae1..9eb9e80a2c 100644
--- a/openpype/hosts/aftereffects/plugins/load/load_file.py
+++ b/openpype/hosts/aftereffects/plugins/load/load_file.py
@@ -1,8 +1,8 @@
import re
-import avalon.api
from openpype import lib
+from openpype.pipeline import get_representation_path
from openpype.hosts.aftereffects.api import (
AfterEffectsLoader,
containerise
@@ -92,7 +92,7 @@ class FileLoader(AfterEffectsLoader):
"{}_{}".format(context["asset"], context["subset"]))
else: # switching version - keep same name
layer_name = container["namespace"]
- path = avalon.api.get_representation_path(representation)
+ path = get_representation_path(representation)
# with aftereffects.maintained_selection(): # TODO
stub.replace_item(layer.id, path, stub.LOADED_ICON + layer_name)
stub.imprint(
diff --git a/openpype/hosts/aftereffects/plugins/publish/help/validate_instance_asset.xml b/openpype/hosts/aftereffects/plugins/publish/help/validate_instance_asset.xml
new file mode 100644
index 0000000000..13f03a9b9a
--- /dev/null
+++ b/openpype/hosts/aftereffects/plugins/publish/help/validate_instance_asset.xml
@@ -0,0 +1,21 @@
+
+
+
+Subset context
+
+## Invalid subset context
+
+Context of the given subset doesn't match your current scene.
+
+### How to repair?
+
+You can fix this with "repair" button on the right.
+
+
+### __Detailed Info__ (optional)
+
+This might happen if you reuse an old workfile and open it in a different context.
+(E.g. you created the subset "renderCompositingDefault" from the asset "Robot" in "your_project_Robot_compositing.aep", then opened this workfile in the context "Sloth", but the existing subset for the "Robot" asset stayed in the workfile.)
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml b/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml
new file mode 100644
index 0000000000..36fa90456e
--- /dev/null
+++ b/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml
@@ -0,0 +1,35 @@
+
+
+
+Scene setting
+
+## Invalid scene setting found
+
+One of the settings in a scene doesn't match to asset settings in database.
+
+{invalid_setting_str}
+
+### How to repair?
+
+Change values for {invalid_keys_str} in the scene OR change them in the asset database if they are wrong there.
+
+
+### __Detailed Info__ (optional)
+
+This error is shown when for example resolution in the scene doesn't match to resolution set on the asset in the database.
+Either value in the database or in the scene is wrong.
+
+
+
+Scene file doesn't exist
+
+## Scene file doesn't exist
+
+Collected scene {scene_url} doesn't exist.
+
+### How to repair?
+
+Re-save file, start publish from the beginning again.
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py b/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py
index 71c1750457..37cecfbcc4 100644
--- a/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py
+++ b/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py
@@ -1,6 +1,7 @@
from avalon import api
import pyblish.api
import openpype.api
+from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.aftereffects.api import get_stub
@@ -53,9 +54,8 @@ class ValidateInstanceAsset(pyblish.api.InstancePlugin):
current_asset = api.Session["AVALON_ASSET"]
msg = (
f"Instance asset {instance_asset} is not the same "
- f"as current context {current_asset}. PLEASE DO:\n"
- f"Repair with 'A' action to use '{current_asset}'.\n"
- f"If that's not correct value, close workfile and "
- f"reopen via Workfiles!"
+ f"as current context {current_asset}."
)
- assert instance_asset == current_asset, msg
+
+ if instance_asset != current_asset:
+ raise PublishXmlValidationError(self, msg)
diff --git a/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py b/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py
index 5ae391e230..273ccd295e 100644
--- a/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py
+++ b/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py
@@ -5,6 +5,7 @@ import re
import pyblish.api
+from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.aftereffects.api import get_asset_settings
@@ -99,12 +100,14 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
self.log.info("current_settings:: {}".format(current_settings))
invalid_settings = []
+ invalid_keys = set()
for key, value in expected_settings.items():
if value != current_settings[key]:
invalid_settings.append(
"{} expected: {} found: {}".format(key, value,
current_settings[key])
)
+ invalid_keys.add(key)
if ((expected_settings.get("handleStart")
or expected_settings.get("handleEnd"))
@@ -116,7 +119,27 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
msg = "Found invalid settings:\n{}".format(
"\n".join(invalid_settings)
)
- assert not invalid_settings, msg
- assert os.path.exists(instance.data.get("source")), (
- "Scene file not found (saved under wrong name)"
- )
+
+ if invalid_settings:
+ invalid_keys_str = ",".join(invalid_keys)
+ break_str = " "
+ invalid_setting_str = "Found invalid settings: {}".\
+ format(break_str.join(invalid_settings))
+
+ formatting_data = {
+ "invalid_setting_str": invalid_setting_str,
+ "invalid_keys_str": invalid_keys_str
+ }
+ raise PublishXmlValidationError(self, msg,
+ formatting_data=formatting_data)
+
+ if not os.path.exists(instance.data.get("source")):
+ scene_url = instance.data.get("source")
+ msg = "Scene file {} not found (saved under wrong name)".format(
+ scene_url
+ )
+ formatting_data = {
+ "scene_url": scene_url
+ }
+ raise PublishXmlValidationError(self, msg, key="file_not_found",
+ formatting_data=formatting_data)
diff --git a/openpype/hosts/blender/api/pipeline.py b/openpype/hosts/blender/api/pipeline.py
index 0e5104fea9..07a7509dd7 100644
--- a/openpype/hosts/blender/api/pipeline.py
+++ b/openpype/hosts/blender/api/pipeline.py
@@ -14,7 +14,16 @@ import avalon.api
from avalon import io, schema
from avalon.pipeline import AVALON_CONTAINER_ID
+from openpype.pipeline import (
+ LegacyCreator,
+ register_loader_plugin_path,
+ deregister_loader_plugin_path,
+)
from openpype.api import Logger
+from openpype.lib import (
+ register_event_callback,
+ emit_event
+)
import openpype.hosts.blender
HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.blender.__file__))
@@ -45,13 +54,14 @@ def install():
pyblish.api.register_host("blender")
pyblish.api.register_plugin_path(str(PUBLISH_PATH))
- avalon.api.register_plugin_path(avalon.api.Loader, str(LOAD_PATH))
- avalon.api.register_plugin_path(avalon.api.Creator, str(CREATE_PATH))
+ register_loader_plugin_path(str(LOAD_PATH))
+ avalon.api.register_plugin_path(LegacyCreator, str(CREATE_PATH))
lib.append_user_scripts()
- avalon.api.on("new", on_new)
- avalon.api.on("open", on_open)
+ register_event_callback("new", on_new)
+ register_event_callback("open", on_open)
+
_register_callbacks()
_register_events()
@@ -66,8 +76,8 @@ def uninstall():
pyblish.api.deregister_host("blender")
pyblish.api.deregister_plugin_path(str(PUBLISH_PATH))
- avalon.api.deregister_plugin_path(avalon.api.Loader, str(LOAD_PATH))
- avalon.api.deregister_plugin_path(avalon.api.Creator, str(CREATE_PATH))
+ deregister_loader_plugin_path(str(LOAD_PATH))
+ avalon.api.deregister_plugin_path(LegacyCreator, str(CREATE_PATH))
if not IS_HEADLESS:
ops.unregister()
@@ -113,22 +123,22 @@ def set_start_end_frames():
scene.render.resolution_y = resolution_y
-def on_new(arg1, arg2):
+def on_new():
set_start_end_frames()
-def on_open(arg1, arg2):
+def on_open():
set_start_end_frames()
@bpy.app.handlers.persistent
def _on_save_pre(*args):
- avalon.api.emit("before_save", args)
+ emit_event("before.save")
@bpy.app.handlers.persistent
def _on_save_post(*args):
- avalon.api.emit("save", args)
+ emit_event("save")
@bpy.app.handlers.persistent
@@ -136,9 +146,9 @@ def _on_load_post(*args):
# Detect new file or opening an existing file
if bpy.data.filepath:
# Likely this was an open operation since it has a filepath
- avalon.api.emit("open", args)
+ emit_event("open")
else:
- avalon.api.emit("new", args)
+ emit_event("new")
ops.OpenFileCacher.post_load()
@@ -169,7 +179,7 @@ def _register_callbacks():
log.info("Installed event handler _on_load_post...")
-def _on_task_changed(*args):
+def _on_task_changed():
"""Callback for when the task in the context is changed."""
# TODO (jasper): Blender has no concept of projects or workspace.
@@ -186,7 +196,7 @@ def _on_task_changed(*args):
def _register_events():
"""Install callbacks for specific events."""
- avalon.api.on("taskChanged", _on_task_changed)
+ register_event_callback("taskChanged", _on_task_changed)
log.info("Installed event callback for 'taskChanged'...")
@@ -202,13 +212,10 @@ def reload_pipeline(*args):
avalon.api.uninstall()
for module in (
- "avalon.io",
- "avalon.lib",
- "avalon.pipeline",
- "avalon.tools.creator.app",
- "avalon.tools.manager.app",
- "avalon.api",
- "avalon.tools",
+ "avalon.io",
+ "avalon.lib",
+ "avalon.pipeline",
+ "avalon.api",
):
module = importlib.import_module(module)
importlib.reload(module)
diff --git a/openpype/hosts/blender/api/plugin.py b/openpype/hosts/blender/api/plugin.py
index 8c9ab9a27f..3207f543b7 100644
--- a/openpype/hosts/blender/api/plugin.py
+++ b/openpype/hosts/blender/api/plugin.py
@@ -5,8 +5,10 @@ from typing import Dict, List, Optional
import bpy
-import avalon.api
-from openpype.api import PypeCreatorMixin
+from openpype.pipeline import (
+ LegacyCreator,
+ LoaderPlugin,
+)
from .pipeline import AVALON_CONTAINERS
from .ops import (
MainThreadItem,
@@ -129,7 +131,7 @@ def deselect_all():
bpy.context.view_layer.objects.active = active
-class Creator(PypeCreatorMixin, avalon.api.Creator):
+class Creator(LegacyCreator):
"""Base class for Creator plug-ins."""
defaults = ['Main']
@@ -145,13 +147,13 @@ class Creator(PypeCreatorMixin, avalon.api.Creator):
return collection
-class Loader(avalon.api.Loader):
+class Loader(LoaderPlugin):
"""Base class for Loader plug-ins."""
hosts = ["blender"]
-class AssetLoader(avalon.api.Loader):
+class AssetLoader(LoaderPlugin):
"""A basic AssetLoader for Blender
This will implement the basic logic for linking/appending assets
diff --git a/openpype/hosts/blender/plugins/load/load_abc.py b/openpype/hosts/blender/plugins/load/load_abc.py
index 07800521c9..3daaeceffe 100644
--- a/openpype/hosts/blender/plugins/load/load_abc.py
+++ b/openpype/hosts/blender/plugins/load/load_abc.py
@@ -6,7 +6,7 @@ from typing import Dict, List, Optional
import bpy
-from avalon import api
+from openpype.pipeline import get_representation_path
from openpype.hosts.blender.api.pipeline import (
AVALON_CONTAINERS,
AVALON_PROPERTY,
@@ -178,7 +178,7 @@ class CacheModelLoader(plugin.AssetLoader):
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
- libpath = Path(api.get_representation_path(representation))
+ libpath = Path(get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
diff --git a/openpype/hosts/blender/plugins/load/load_action.py b/openpype/hosts/blender/plugins/load/load_action.py
index a9d8522220..3c8fe988f0 100644
--- a/openpype/hosts/blender/plugins/load/load_action.py
+++ b/openpype/hosts/blender/plugins/load/load_action.py
@@ -5,9 +5,13 @@ from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
-from avalon import api, blender
import bpy
+from openpype.pipeline import get_representation_path
import openpype.hosts.blender.api.plugin
+from openpype.hosts.blender.api.pipeline import (
+ containerise_existing,
+ AVALON_PROPERTY,
+)
logger = logging.getLogger("openpype").getChild("blender").getChild("load_action")
@@ -49,7 +53,7 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader):
container = bpy.data.collections.new(lib_container)
container.name = container_name
- blender.pipeline.containerise_existing(
+ containerise_existing(
container,
name,
namespace,
@@ -57,8 +61,7 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader):
self.__class__.__name__,
)
- container_metadata = container.get(
- blender.pipeline.AVALON_PROPERTY)
+ container_metadata = container.get(AVALON_PROPERTY)
container_metadata["libpath"] = libpath
container_metadata["lib_container"] = lib_container
@@ -90,16 +93,16 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader):
anim_data.action.make_local()
- if not obj.get(blender.pipeline.AVALON_PROPERTY):
+ if not obj.get(AVALON_PROPERTY):
- obj[blender.pipeline.AVALON_PROPERTY] = dict()
+ obj[AVALON_PROPERTY] = dict()
- avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
+ avalon_info = obj[AVALON_PROPERTY]
avalon_info.update({"container_name": container_name})
objects_list.append(obj)
- animation_container.pop(blender.pipeline.AVALON_PROPERTY)
+ animation_container.pop(AVALON_PROPERTY)
# Save the list of objects in the metadata container
container_metadata["objects"] = objects_list
@@ -128,7 +131,7 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader):
container["objectName"]
)
- libpath = Path(api.get_representation_path(representation))
+ libpath = Path(get_representation_path(representation))
extension = libpath.suffix.lower()
logger.info(
@@ -153,8 +156,7 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader):
f"Unsupported file: {libpath}"
)
- collection_metadata = collection.get(
- blender.pipeline.AVALON_PROPERTY)
+ collection_metadata = collection.get(AVALON_PROPERTY)
collection_libpath = collection_metadata["libpath"]
normalized_collection_libpath = (
@@ -225,16 +227,16 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader):
strip.action = anim_data.action
strip.action_frame_end = anim_data.action.frame_range[1]
- if not obj.get(blender.pipeline.AVALON_PROPERTY):
+ if not obj.get(AVALON_PROPERTY):
- obj[blender.pipeline.AVALON_PROPERTY] = dict()
+ obj[AVALON_PROPERTY] = dict()
- avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
+ avalon_info = obj[AVALON_PROPERTY]
avalon_info.update({"container_name": collection.name})
objects_list.append(obj)
- anim_container.pop(blender.pipeline.AVALON_PROPERTY)
+ anim_container.pop(AVALON_PROPERTY)
# Save the list of objects in the metadata container
collection_metadata["objects"] = objects_list
@@ -266,8 +268,7 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader):
"Nested collections are not supported."
)
- collection_metadata = collection.get(
- blender.pipeline.AVALON_PROPERTY)
+ collection_metadata = collection.get(AVALON_PROPERTY)
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
diff --git a/openpype/hosts/blender/plugins/load/load_audio.py b/openpype/hosts/blender/plugins/load/load_audio.py
index e065150c15..b95c5db270 100644
--- a/openpype/hosts/blender/plugins/load/load_audio.py
+++ b/openpype/hosts/blender/plugins/load/load_audio.py
@@ -6,7 +6,7 @@ from typing import Dict, List, Optional
import bpy
-from avalon import api
+from openpype.pipeline import get_representation_path
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import (
AVALON_CONTAINERS,
@@ -102,7 +102,7 @@ class AudioLoader(plugin.AssetLoader):
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
- libpath = Path(api.get_representation_path(representation))
+ libpath = Path(get_representation_path(representation))
self.log.info(
"Container: %s\nRepresentation: %s",
diff --git a/openpype/hosts/blender/plugins/load/load_camera_blend.py b/openpype/hosts/blender/plugins/load/load_camera_blend.py
index 61955f124d..6ed2e8a575 100644
--- a/openpype/hosts/blender/plugins/load/load_camera_blend.py
+++ b/openpype/hosts/blender/plugins/load/load_camera_blend.py
@@ -7,7 +7,7 @@ from typing import Dict, List, Optional
import bpy
-from avalon import api
+from openpype.pipeline import get_representation_path
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import (
AVALON_CONTAINERS,
@@ -155,7 +155,7 @@ class BlendCameraLoader(plugin.AssetLoader):
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
- libpath = Path(api.get_representation_path(representation))
+ libpath = Path(get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
diff --git a/openpype/hosts/blender/plugins/load/load_camera_fbx.py b/openpype/hosts/blender/plugins/load/load_camera_fbx.py
index 175ddacf9f..626ed44f08 100644
--- a/openpype/hosts/blender/plugins/load/load_camera_fbx.py
+++ b/openpype/hosts/blender/plugins/load/load_camera_fbx.py
@@ -6,7 +6,7 @@ from typing import Dict, List, Optional
import bpy
-from avalon import api
+from openpype.pipeline import get_representation_path
from openpype.hosts.blender.api import plugin, lib
from openpype.hosts.blender.api.pipeline import (
AVALON_CONTAINERS,
@@ -143,7 +143,7 @@ class FbxCameraLoader(plugin.AssetLoader):
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
- libpath = Path(api.get_representation_path(representation))
+ libpath = Path(get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
diff --git a/openpype/hosts/blender/plugins/load/load_fbx.py b/openpype/hosts/blender/plugins/load/load_fbx.py
index c6e6af5592..2d249ef647 100644
--- a/openpype/hosts/blender/plugins/load/load_fbx.py
+++ b/openpype/hosts/blender/plugins/load/load_fbx.py
@@ -6,7 +6,7 @@ from typing import Dict, List, Optional
import bpy
-from avalon import api
+from openpype.pipeline import get_representation_path
from openpype.hosts.blender.api import plugin, lib
from openpype.hosts.blender.api.pipeline import (
AVALON_CONTAINERS,
@@ -187,7 +187,7 @@ class FbxModelLoader(plugin.AssetLoader):
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
- libpath = Path(api.get_representation_path(representation))
+ libpath = Path(get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
diff --git a/openpype/hosts/blender/plugins/load/load_layout_blend.py b/openpype/hosts/blender/plugins/load/load_layout_blend.py
index 8029c38b4a..d87df3c010 100644
--- a/openpype/hosts/blender/plugins/load/load_layout_blend.py
+++ b/openpype/hosts/blender/plugins/load/load_layout_blend.py
@@ -6,8 +6,11 @@ from typing import Dict, List, Optional
import bpy
-from avalon import api
from openpype import lib
+from openpype.pipeline import (
+ legacy_create,
+ get_representation_path,
+)
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import (
AVALON_CONTAINERS,
@@ -159,7 +162,7 @@ class BlendLayoutLoader(plugin.AssetLoader):
raise ValueError("Creator plugin \"CreateAnimation\" was "
"not found.")
- api.create(
+ legacy_create(
creator_plugin,
name=local_obj.name.split(':')[-1] + "_animation",
asset=asset,
@@ -308,7 +311,7 @@ class BlendLayoutLoader(plugin.AssetLoader):
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
- libpath = Path(api.get_representation_path(representation))
+ libpath = Path(get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
diff --git a/openpype/hosts/blender/plugins/load/load_layout_json.py b/openpype/hosts/blender/plugins/load/load_layout_json.py
index 0a5bdeecaa..0693937fec 100644
--- a/openpype/hosts/blender/plugins/load/load_layout_json.py
+++ b/openpype/hosts/blender/plugins/load/load_layout_json.py
@@ -7,8 +7,13 @@ from typing import Dict, Optional
import bpy
-from avalon import api
-from openpype import lib
+from openpype.pipeline import (
+ discover_loader_plugins,
+ remove_container,
+ load_container,
+ get_representation_path,
+ loaders_from_representation,
+)
from openpype.hosts.blender.api.pipeline import (
AVALON_INSTANCES,
AVALON_CONTAINERS,
@@ -34,7 +39,7 @@ class JsonLayoutLoader(plugin.AssetLoader):
objects = list(asset_group.children)
for obj in objects:
- api.remove(obj.get(AVALON_PROPERTY))
+ remove_container(obj.get(AVALON_PROPERTY))
def _remove_animation_instances(self, asset_group):
instances = bpy.data.collections.get(AVALON_INSTANCES)
@@ -67,13 +72,13 @@ class JsonLayoutLoader(plugin.AssetLoader):
with open(libpath, "r") as fp:
data = json.load(fp)
- all_loaders = api.discover(api.Loader)
+ all_loaders = discover_loader_plugins()
for element in data:
reference = element.get('reference')
family = element.get('family')
- loaders = api.loaders_from_representation(all_loaders, reference)
+ loaders = loaders_from_representation(all_loaders, reference)
loader = self._get_loader(loaders, family)
if not loader:
@@ -103,7 +108,7 @@ class JsonLayoutLoader(plugin.AssetLoader):
# at this time it will not return anything. The assets will be
# loaded in the next Blender cycle, so we use the options to
# set the transform, parent and assign the action, if there is one.
- api.load(
+ load_container(
loader,
reference,
namespace=instance_name,
@@ -118,7 +123,7 @@ class JsonLayoutLoader(plugin.AssetLoader):
# raise ValueError("Creator plugin \"CreateCamera\" was "
# "not found.")
- # api.create(
+ # legacy_create(
# creator_plugin,
# name="camera",
# # name=f"{unique_number}_{subset}_animation",
@@ -189,7 +194,7 @@ class JsonLayoutLoader(plugin.AssetLoader):
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
- libpath = Path(api.get_representation_path(representation))
+ libpath = Path(get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
diff --git a/openpype/hosts/blender/plugins/load/load_look.py b/openpype/hosts/blender/plugins/load/load_look.py
index 066ec0101b..70d1b95f02 100644
--- a/openpype/hosts/blender/plugins/load/load_look.py
+++ b/openpype/hosts/blender/plugins/load/load_look.py
@@ -8,7 +8,7 @@ import os
import json
import bpy
-from avalon import api
+from openpype.pipeline import get_representation_path
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import (
containerise_existing,
@@ -140,7 +140,7 @@ class BlendLookLoader(plugin.AssetLoader):
def update(self, container: Dict, representation: Dict):
collection = bpy.data.collections.get(container["objectName"])
- libpath = Path(api.get_representation_path(representation))
+ libpath = Path(get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
diff --git a/openpype/hosts/blender/plugins/load/load_model.py b/openpype/hosts/blender/plugins/load/load_model.py
index 04ece0b338..18d01dcb29 100644
--- a/openpype/hosts/blender/plugins/load/load_model.py
+++ b/openpype/hosts/blender/plugins/load/load_model.py
@@ -6,7 +6,7 @@ from typing import Dict, List, Optional
import bpy
-from avalon import api
+from openpype.pipeline import get_representation_path
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import (
AVALON_CONTAINERS,
@@ -195,7 +195,7 @@ class BlendModelLoader(plugin.AssetLoader):
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
- libpath = Path(api.get_representation_path(representation))
+ libpath = Path(get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
diff --git a/openpype/hosts/blender/plugins/load/load_rig.py b/openpype/hosts/blender/plugins/load/load_rig.py
index eb6d273a51..cec088076c 100644
--- a/openpype/hosts/blender/plugins/load/load_rig.py
+++ b/openpype/hosts/blender/plugins/load/load_rig.py
@@ -6,10 +6,15 @@ from typing import Dict, List, Optional
import bpy
-from avalon import api
-from avalon.blender import lib as avalon_lib
from openpype import lib
-from openpype.hosts.blender.api import plugin
+from openpype.pipeline import (
+ legacy_create,
+ get_representation_path,
+)
+from openpype.hosts.blender.api import (
+ plugin,
+ get_selection,
+)
from openpype.hosts.blender.api.pipeline import (
AVALON_CONTAINERS,
AVALON_PROPERTY,
@@ -248,7 +253,7 @@ class BlendRigLoader(plugin.AssetLoader):
animation_asset = options.get('animation_asset')
- api.create(
+ legacy_create(
creator_plugin,
name=namespace + "_animation",
# name=f"{unique_number}_{subset}_animation",
@@ -262,7 +267,7 @@ class BlendRigLoader(plugin.AssetLoader):
if anim_file:
bpy.ops.import_scene.fbx(filepath=anim_file, anim_offset=0.0)
- imported = avalon_lib.get_selection()
+ imported = get_selection()
armature = [
o for o in asset_group.children if o.type == 'ARMATURE'][0]
@@ -306,7 +311,7 @@ class BlendRigLoader(plugin.AssetLoader):
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
- libpath = Path(api.get_representation_path(representation))
+ libpath = Path(get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
diff --git a/openpype/hosts/blender/plugins/publish/extract_camera.py b/openpype/hosts/blender/plugins/publish/extract_camera.py
index 597dcecd21..b2c7611b58 100644
--- a/openpype/hosts/blender/plugins/publish/extract_camera.py
+++ b/openpype/hosts/blender/plugins/publish/extract_camera.py
@@ -50,6 +50,10 @@ class ExtractCamera(api.Extractor):
filepath=filepath,
use_active_collection=False,
use_selection=True,
+ bake_anim_use_nla_strips=False,
+ bake_anim_use_all_actions=False,
+ add_leaf_bones=False,
+ armature_nodetype='ROOT',
object_types={'CAMERA'},
bake_anim_simplify_factor=0.0
)
diff --git a/openpype/hosts/celaction/plugins/publish/submit_celaction_deadline.py b/openpype/hosts/celaction/plugins/publish/submit_celaction_deadline.py
index fd958d11a3..ea109e9445 100644
--- a/openpype/hosts/celaction/plugins/publish/submit_celaction_deadline.py
+++ b/openpype/hosts/celaction/plugins/publish/submit_celaction_deadline.py
@@ -1,9 +1,9 @@
import os
+import re
import json
import getpass
-from avalon.vendor import requests
-import re
+import requests
import pyblish.api
diff --git a/openpype/hosts/flame/api/__init__.py b/openpype/hosts/flame/api/__init__.py
index 56bbadd2fc..f210c27f87 100644
--- a/openpype/hosts/flame/api/__init__.py
+++ b/openpype/hosts/flame/api/__init__.py
@@ -68,7 +68,8 @@ from .workio import (
)
from .render_utils import (
export_clip,
- get_preset_path_by_xml_name
+ get_preset_path_by_xml_name,
+ modify_preset_file
)
__all__ = [
@@ -140,5 +141,6 @@ __all__ = [
# render utils
"export_clip",
- "get_preset_path_by_xml_name"
+ "get_preset_path_by_xml_name",
+ "modify_preset_file"
]
diff --git a/openpype/hosts/flame/api/lib.py b/openpype/hosts/flame/api/lib.py
index bbb7c38119..74d9e7607a 100644
--- a/openpype/hosts/flame/api/lib.py
+++ b/openpype/hosts/flame/api/lib.py
@@ -527,6 +527,7 @@ def get_segment_attributes(segment):
# Add timeline segment to tree
clip_data = {
+ "shot_name": segment.shot_name.get_value(),
"segment_name": segment.name.get_value(),
"segment_comment": segment.comment.get_value(),
"tape_name": segment.tape_name,
diff --git a/openpype/hosts/flame/api/pipeline.py b/openpype/hosts/flame/api/pipeline.py
index af071439ef..930c6abe29 100644
--- a/openpype/hosts/flame/api/pipeline.py
+++ b/openpype/hosts/flame/api/pipeline.py
@@ -7,6 +7,11 @@ from avalon import api as avalon
from avalon.pipeline import AVALON_CONTAINER_ID
from pyblish import api as pyblish
from openpype.api import Logger
+from openpype.pipeline import (
+ LegacyCreator,
+ register_loader_plugin_path,
+ deregister_loader_plugin_path,
+)
from .lib import (
set_segment_data_marker,
set_publish_attribute,
@@ -32,8 +37,8 @@ def install():
pyblish.register_host("flame")
pyblish.register_plugin_path(PUBLISH_PATH)
- avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
- avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
+ register_loader_plugin_path(LOAD_PATH)
+ avalon.register_plugin_path(LegacyCreator, CREATE_PATH)
avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
log.info("OpenPype Flame plug-ins registred ...")
@@ -47,8 +52,8 @@ def uninstall():
log.info("Deregistering Flame plug-ins..")
pyblish.deregister_plugin_path(PUBLISH_PATH)
- avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
- avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
+ deregister_loader_plugin_path(LOAD_PATH)
+ avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH)
avalon.deregister_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
# register callback for switching publishable
diff --git a/openpype/hosts/flame/api/plugin.py b/openpype/hosts/flame/api/plugin.py
index db1793cba8..4c9d3c5383 100644
--- a/openpype/hosts/flame/api/plugin.py
+++ b/openpype/hosts/flame/api/plugin.py
@@ -2,13 +2,16 @@ import os
import re
import shutil
import sys
-from avalon.vendor import qargparse
from xml.etree import ElementTree as ET
import six
+import qargparse
from Qt import QtWidgets, QtCore
import openpype.api as openpype
+from openpype.pipeline import (
+ LegacyCreator,
+ LoaderPlugin,
+)
from openpype import style
-import avalon.api as avalon
from . import (
lib as flib,
pipeline as fpipeline,
@@ -299,7 +302,7 @@ class Spacer(QtWidgets.QWidget):
self.setLayout(layout)
-class Creator(openpype.Creator):
+class Creator(LegacyCreator):
"""Creator class wrapper
"""
clip_color = constants.COLOR_MAP["purple"]
@@ -361,6 +364,7 @@ class PublishableClip:
vertical_sync_default = False
driving_layer_default = ""
index_from_segment_default = False
+ use_shot_name_default = False
def __init__(self, segment, **kwargs):
self.rename_index = kwargs["rename_index"]
@@ -376,6 +380,7 @@ class PublishableClip:
# segment (clip) main attributes
self.cs_name = self.clip_data["segment_name"]
self.cs_index = int(self.clip_data["segment"])
+ self.shot_name = self.clip_data["shot_name"]
# get track name and index
self.track_index = int(self.clip_data["track"])
@@ -419,18 +424,21 @@ class PublishableClip:
# deal with clip name
new_name = self.marker_data.pop("newClipName")
- if self.rename:
+ if self.rename and not self.use_shot_name:
# rename segment
self.current_segment.name = str(new_name)
self.marker_data["asset"] = str(new_name)
+ elif self.use_shot_name:
+ self.marker_data["asset"] = self.shot_name
+ self.marker_data["hierarchyData"]["shot"] = self.shot_name
else:
self.marker_data["asset"] = self.cs_name
self.marker_data["hierarchyData"]["shot"] = self.cs_name
if self.marker_data["heroTrack"] and self.review_layer:
- self.marker_data.update({"reviewTrack": self.review_layer})
+ self.marker_data["reviewTrack"] = self.review_layer
else:
- self.marker_data.update({"reviewTrack": None})
+ self.marker_data["reviewTrack"] = None
# create pype tag on track_item and add data
fpipeline.imprint(self.current_segment, self.marker_data)
@@ -463,6 +471,8 @@ class PublishableClip:
# ui_inputs data or default values if gui was not used
self.rename = self.ui_inputs.get(
"clipRename", {}).get("value") or self.rename_default
+ self.use_shot_name = self.ui_inputs.get(
+ "useShotName", {}).get("value") or self.use_shot_name_default
self.clip_name = self.ui_inputs.get(
"clipName", {}).get("value") or self.clip_name_default
self.hierarchy = self.ui_inputs.get(
@@ -652,7 +662,7 @@ class PublishableClip:
# Publishing plugin functions
# Loader plugin functions
-class ClipLoader(avalon.Loader):
+class ClipLoader(LoaderPlugin):
"""A basic clip loader for Flame
This will implement the basic behavior for a loader to inherit from that
diff --git a/openpype/hosts/flame/api/render_utils.py b/openpype/hosts/flame/api/render_utils.py
index 1b086646cc..473fb2f985 100644
--- a/openpype/hosts/flame/api/render_utils.py
+++ b/openpype/hosts/flame/api/render_utils.py
@@ -1,4 +1,5 @@
import os
+from xml.etree import ElementTree as ET
def export_clip(export_path, clip, preset_path, **kwargs):
@@ -123,3 +124,29 @@ def get_preset_path_by_xml_name(xml_preset_name):
# if nothing found then return False
return False
+
+
+def modify_preset_file(xml_path, staging_dir, data):
+ """Modify xml preset with input data
+
+ Args:
+ xml_path (str ): path for input xml preset
+ staging_dir (str): staging dir path
+ data (dict): data where key is xmlTag and value as string
+
+ Returns:
+ str: _description_
+ """
+ # create temp path
+ dirname, basename = os.path.split(xml_path)
+ temp_path = os.path.join(staging_dir, basename)
+
+ # change xml following data keys
+ with open(xml_path, "r") as datafile:
+ tree = ET.parse(datafile)
+ for key, value in data.items():
+ for element in tree.findall(".//{}".format(key)):
+ element.text = str(value)
+ tree.write(temp_path)
+
+ return temp_path
diff --git a/openpype/hosts/flame/api/scripts/wiretap_com.py b/openpype/hosts/flame/api/scripts/wiretap_com.py
index c864399608..ee906c2608 100644
--- a/openpype/hosts/flame/api/scripts/wiretap_com.py
+++ b/openpype/hosts/flame/api/scripts/wiretap_com.py
@@ -420,13 +420,20 @@ class WireTapCom(object):
RuntimeError: Not able to set colorspace policy
"""
color_policy = color_policy or "Legacy"
+
+ # check if the colour policy in custom dir
+ if not os.path.exists(color_policy):
+ color_policy = "/syncolor/policies/Autodesk/{}".format(
+ color_policy)
+
+ # create arguments
project_colorspace_cmd = [
os.path.join(
self.wiretap_tools_dir,
"wiretap_duplicate_node"
),
"-s",
- "/syncolor/policies/Autodesk/{}".format(color_policy),
+ color_policy,
"-n",
"/projects/{}/syncolor".format(project_name)
]
diff --git a/openpype/hosts/flame/hooks/pre_flame_setup.py b/openpype/hosts/flame/hooks/pre_flame_setup.py
index 0d63b0d926..ad2b0dc897 100644
--- a/openpype/hosts/flame/hooks/pre_flame_setup.py
+++ b/openpype/hosts/flame/hooks/pre_flame_setup.py
@@ -73,7 +73,7 @@ class FlamePrelaunch(PreLaunchHook):
"FrameWidth": int(width),
"FrameHeight": int(height),
"AspectRatio": float((width / height) * _db_p_data["pixelAspect"]),
- "FrameRate": "{} fps".format(fps),
+ "FrameRate": self._get_flame_fps(fps),
"FrameDepth": str(imageio_flame["project"]["frameDepth"]),
"FieldDominance": str(imageio_flame["project"]["fieldDominance"])
}
@@ -101,6 +101,28 @@ class FlamePrelaunch(PreLaunchHook):
self.launch_context.launch_args.extend(app_arguments)
+ def _get_flame_fps(self, fps_num):
+ fps_table = {
+ float(23.976): "23.976 fps",
+ int(25): "25 fps",
+ int(24): "24 fps",
+ float(29.97): "29.97 fps DF",
+ int(30): "30 fps",
+ int(50): "50 fps",
+ float(59.94): "59.94 fps DF",
+ int(60): "60 fps"
+ }
+
+ match_key = min(fps_table.keys(), key=lambda x: abs(x - fps_num))
+
+ try:
+ return fps_table[match_key]
+ except KeyError as msg:
+ raise KeyError((
+ "Missing FPS key in conversion table. "
+ "Following keys are available: {}".format(fps_table.keys())
+ )) from msg
+
def _add_pythonpath(self):
pythonpath = self.launch_context.env.get("PYTHONPATH")
diff --git a/openpype/hosts/flame/plugins/create/create_shot_clip.py b/openpype/hosts/flame/plugins/create/create_shot_clip.py
index f055c77a89..11c00dab42 100644
--- a/openpype/hosts/flame/plugins/create/create_shot_clip.py
+++ b/openpype/hosts/flame/plugins/create/create_shot_clip.py
@@ -87,41 +87,48 @@ class CreateShotClip(opfapi.Creator):
"target": "tag",
"toolTip": "Parents folder for shot root folder, Template filled with `Hierarchy Data` section", # noqa
"order": 0},
+ "useShotName": {
+ "value": True,
+ "type": "QCheckBox",
+ "label": "Use Shot Name",
+ "target": "ui",
+ "toolTip": "Use name from Shot name clip attribute", # noqa
+ "order": 1},
"clipRename": {
"value": False,
"type": "QCheckBox",
"label": "Rename clips",
"target": "ui",
"toolTip": "Renaming selected clips on fly", # noqa
- "order": 1},
+ "order": 2},
"clipName": {
"value": "{sequence}{shot}",
"type": "QLineEdit",
"label": "Clip Name Template",
"target": "ui",
"toolTip": "template for creating shot namespaused for renaming (use rename: on)", # noqa
- "order": 2},
+ "order": 3},
"segmentIndex": {
"value": True,
"type": "QCheckBox",
"label": "Segment index",
"target": "ui",
"toolTip": "Take number from segment index", # noqa
- "order": 3},
+ "order": 4},
"countFrom": {
"value": 10,
"type": "QSpinBox",
"label": "Count sequence from",
"target": "ui",
"toolTip": "Set when the sequence number stafrom", # noqa
- "order": 4},
+ "order": 5},
"countSteps": {
"value": 10,
"type": "QSpinBox",
"label": "Stepping number",
"target": "ui",
"toolTip": "What number is adding every new step", # noqa
- "order": 5},
+ "order": 6},
}
},
"hierarchyData": {
diff --git a/openpype/hosts/flame/plugins/load/load_clip.py b/openpype/hosts/flame/plugins/load/load_clip.py
index 8ba01d6937..8980f72cb8 100644
--- a/openpype/hosts/flame/plugins/load/load_clip.py
+++ b/openpype/hosts/flame/plugins/load/load_clip.py
@@ -172,7 +172,7 @@ class LoadClip(opfapi.ClipLoader):
# version_name = version.get("name", None)
# colorspace = version_data.get("colorspace", None)
# object_name = "{}_{}".format(name, namespace)
- # file = api.get_representation_path(representation).replace("\\", "/")
+ # file = get_representation_path(representation).replace("\\", "/")
# clip = track_item.source()
# # reconnect media to new path
diff --git a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py
index db85bede85..3b1466925f 100644
--- a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py
+++ b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py
@@ -22,6 +22,7 @@ class ExtractSubsetResources(openpype.api.Extractor):
"ext": "jpg",
"xml_preset_file": "Jpeg (8-bit).xml",
"xml_preset_dir": "",
+ "export_type": "File Sequence",
"colorspace_out": "Output - sRGB",
"representation_add_range": False,
"representation_tags": ["thumbnail"]
@@ -30,6 +31,7 @@ class ExtractSubsetResources(openpype.api.Extractor):
"ext": "mov",
"xml_preset_file": "Apple iPad (1920x1080).xml",
"xml_preset_dir": "",
+ "export_type": "Movie",
"colorspace_out": "Output - Rec.709",
"representation_add_range": True,
"representation_tags": [
@@ -54,21 +56,35 @@ class ExtractSubsetResources(openpype.api.Extractor):
):
instance.data["representations"] = []
- frame_start = instance.data["frameStart"]
- handle_start = instance.data["handleStart"]
- frame_start_handle = frame_start - handle_start
- source_first_frame = instance.data["sourceFirstFrame"]
- source_start_handles = instance.data["sourceStartH"]
- source_end_handles = instance.data["sourceEndH"]
- source_duration_handles = (
- source_end_handles - source_start_handles) + 1
-
+ # flame objects
+ segment = instance.data["item"]
+ sequence_clip = instance.context.data["flameSequence"]
clip_data = instance.data["flameSourceClip"]
clip = clip_data["PyClip"]
- in_mark = (source_start_handles - source_first_frame) + 1
- out_mark = in_mark + source_duration_handles
+ # segment's parent track name
+ s_track_name = segment.parent.name.get_value()
+ # get configured workfile frame start/end (handles excluded)
+ frame_start = instance.data["frameStart"]
+ # get media source first frame
+ source_first_frame = instance.data["sourceFirstFrame"]
+
+ # get timeline in/out of segment
+ clip_in = instance.data["clipIn"]
+ clip_out = instance.data["clipOut"]
+
+ # get handles value - take only the max from both
+ handle_start = instance.data["handleStart"]
+ handle_end = instance.data["handleEnd"]
+ handles = max(handle_start, handle_end)
+
+ # get media source range with handles
+ source_start_handles = instance.data["sourceStartH"]
+ source_end_handles = instance.data["sourceEndH"]
+
+
+ # create staging dir path
staging_dir = self.staging_dir(instance)
# add default preset type for thumbnail and reviewable video
@@ -77,15 +93,52 @@ class ExtractSubsetResources(openpype.api.Extractor):
export_presets = deepcopy(self.default_presets)
export_presets.update(self.export_presets_mapping)
- # with maintained duplication loop all presets
- with opfapi.maintained_object_duplication(clip) as duplclip:
- # loop all preset names and
- for unique_name, preset_config in export_presets.items():
+ # loop all preset names and
+ for unique_name, preset_config in export_presets.items():
+ modify_xml_data = {}
+
+ # get all presets attributes
+ preset_file = preset_config["xml_preset_file"]
+ preset_dir = preset_config["xml_preset_dir"]
+ export_type = preset_config["export_type"]
+ repre_tags = preset_config["representation_tags"]
+ color_out = preset_config["colorspace_out"]
+
+ # get frame range with handles for representation range
+ frame_start_handle = frame_start - handle_start
+ source_duration_handles = (
+ source_end_handles - source_start_handles) + 1
+
+ # define in/out marks
+ in_mark = (source_start_handles - source_first_frame) + 1
+ out_mark = in_mark + source_duration_handles
+
+ # by default export source clips
+ exporting_clip = clip
+
+ if export_type == "Sequence Publish":
+ # change export clip to sequence
+ exporting_clip = sequence_clip
+
+ # change in/out marks to timeline in/out
+ in_mark = clip_in
+ out_mark = clip_out
+
+ # add xml tags modifications
+ modify_xml_data.update({
+ "exportHandles": True,
+ "nbHandles": handles,
+ "startFrame": frame_start
+ })
+
+ # with maintained duplication loop all presets
+ with opfapi.maintained_object_duplication(
+ exporting_clip) as duplclip:
kwargs = {}
- preset_file = preset_config["xml_preset_file"]
- preset_dir = preset_config["xml_preset_dir"]
- repre_tags = preset_config["representation_tags"]
- color_out = preset_config["colorspace_out"]
+
+ if export_type == "Sequence Publish":
+ # only keep visible layer where instance segment is child
+ self.hide_other_tracks(duplclip, s_track_name)
# validate xml preset file is filled
if preset_file == "":
@@ -108,10 +161,13 @@ class ExtractSubsetResources(openpype.api.Extractor):
)
# create preset path
- preset_path = str(os.path.join(
+ preset_orig_xml_path = str(os.path.join(
preset_dir, preset_file
))
+ preset_path = opfapi.modify_preset_file(
+ preset_orig_xml_path, staging_dir, modify_xml_data)
+
# define kwargs based on preset type
if "thumbnail" in unique_name:
kwargs["thumb_frame_number"] = in_mark + (
@@ -122,6 +178,7 @@ class ExtractSubsetResources(openpype.api.Extractor):
"out_mark": out_mark
})
+ # get and make export dir paths
export_dir_path = str(os.path.join(
staging_dir, unique_name
))
@@ -132,6 +189,7 @@ class ExtractSubsetResources(openpype.api.Extractor):
export_dir_path, duplclip, preset_path, **kwargs)
extension = preset_config["ext"]
+
# create representation data
representation_data = {
"name": unique_name,
@@ -159,7 +217,12 @@ class ExtractSubsetResources(openpype.api.Extractor):
# add files to represetation but add
# imagesequence as list
if (
- "movie_file" in preset_path
+ # first check if any file has a mov extension
+ [
+ f for f in files
+ if os.path.splitext(f)[-1] == ".mov"
+ ]
+ # then try if thumbnail is not in unique name
or unique_name == "thumbnail"
):
representation_data["files"] = files.pop()
@@ -246,3 +309,19 @@ class ExtractSubsetResources(openpype.api.Extractor):
)
return new_stage_dir, new_files_list
+
+ def hide_other_tracks(self, sequence_clip, track_name):
+ """Helper method used only if sequence clip is used
+
+ Args:
+ sequence_clip (flame.Clip): sequence clip
+ track_name (str): track name
+ """
+ # create otio tracks and clips
+ for ver in sequence_clip.versions:
+ for track in ver.tracks:
+ if len(track.segments) == 0 and track.hidden:
+ continue
+
+ if track.name.get_value() != track_name:
+ track.hidden = True
diff --git a/openpype/hosts/flame/startup/openpype_flame_to_ftrack/export_preset/openpype_seg_thumbnails_jpg.xml b/openpype/hosts/flame/startup/openpype_babypublisher/export_preset/openpype_seg_thumbnails_jpg.xml
similarity index 97%
rename from openpype/hosts/flame/startup/openpype_flame_to_ftrack/export_preset/openpype_seg_thumbnails_jpg.xml
rename to openpype/hosts/flame/startup/openpype_babypublisher/export_preset/openpype_seg_thumbnails_jpg.xml
index fa43ceece7..44a7bd9770 100644
--- a/openpype/hosts/flame/startup/openpype_flame_to_ftrack/export_preset/openpype_seg_thumbnails_jpg.xml
+++ b/openpype/hosts/flame/startup/openpype_babypublisher/export_preset/openpype_seg_thumbnails_jpg.xml
@@ -29,7 +29,7 @@
Jpeg923688
- <segment name>
+ <shot name>10024
diff --git a/openpype/hosts/flame/startup/openpype_flame_to_ftrack/export_preset/openpype_seg_video_h264.xml b/openpype/hosts/flame/startup/openpype_babypublisher/export_preset/openpype_seg_video_h264.xml
similarity index 95%
rename from openpype/hosts/flame/startup/openpype_flame_to_ftrack/export_preset/openpype_seg_video_h264.xml
rename to openpype/hosts/flame/startup/openpype_babypublisher/export_preset/openpype_seg_video_h264.xml
index 3ca185b8b4..1d2c5a28bb 100644
--- a/openpype/hosts/flame/startup/openpype_flame_to_ftrack/export_preset/openpype_seg_video_h264.xml
+++ b/openpype/hosts/flame/startup/openpype_babypublisher/export_preset/openpype_seg_video_h264.xml
@@ -27,7 +27,7 @@
QuickTime
- <segment name>
+ <shot name>0PCS_709None
@@ -43,7 +43,7 @@
2021/profiles/.33622016/HDTV_720p_8Mbits.cdxprof
- <segment name>_<video codec>
+ <shot name>_<video codec>5024
diff --git a/openpype/hosts/flame/startup/openpype_flame_to_ftrack/modules/__init__.py b/openpype/hosts/flame/startup/openpype_babypublisher/modules/__init__.py
similarity index 100%
rename from openpype/hosts/flame/startup/openpype_flame_to_ftrack/modules/__init__.py
rename to openpype/hosts/flame/startup/openpype_babypublisher/modules/__init__.py
diff --git a/openpype/hosts/flame/startup/openpype_flame_to_ftrack/modules/app_utils.py b/openpype/hosts/flame/startup/openpype_babypublisher/modules/app_utils.py
similarity index 98%
rename from openpype/hosts/flame/startup/openpype_flame_to_ftrack/modules/app_utils.py
rename to openpype/hosts/flame/startup/openpype_babypublisher/modules/app_utils.py
index b255d8d3f5..e639c3f482 100644
--- a/openpype/hosts/flame/startup/openpype_flame_to_ftrack/modules/app_utils.py
+++ b/openpype/hosts/flame/startup/openpype_babypublisher/modules/app_utils.py
@@ -8,7 +8,7 @@ PLUGIN_DIR = os.path.dirname(os.path.dirname(__file__))
EXPORT_PRESETS_DIR = os.path.join(PLUGIN_DIR, "export_preset")
CONFIG_DIR = os.path.join(os.path.expanduser(
- "~/.openpype"), "openpype_flame_to_ftrack")
+ "~/.openpype"), "openpype_babypublisher")
@contextmanager
diff --git a/openpype/hosts/flame/startup/openpype_flame_to_ftrack/modules/ftrack_lib.py b/openpype/hosts/flame/startup/openpype_babypublisher/modules/ftrack_lib.py
similarity index 96%
rename from openpype/hosts/flame/startup/openpype_flame_to_ftrack/modules/ftrack_lib.py
rename to openpype/hosts/flame/startup/openpype_babypublisher/modules/ftrack_lib.py
index c2168016c6..0e84a5ef52 100644
--- a/openpype/hosts/flame/startup/openpype_flame_to_ftrack/modules/ftrack_lib.py
+++ b/openpype/hosts/flame/startup/openpype_babypublisher/modules/ftrack_lib.py
@@ -360,6 +360,8 @@ class FtrackComponentCreator:
class FtrackEntityOperator:
+ existing_tasks = []
+
def __init__(self, session, project_entity):
self.session = session
self.project_entity = project_entity
@@ -392,10 +394,7 @@ class FtrackEntityOperator:
query = '{} where name is "{}" and project_id is "{}"'.format(
type, name, self.project_entity["id"])
- try:
- entity = session.query(query).one()
- except Exception:
- entity = None
+ entity = session.query(query).first()
# if entity doesnt exist then create one
if not entity:
@@ -430,10 +429,21 @@ class FtrackEntityOperator:
return parents
def create_task(self, task_type, task_types, parent):
- existing_task = [
+ _existing_tasks = [
child for child in parent['children']
if child.entity_type.lower() == 'task'
- if child['name'].lower() in task_type.lower()
+ ]
+
+ # add task into existing tasks if they are not already there
+ for _t in _existing_tasks:
+ if _t in self.existing_tasks:
+ continue
+ self.existing_tasks.append(_t)
+
+ existing_task = [
+ task for task in self.existing_tasks
+ if task['name'].lower() in task_type.lower()
+ if task['parent'] == parent
]
if existing_task:
@@ -445,4 +455,5 @@ class FtrackEntityOperator:
})
task["type"] = task_types[task_type]
+ self.existing_tasks.append(task)
return task
diff --git a/openpype/hosts/flame/startup/openpype_flame_to_ftrack/modules/panel_app.py b/openpype/hosts/flame/startup/openpype_babypublisher/modules/panel_app.py
similarity index 96%
rename from openpype/hosts/flame/startup/openpype_flame_to_ftrack/modules/panel_app.py
rename to openpype/hosts/flame/startup/openpype_babypublisher/modules/panel_app.py
index 648f902872..1e8011efaa 100644
--- a/openpype/hosts/flame/startup/openpype_flame_to_ftrack/modules/panel_app.py
+++ b/openpype/hosts/flame/startup/openpype_babypublisher/modules/panel_app.py
@@ -1,4 +1,4 @@
-from PySide2 import QtWidgets, QtCore
+from Qt import QtWidgets, QtCore
import uiwidgets
import app_utils
@@ -33,11 +33,12 @@ class MainWindow(QtWidgets.QWidget):
self.panel_class.clear_temp_data()
self.panel_class.close()
clear_inner_modules()
+ ftrack_lib.FtrackEntityOperator.existing_tasks = []
# now the panel can be closed
event.accept()
-class FlameToFtrackPanel(object):
+class FlameBabyPublisherPanel(object):
session = None
temp_data_dir = None
processed_components = []
@@ -78,7 +79,7 @@ class FlameToFtrackPanel(object):
# creating ui
self.window.setMinimumSize(1500, 600)
- self.window.setWindowTitle('Sequence Shots to Ftrack')
+ self.window.setWindowTitle('OpenPype: Baby-publisher')
self.window.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
self.window.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.window.setFocusPolicy(QtCore.Qt.StrongFocus)
@@ -469,10 +470,14 @@ class FlameToFtrackPanel(object):
for sequence in self.selection:
frame_rate = float(str(sequence.frame_rate)[:-4])
for ver in sequence.versions:
- for tracks in ver.tracks:
- for segment in tracks.segments:
+ for track in ver.tracks:
+ if len(track.segments) == 0 and track.hidden:
+ continue
+ for segment in track.segments:
print(segment.attributes)
- if str(segment.name)[1:-1] == "":
+ if segment.name.get_value() == "":
+ continue
+ if segment.hidden.get_value() is True:
continue
# get clip frame duration
record_duration = str(segment.record_duration)[1:-1]
@@ -492,11 +497,11 @@ class FlameToFtrackPanel(object):
# Add timeline segment to tree
QtWidgets.QTreeWidgetItem(self.tree, [
- str(sequence.name)[1:-1], # seq
- str(segment.name)[1:-1], # shot
+ sequence.name.get_value(), # seq name
+ segment.shot_name.get_value(), # shot name
str(clip_duration), # clip duration
shot_description, # shot description
- str(segment.comment)[1:-1] # task description
+ segment.comment.get_value() # task description
]).setFlags(
QtCore.Qt.ItemIsEditable
| QtCore.Qt.ItemIsEnabled
diff --git a/openpype/hosts/flame/startup/openpype_flame_to_ftrack/modules/uiwidgets.py b/openpype/hosts/flame/startup/openpype_babypublisher/modules/uiwidgets.py
similarity index 99%
rename from openpype/hosts/flame/startup/openpype_flame_to_ftrack/modules/uiwidgets.py
rename to openpype/hosts/flame/startup/openpype_babypublisher/modules/uiwidgets.py
index 0d4807a4ea..c6db875df0 100644
--- a/openpype/hosts/flame/startup/openpype_flame_to_ftrack/modules/uiwidgets.py
+++ b/openpype/hosts/flame/startup/openpype_babypublisher/modules/uiwidgets.py
@@ -1,4 +1,4 @@
-from PySide2 import QtWidgets, QtCore
+from Qt import QtWidgets, QtCore
class FlameLabel(QtWidgets.QLabel):
diff --git a/openpype/hosts/flame/startup/openpype_flame_to_ftrack/openpype_flame_to_ftrack.py b/openpype/hosts/flame/startup/openpype_babypublisher/openpype_babypublisher.py
similarity index 85%
rename from openpype/hosts/flame/startup/openpype_flame_to_ftrack/openpype_flame_to_ftrack.py
rename to openpype/hosts/flame/startup/openpype_babypublisher/openpype_babypublisher.py
index 5a72706ba1..4675d163e3 100644
--- a/openpype/hosts/flame/startup/openpype_flame_to_ftrack/openpype_flame_to_ftrack.py
+++ b/openpype/hosts/flame/startup/openpype_babypublisher/openpype_babypublisher.py
@@ -16,10 +16,11 @@ def flame_panel_executor(selection):
if "panel_app" in sys.modules.keys():
print("panel_app module is already loaded")
del sys.modules["panel_app"]
+ import panel_app
+ reload(panel_app) # noqa
print("panel_app module removed from sys.modules")
- import panel_app
- panel_app.FlameToFtrackPanel(selection)
+ panel_app.FlameBabyPublisherPanel(selection)
def scope_sequence(selection):
@@ -30,7 +31,7 @@ def scope_sequence(selection):
def get_media_panel_custom_ui_actions():
return [
{
- "name": "OpenPype: Ftrack",
+ "name": "OpenPype: Baby-publisher",
"actions": [
{
"name": "Create Shots",
diff --git a/openpype/hosts/fusion/api/lib.py b/openpype/hosts/fusion/api/lib.py
index 37a13e4a10..8f1ea26db0 100644
--- a/openpype/hosts/fusion/api/lib.py
+++ b/openpype/hosts/fusion/api/lib.py
@@ -5,8 +5,8 @@ import contextlib
from Qt import QtGui
-import avalon.api
from avalon import io
+from openpype.pipeline import switch_container
from .pipeline import get_current_comp, comp_lock_and_undo_chunk
from openpype.api import (
get_asset
@@ -167,7 +167,7 @@ def switch_item(container,
assert representation, ("Could not find representation in the database "
"with the name '%s'" % representation_name)
- avalon.api.switch(container, representation)
+ switch_container(container, representation)
return representation
diff --git a/openpype/hosts/fusion/api/pipeline.py b/openpype/hosts/fusion/api/pipeline.py
index 0c1c5a4362..3e83f3a9da 100644
--- a/openpype/hosts/fusion/api/pipeline.py
+++ b/openpype/hosts/fusion/api/pipeline.py
@@ -11,6 +11,11 @@ import avalon.api
from avalon.pipeline import AVALON_CONTAINER_ID
from openpype.api import Logger
+from openpype.pipeline import (
+ LegacyCreator,
+ register_loader_plugin_path,
+ deregister_loader_plugin_path,
+)
import openpype.hosts.fusion
log = Logger().get_logger(__name__)
@@ -62,8 +67,8 @@ def install():
pyblish.api.register_plugin_path(PUBLISH_PATH)
log.info("Registering Fusion plug-ins..")
- avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH)
- avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH)
+ register_loader_plugin_path(LOAD_PATH)
+ avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH)
avalon.api.register_plugin_path(avalon.api.InventoryAction, INVENTORY_PATH)
pyblish.api.register_callback(
@@ -86,8 +91,8 @@ def uninstall():
pyblish.api.deregister_plugin_path(PUBLISH_PATH)
log.info("Deregistering Fusion plug-ins..")
- avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH)
- avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH)
+ deregister_loader_plugin_path(LOAD_PATH)
+ avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH)
avalon.api.deregister_plugin_path(
avalon.api.InventoryAction, INVENTORY_PATH
)
diff --git a/openpype/hosts/fusion/deploy/Scripts/Comp/OpenPype/switch_ui.py b/openpype/hosts/fusion/deploy/Scripts/Comp/OpenPype/switch_ui.py
index fe324d9a41..d9eeae25ea 100644
--- a/openpype/hosts/fusion/deploy/Scripts/Comp/OpenPype/switch_ui.py
+++ b/openpype/hosts/fusion/deploy/Scripts/Comp/OpenPype/switch_ui.py
@@ -5,11 +5,12 @@ import logging
from Qt import QtWidgets, QtCore
import avalon.api
-from avalon import io, pipeline
-from avalon.vendor import qtawesome as qta
+from avalon import io
+import qtawesome as qta
from openpype import style
from openpype.hosts.fusion import api
+from openpype.lib.avalon_context import get_workdir_from_session
log = logging.getLogger("Fusion Switch Shot")
@@ -123,7 +124,7 @@ class App(QtWidgets.QWidget):
def _on_open_from_dir(self):
- start_dir = self._get_context_directory()
+ start_dir = get_workdir_from_session()
comp_file, _ = QtWidgets.QFileDialog.getOpenFileName(
self, "Choose comp", start_dir)
@@ -157,17 +158,6 @@ class App(QtWidgets.QWidget):
import colorbleed.scripts.fusion_switch_shot as switch_shot
switch_shot.switch(asset_name=asset, filepath=file_name, new=True)
- def _get_context_directory(self):
-
- project = io.find_one({"type": "project",
- "name": avalon.api.Session["AVALON_PROJECT"]},
- projection={"config": True})
-
- template = project["config"]["template"]["work"]
- dir = pipeline._format_work_template(template, avalon.api.Session)
-
- return dir
-
def collect_slap_comps(self, directory):
items = glob.glob("{}/*.comp".format(directory))
return items
diff --git a/openpype/hosts/fusion/plugins/create/create_exr_saver.py b/openpype/hosts/fusion/plugins/create/create_exr_saver.py
index 04717f4746..ff8bdb21ef 100644
--- a/openpype/hosts/fusion/plugins/create/create_exr_saver.py
+++ b/openpype/hosts/fusion/plugins/create/create_exr_saver.py
@@ -1,13 +1,13 @@
import os
-import openpype.api
+from openpype.pipeline import create
from openpype.hosts.fusion.api import (
get_current_comp,
comp_lock_and_undo_chunk
)
-class CreateOpenEXRSaver(openpype.api.Creator):
+class CreateOpenEXRSaver(create.LegacyCreator):
name = "openexrDefault"
label = "Create OpenEXR Saver"
diff --git a/openpype/hosts/fusion/plugins/load/actions.py b/openpype/hosts/fusion/plugins/load/actions.py
index 6af99e4c56..bc59cec77f 100644
--- a/openpype/hosts/fusion/plugins/load/actions.py
+++ b/openpype/hosts/fusion/plugins/load/actions.py
@@ -2,10 +2,10 @@
"""
-from avalon import api
+from openpype.pipeline import load
-class FusionSetFrameRangeLoader(api.Loader):
+class FusionSetFrameRangeLoader(load.LoaderPlugin):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["animation",
@@ -39,7 +39,7 @@ class FusionSetFrameRangeLoader(api.Loader):
lib.update_frame_range(start, end)
-class FusionSetFrameRangeWithHandlesLoader(api.Loader):
+class FusionSetFrameRangeWithHandlesLoader(load.LoaderPlugin):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["animation",
diff --git a/openpype/hosts/fusion/plugins/load/load_sequence.py b/openpype/hosts/fusion/plugins/load/load_sequence.py
index ea118585bf..075820de35 100644
--- a/openpype/hosts/fusion/plugins/load/load_sequence.py
+++ b/openpype/hosts/fusion/plugins/load/load_sequence.py
@@ -1,8 +1,12 @@
import os
import contextlib
-from avalon import api, io
+from avalon import io
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
from openpype.hosts.fusion.api import (
imprint_container,
get_current_comp,
@@ -117,7 +121,7 @@ def loader_shift(loader, frame, relative=True):
return int(shift)
-class FusionLoadSequence(api.Loader):
+class FusionLoadSequence(load.LoaderPlugin):
"""Load image sequence into Fusion"""
families = ["imagesequence", "review", "render"]
@@ -204,7 +208,7 @@ class FusionLoadSequence(api.Loader):
assert tool.ID == "Loader", "Must be Loader"
comp = tool.Comp()
- root = os.path.dirname(api.get_representation_path(representation))
+ root = os.path.dirname(get_representation_path(representation))
path = self._get_first_image(root)
# Get start frame from version data
diff --git a/openpype/hosts/fusion/scripts/fusion_switch_shot.py b/openpype/hosts/fusion/scripts/fusion_switch_shot.py
index 9dd8a351e4..ca7efb9136 100644
--- a/openpype/hosts/fusion/scripts/fusion_switch_shot.py
+++ b/openpype/hosts/fusion/scripts/fusion_switch_shot.py
@@ -5,11 +5,12 @@ import logging
# Pipeline imports
import avalon.api
-from avalon import io, pipeline
+from avalon import io
from openpype.lib import version_up
from openpype.hosts.fusion import api
from openpype.hosts.fusion.api import lib
+from openpype.lib.avalon_context import get_workdir_from_session
log = logging.getLogger("Update Slap Comp")
@@ -44,16 +45,6 @@ def _format_version_folder(folder):
return version_folder
-def _get_work_folder(session):
- """Convenience function to get the work folder path of the current asset"""
-
- # Get new filename, create path based on asset and work template
- template_work = self._project["config"]["template"]["work"]
- work_path = pipeline._format_work_template(template_work, session)
-
- return os.path.normpath(work_path)
-
-
def _get_fusion_instance():
fusion = getattr(sys.modules["__main__"], "fusion", None)
if fusion is None:
@@ -72,7 +63,7 @@ def _format_filepath(session):
asset = session["AVALON_ASSET"]
# Save updated slap comp
- work_path = _get_work_folder(session)
+ work_path = get_workdir_from_session(session)
walk_to_dir = os.path.join(work_path, "scenes", "slapcomp")
slapcomp_dir = os.path.abspath(walk_to_dir)
@@ -112,7 +103,7 @@ def _update_savers(comp, session):
None
"""
- new_work = _get_work_folder(session)
+ new_work = get_workdir_from_session(session)
renders = os.path.join(new_work, "renders")
version_folder = _format_version_folder(renders)
renders_version = os.path.join(renders, version_folder)
diff --git a/openpype/hosts/fusion/scripts/set_rendermode.py b/openpype/hosts/fusion/scripts/set_rendermode.py
index 77a2d8e945..f0638e4fe3 100644
--- a/openpype/hosts/fusion/scripts/set_rendermode.py
+++ b/openpype/hosts/fusion/scripts/set_rendermode.py
@@ -1,5 +1,5 @@
from Qt import QtWidgets
-from avalon.vendor import qtawesome
+import qtawesome
from openpype.hosts.fusion.api import get_current_comp
diff --git a/openpype/hosts/harmony/api/README.md b/openpype/hosts/harmony/api/README.md
index a8d182736a..e8d354e1e6 100644
--- a/openpype/hosts/harmony/api/README.md
+++ b/openpype/hosts/harmony/api/README.md
@@ -575,7 +575,7 @@ replace_files = """function %s_replace_files(args)
""" % (signature, signature)
-class ImageSequenceLoader(api.Loader):
+class ImageSequenceLoader(load.LoaderPlugin):
"""Load images
Stores the imported asset in a container named after the asset.
"""
diff --git a/openpype/hosts/harmony/api/TB_sceneOpened.js b/openpype/hosts/harmony/api/TB_sceneOpened.js
index 5a3fe9ce82..6a403fa65e 100644
--- a/openpype/hosts/harmony/api/TB_sceneOpened.js
+++ b/openpype/hosts/harmony/api/TB_sceneOpened.js
@@ -272,8 +272,8 @@ function Client() {
app.avalonClient.send(
{
- 'module': 'avalon.api',
- 'method': 'emit',
+ 'module': 'openpype.lib',
+ 'method': 'emit_event',
'args': ['application.launched']
}, false);
};
diff --git a/openpype/hosts/harmony/api/lib.py b/openpype/hosts/harmony/api/lib.py
index 134f670dc4..66eeac1e3a 100644
--- a/openpype/hosts/harmony/api/lib.py
+++ b/openpype/hosts/harmony/api/lib.py
@@ -361,7 +361,7 @@ def zip_and_move(source, destination):
log.debug(f"Saved '{source}' to '{destination}'")
-def show(module_name):
+def show(tool_name):
"""Call show on "module_name".
This allows to make a QApplication ahead of time and always "exec_" to
@@ -375,13 +375,6 @@ def show(module_name):
# requests to be received properly.
time.sleep(1)
- # Get tool name from module name
- # TODO this is for backwards compatibility not sure if `TB_sceneOpened.js`
- # is automatically updated.
- # Previous javascript sent 'module_name' which contained whole tool import
- # string e.g. "avalon.tools.workfiles" now it should be only "workfiles"
- tool_name = module_name.split(".")[-1]
-
kwargs = {}
if tool_name == "loader":
kwargs["use_context"] = True
diff --git a/openpype/hosts/harmony/api/pipeline.py b/openpype/hosts/harmony/api/pipeline.py
index 17d2870876..f967da15ca 100644
--- a/openpype/hosts/harmony/api/pipeline.py
+++ b/openpype/hosts/harmony/api/pipeline.py
@@ -9,6 +9,12 @@ import avalon.api
from avalon.pipeline import AVALON_CONTAINER_ID
from openpype import lib
+from openpype.lib import register_event_callback
+from openpype.pipeline import (
+ LegacyCreator,
+ register_loader_plugin_path,
+ deregister_loader_plugin_path,
+)
import openpype.hosts.harmony
import openpype.hosts.harmony.api as harmony
@@ -129,7 +135,7 @@ def check_inventory():
harmony.send({"function": "PypeHarmony.message", "args": msg})
-def application_launch():
+def application_launch(event):
"""Event that is executed after Harmony is launched."""
# FIXME: This is breaking server <-> client communication.
# It is now moved so it it manually called.
@@ -178,8 +184,8 @@ def install():
pyblish.api.register_host("harmony")
pyblish.api.register_plugin_path(PUBLISH_PATH)
- avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH)
- avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH)
+ register_loader_plugin_path(LOAD_PATH)
+ avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH)
log.info(PUBLISH_PATH)
# Register callbacks.
@@ -187,13 +193,13 @@ def install():
"instanceToggled", on_pyblish_instance_toggled
)
- avalon.api.on("application.launched", application_launch)
+ register_event_callback("application.launched", application_launch)
def uninstall():
pyblish.api.deregister_plugin_path(PUBLISH_PATH)
- avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH)
- avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH)
+ deregister_loader_plugin_path(LOAD_PATH)
+ avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH)
def on_pyblish_instance_toggled(instance, old_value, new_value):
diff --git a/openpype/hosts/harmony/api/plugin.py b/openpype/hosts/harmony/api/plugin.py
index d6d61a547a..c55d200d30 100644
--- a/openpype/hosts/harmony/api/plugin.py
+++ b/openpype/hosts/harmony/api/plugin.py
@@ -1,9 +1,8 @@
-import avalon.api
-from openpype.api import PypeCreatorMixin
+from openpype.pipeline import LegacyCreator
import openpype.hosts.harmony.api as harmony
-class Creator(PypeCreatorMixin, avalon.api.Creator):
+class Creator(LegacyCreator):
"""Creator plugin to create instances in Harmony.
By default a Composite node is created to support any number of nodes in
diff --git a/openpype/hosts/harmony/plugins/load/load_audio.py b/openpype/hosts/harmony/plugins/load/load_audio.py
index 57ea8ae312..e18a6de097 100644
--- a/openpype/hosts/harmony/plugins/load/load_audio.py
+++ b/openpype/hosts/harmony/plugins/load/load_audio.py
@@ -1,4 +1,7 @@
-from avalon import api
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
import openpype.hosts.harmony.api as harmony
sig = harmony.signature()
@@ -29,7 +32,7 @@ function %s(args)
""" % (sig, sig)
-class ImportAudioLoader(api.Loader):
+class ImportAudioLoader(load.LoaderPlugin):
"""Import audio."""
families = ["shot", "audio"]
@@ -37,7 +40,7 @@ class ImportAudioLoader(api.Loader):
label = "Import Audio"
def load(self, context, name=None, namespace=None, data=None):
- wav_file = api.get_representation_path(context["representation"])
+ wav_file = get_representation_path(context["representation"])
harmony.send(
{"function": func, "args": [context["subset"]["name"], wav_file]}
)
diff --git a/openpype/hosts/harmony/plugins/load/load_background.py b/openpype/hosts/harmony/plugins/load/load_background.py
index 686d6b5b7b..9c01fe3cd8 100644
--- a/openpype/hosts/harmony/plugins/load/load_background.py
+++ b/openpype/hosts/harmony/plugins/load/load_background.py
@@ -1,7 +1,10 @@
import os
import json
-from avalon import api
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
import openpype.hosts.harmony.api as harmony
import openpype.lib
@@ -226,7 +229,7 @@ replace_files
"""
-class BackgroundLoader(api.Loader):
+class BackgroundLoader(load.LoaderPlugin):
"""Load images
Stores the imported asset in a container named after the asset.
"""
@@ -278,7 +281,7 @@ class BackgroundLoader(api.Loader):
def update(self, container, representation):
- path = api.get_representation_path(representation)
+ path = get_representation_path(representation)
with open(path) as json_file:
data = json.load(json_file)
@@ -297,7 +300,7 @@ class BackgroundLoader(api.Loader):
bg_folder = os.path.dirname(path)
- path = api.get_representation_path(representation)
+ path = get_representation_path(representation)
print(container)
diff --git a/openpype/hosts/harmony/plugins/load/load_imagesequence.py b/openpype/hosts/harmony/plugins/load/load_imagesequence.py
index 310f9bdb61..18695438d5 100644
--- a/openpype/hosts/harmony/plugins/load/load_imagesequence.py
+++ b/openpype/hosts/harmony/plugins/load/load_imagesequence.py
@@ -6,12 +6,15 @@ from pathlib import Path
import clique
-from avalon import api
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
import openpype.hosts.harmony.api as harmony
import openpype.lib
-class ImageSequenceLoader(api.Loader):
+class ImageSequenceLoader(load.LoaderPlugin):
"""Load image sequences.
Stores the imported asset in a container named after the asset.
@@ -79,7 +82,7 @@ class ImageSequenceLoader(api.Loader):
self_name = self.__class__.__name__
node = container.get("nodes").pop()
- path = api.get_representation_path(representation)
+ path = get_representation_path(representation)
collections, remainder = clique.assemble(
os.listdir(os.path.dirname(path))
)
diff --git a/openpype/hosts/harmony/plugins/load/load_palette.py b/openpype/hosts/harmony/plugins/load/load_palette.py
index 2e0f70d135..1da3e61e1b 100644
--- a/openpype/hosts/harmony/plugins/load/load_palette.py
+++ b/openpype/hosts/harmony/plugins/load/load_palette.py
@@ -1,11 +1,14 @@
import os
import shutil
-from avalon import api
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
import openpype.hosts.harmony.api as harmony
-class ImportPaletteLoader(api.Loader):
+class ImportPaletteLoader(load.LoaderPlugin):
"""Import palettes."""
families = ["palette", "harmony.palette"]
@@ -31,7 +34,7 @@ class ImportPaletteLoader(api.Loader):
scene_path = harmony.send(
{"function": "scene.currentProjectPath"}
)["result"]
- src = api.get_representation_path(representation)
+ src = get_representation_path(representation)
dst = os.path.join(
scene_path,
"palette-library",
diff --git a/openpype/hosts/harmony/plugins/load/load_template.py b/openpype/hosts/harmony/plugins/load/load_template.py
index 112e613ae6..c6dc9d913b 100644
--- a/openpype/hosts/harmony/plugins/load/load_template.py
+++ b/openpype/hosts/harmony/plugins/load/load_template.py
@@ -6,12 +6,15 @@ import os
import shutil
import uuid
-from avalon import api
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
import openpype.hosts.harmony.api as harmony
import openpype.lib
-class TemplateLoader(api.Loader):
+class TemplateLoader(load.LoaderPlugin):
"""Load Harmony template as container.
.. todo::
@@ -38,7 +41,7 @@ class TemplateLoader(api.Loader):
# Load template.
self_name = self.__class__.__name__
temp_dir = tempfile.mkdtemp()
- zip_file = api.get_representation_path(context["representation"])
+ zip_file = get_representation_path(context["representation"])
template_path = os.path.join(temp_dir, "temp.tpl")
with zipfile.ZipFile(zip_file, "r") as zip_ref:
zip_ref.extractall(template_path)
diff --git a/openpype/hosts/harmony/plugins/load/load_template_workfile.py b/openpype/hosts/harmony/plugins/load/load_template_workfile.py
index c21b8194b1..2b84a43b35 100644
--- a/openpype/hosts/harmony/plugins/load/load_template_workfile.py
+++ b/openpype/hosts/harmony/plugins/load/load_template_workfile.py
@@ -3,11 +3,14 @@ import zipfile
import os
import shutil
-from avalon import api
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
import openpype.hosts.harmony.api as harmony
-class ImportTemplateLoader(api.Loader):
+class ImportTemplateLoader(load.LoaderPlugin):
"""Import templates."""
families = ["harmony.template", "workfile"]
@@ -17,7 +20,7 @@ class ImportTemplateLoader(api.Loader):
def load(self, context, name=None, namespace=None, data=None):
# Import template.
temp_dir = tempfile.mkdtemp()
- zip_file = api.get_representation_path(context["representation"])
+ zip_file = get_representation_path(context["representation"])
template_path = os.path.join(temp_dir, "temp.tpl")
with zipfile.ZipFile(zip_file, "r") as zip_ref:
zip_ref.extractall(template_path)
diff --git a/openpype/hosts/harmony/plugins/publish/collect_farm_render.py b/openpype/hosts/harmony/plugins/publish/collect_farm_render.py
index 85237094e4..35b123f97d 100644
--- a/openpype/hosts/harmony/plugins/publish/collect_farm_render.py
+++ b/openpype/hosts/harmony/plugins/publish/collect_farm_render.py
@@ -5,6 +5,7 @@ from pathlib import Path
import attr
from avalon import api
+from openpype.lib import get_formatted_current_time
import openpype.lib.abstract_collect_render
import openpype.hosts.harmony.api as harmony
from openpype.lib.abstract_collect_render import RenderInstance
@@ -138,7 +139,7 @@ class CollectFarmRender(openpype.lib.abstract_collect_render.
render_instance = HarmonyRenderInstance(
version=version,
- time=api.time(),
+ time=get_formatted_current_time(),
source=context.data["currentFile"],
label=node.split("/")[1],
subset=subset_name,
diff --git a/openpype/hosts/harmony/plugins/publish/help/validate_audio.xml b/openpype/hosts/harmony/plugins/publish/help/validate_audio.xml
new file mode 100644
index 0000000000..e9a183c675
--- /dev/null
+++ b/openpype/hosts/harmony/plugins/publish/help/validate_audio.xml
@@ -0,0 +1,15 @@
+
+
+
+Missing audio file
+
+## Cannot locate linked audio file
+
+Audio file at {audio_url} cannot be found.
+
+### How to repair?
+
+Copy audio file to the highlighted location or remove audio link in the workfile.
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/harmony/plugins/publish/help/validate_instances.xml b/openpype/hosts/harmony/plugins/publish/help/validate_instances.xml
new file mode 100644
index 0000000000..3b040e8ea8
--- /dev/null
+++ b/openpype/hosts/harmony/plugins/publish/help/validate_instances.xml
@@ -0,0 +1,25 @@
+
+
+
+Subset context
+
+## Invalid subset context
+
+Asset name found '{found}' in subsets, expected '{expected}'.
+
+### How to repair?
+
+You can fix this with `Repair` button on the right. This will use '{expected}' asset name and overwrite '{found}' asset name in scene metadata.
+
+After that restart `Publish` with a `Reload button`.
+
+If this is unwanted, close workfile and open again, that way different asset value would be used for context information.
+
+
+### __Detailed Info__ (optional)
+
+This might happen if you reuse an old workfile and open it in a different context.
+(E.g. you created subset "renderCompositingDefault" from asset "Robot" in "your_project_Robot_compositing.aep", now you opened this workfile in a context "Sloth" but the existing subset for the "Robot" asset stayed in the workfile.)
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/harmony/plugins/publish/help/validate_scene_settings.xml b/openpype/hosts/harmony/plugins/publish/help/validate_scene_settings.xml
new file mode 100644
index 0000000000..36fa90456e
--- /dev/null
+++ b/openpype/hosts/harmony/plugins/publish/help/validate_scene_settings.xml
@@ -0,0 +1,35 @@
+
+
+
+Scene setting
+
+## Invalid scene setting found
+
+One of the settings in a scene doesn't match to asset settings in database.
+
+{invalid_setting_str}
+
+### How to repair?
+
+Change values for {invalid_keys_str} in the scene OR change them in the asset database if they are wrong there.
+
+
+### __Detailed Info__ (optional)
+
+This error is shown when for example resolution in the scene doesn't match to resolution set on the asset in the database.
+Either value in the database or in the scene is wrong.
+
+
+
+Scene file doesn't exist
+
+## Scene file doesn't exist
+
+Collected scene {scene_url} doesn't exist.
+
+### How to repair?
+
+Re-save file, start publish from the beginning again.
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/harmony/plugins/publish/validate_audio.py b/openpype/hosts/harmony/plugins/publish/validate_audio.py
index d183dadb91..cb6b2307cd 100644
--- a/openpype/hosts/harmony/plugins/publish/validate_audio.py
+++ b/openpype/hosts/harmony/plugins/publish/validate_audio.py
@@ -4,6 +4,8 @@ import pyblish.api
import openpype.hosts.harmony.api as harmony
+from openpype.pipeline import PublishXmlValidationError
+
class ValidateAudio(pyblish.api.InstancePlugin):
"""Ensures that there is an audio file in the scene.
@@ -42,4 +44,9 @@ class ValidateAudio(pyblish.api.InstancePlugin):
msg = "You are missing audio file:\n{}".format(audio_path)
- assert os.path.isfile(audio_path), msg
+ formatting_data = {
+ "audio_url": audio_path
+ }
+    if not os.path.isfile(audio_path):
+ raise PublishXmlValidationError(self, msg,
+ formatting_data=formatting_data)
diff --git a/openpype/hosts/harmony/plugins/publish/validate_instances.py b/openpype/hosts/harmony/plugins/publish/validate_instances.py
index 1ba65573fc..373ef94cc3 100644
--- a/openpype/hosts/harmony/plugins/publish/validate_instances.py
+++ b/openpype/hosts/harmony/plugins/publish/validate_instances.py
@@ -2,6 +2,7 @@ import os
import pyblish.api
import openpype.api
+from openpype.pipeline import PublishXmlValidationError
import openpype.hosts.harmony.api as harmony
@@ -45,4 +46,11 @@ class ValidateInstance(pyblish.api.InstancePlugin):
"Instance asset is not the same as current asset:"
f"\nInstance: {instance_asset}\nCurrent: {current_asset}"
)
- assert instance_asset == current_asset, msg
+
+ formatting_data = {
+ "found": instance_asset,
+ "expected": current_asset
+ }
+ if instance_asset != current_asset:
+ raise PublishXmlValidationError(self, msg,
+ formatting_data=formatting_data)
diff --git a/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py b/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py
index efd9bbb212..4c3a6c4465 100644
--- a/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py
+++ b/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py
@@ -7,7 +7,7 @@ import re
import pyblish.api
import openpype.hosts.harmony.api as harmony
-import openpype.hosts.harmony
+from openpype.pipeline import PublishXmlValidationError
class ValidateSceneSettingsRepair(pyblish.api.Action):
@@ -19,12 +19,12 @@ class ValidateSceneSettingsRepair(pyblish.api.Action):
def process(self, context, plugin):
"""Repair action entry point."""
- expected = openpype.hosts.harmony.api.get_asset_settings()
+ expected = harmony.get_asset_settings()
asset_settings = _update_frames(dict.copy(expected))
asset_settings["frameStart"] = 1
asset_settings["frameEnd"] = asset_settings["frameEnd"] + \
asset_settings["handleEnd"]
- openpype.hosts.harmony.api.set_scene_settings(asset_settings)
+ harmony.set_scene_settings(asset_settings)
if not os.path.exists(context.data["scenePath"]):
self.log.info("correcting scene name")
scene_dir = os.path.dirname(context.data["currentFile"])
@@ -55,7 +55,7 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
def process(self, instance):
"""Plugin entry point."""
- expected_settings = openpype.hosts.harmony.api.get_asset_settings()
+ expected_settings = harmony.get_asset_settings()
self.log.info("scene settings from DB:".format(expected_settings))
expected_settings = _update_frames(dict.copy(expected_settings))
@@ -102,13 +102,13 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
self.log.debug("current scene settings {}".format(current_settings))
invalid_settings = []
+ invalid_keys = set()
for key, value in expected_settings.items():
if value != current_settings[key]:
- invalid_settings.append({
- "name": key,
- "expected": value,
- "current": current_settings[key]
- })
+ invalid_settings.append(
+ "{} expected: {} found: {}".format(key, value,
+ current_settings[key]))
+ invalid_keys.add(key)
if ((expected_settings["handleStart"]
or expected_settings["handleEnd"])
@@ -120,10 +120,30 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
msg = "Found invalid settings:\n{}".format(
json.dumps(invalid_settings, sort_keys=True, indent=4)
)
- assert not invalid_settings, msg
- assert os.path.exists(instance.context.data.get("scenePath")), (
- "Scene file not found (saved under wrong name)"
- )
+
+ if invalid_settings:
+ invalid_keys_str = ",".join(invalid_keys)
+ break_str = " "
+ invalid_setting_str = "Found invalid settings: {}".\
+ format(break_str.join(invalid_settings))
+
+ formatting_data = {
+ "invalid_setting_str": invalid_setting_str,
+ "invalid_keys_str": invalid_keys_str
+ }
+ raise PublishXmlValidationError(self, msg,
+ formatting_data=formatting_data)
+
+ scene_url = instance.context.data.get("scenePath")
+ if not os.path.exists(scene_url):
+ msg = "Scene file {} not found (saved under wrong name)".format(
+ scene_url
+ )
+ formatting_data = {
+ "scene_url": scene_url
+ }
+ raise PublishXmlValidationError(self, msg, key="file_not_found",
+ formatting_data=formatting_data)
def _update_frames(expected_settings):
diff --git a/openpype/hosts/hiero/api/events.py b/openpype/hosts/hiero/api/events.py
index 7563503593..9439199933 100644
--- a/openpype/hosts/hiero/api/events.py
+++ b/openpype/hosts/hiero/api/events.py
@@ -1,12 +1,12 @@
import os
import hiero.core.events
-import avalon.api as avalon
from openpype.api import Logger
from .lib import (
sync_avalon_data_to_workfile,
launch_workfiles_app,
selection_changed_timeline,
- before_project_save
+ before_project_save,
+ register_event_callback
)
from .tags import add_tags_to_workfile
from .menu import update_menu_task_label
@@ -126,5 +126,5 @@ def register_events():
"""
# if task changed then change notext of hiero
- avalon.on("taskChanged", update_menu_task_label)
+ register_event_callback("taskChanged", update_menu_task_label)
log.info("Installed event callback for 'taskChanged'..")
diff --git a/openpype/hosts/hiero/api/menu.py b/openpype/hosts/hiero/api/menu.py
index 306bef87ca..de20b86f30 100644
--- a/openpype/hosts/hiero/api/menu.py
+++ b/openpype/hosts/hiero/api/menu.py
@@ -14,7 +14,7 @@ self = sys.modules[__name__]
self._change_context_menu = None
-def update_menu_task_label(*args):
+def update_menu_task_label():
"""Update the task label in Avalon menu to current session"""
object_name = self._change_context_menu
diff --git a/openpype/hosts/hiero/api/pipeline.py b/openpype/hosts/hiero/api/pipeline.py
index cbcaf23994..eff126c0b6 100644
--- a/openpype/hosts/hiero/api/pipeline.py
+++ b/openpype/hosts/hiero/api/pipeline.py
@@ -9,6 +9,11 @@ from avalon import api as avalon
from avalon import schema
from pyblish import api as pyblish
from openpype.api import Logger
+from openpype.pipeline import (
+ LegacyCreator,
+ register_loader_plugin_path,
+ deregister_loader_plugin_path,
+)
from openpype.tools.utils import host_tools
from . import lib, menu, events
@@ -44,8 +49,8 @@ def install():
log.info("Registering Hiero plug-ins..")
pyblish.register_host("hiero")
pyblish.register_plugin_path(PUBLISH_PATH)
- avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
- avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
+ register_loader_plugin_path(LOAD_PATH)
+ avalon.register_plugin_path(LegacyCreator, CREATE_PATH)
avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
# register callback for switching publishable
@@ -66,8 +71,8 @@ def uninstall():
log.info("Deregistering Hiero plug-ins..")
pyblish.deregister_host("hiero")
pyblish.deregister_plugin_path(PUBLISH_PATH)
- avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
- avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
+ deregister_loader_plugin_path(LOAD_PATH)
+ avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH)
# register callback for switching publishable
pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)
diff --git a/openpype/hosts/hiero/api/plugin.py b/openpype/hosts/hiero/api/plugin.py
index 3506af2d6a..54e66bf99a 100644
--- a/openpype/hosts/hiero/api/plugin.py
+++ b/openpype/hosts/hiero/api/plugin.py
@@ -1,13 +1,16 @@
-import re
import os
-import hiero
-from Qt import QtWidgets, QtCore
-from avalon.vendor import qargparse
-import avalon.api as avalon
-import openpype.api as openpype
-from . import lib
+import re
from copy import deepcopy
+import hiero
+
+from Qt import QtWidgets, QtCore
+import qargparse
+
+import openpype.api as openpype
+from openpype.pipeline import LoaderPlugin, LegacyCreator
+from . import lib
+
log = openpype.Logger().get_logger(__name__)
@@ -303,7 +306,7 @@ def get_reference_node_parents(ref):
return parents
-class SequenceLoader(avalon.Loader):
+class SequenceLoader(LoaderPlugin):
"""A basic SequenceLoader for Resolve
This will implement the basic behavior for a loader to inherit from that
@@ -589,7 +592,7 @@ class ClipLoader:
return track_item
-class Creator(openpype.Creator):
+class Creator(LegacyCreator):
"""Creator class wrapper
"""
clip_color = "Purple"
diff --git a/openpype/hosts/hiero/plugins/load/load_clip.py b/openpype/hosts/hiero/plugins/load/load_clip.py
index b905dd4431..d3908695a2 100644
--- a/openpype/hosts/hiero/plugins/load/load_clip.py
+++ b/openpype/hosts/hiero/plugins/load/load_clip.py
@@ -1,4 +1,5 @@
-from avalon import io, api
+from avalon import io
+from openpype.pipeline import get_representation_path
import openpype.hosts.hiero.api as phiero
# from openpype.hosts.hiero.api import plugin, lib
# reload(lib)
@@ -112,7 +113,7 @@ class LoadClip(phiero.SequenceLoader):
version_name = version.get("name", None)
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
- file = api.get_representation_path(representation).replace("\\", "/")
+ file = get_representation_path(representation).replace("\\", "/")
clip = track_item.source()
# reconnect media to new path
diff --git a/openpype/hosts/houdini/api/__init__.py b/openpype/hosts/houdini/api/__init__.py
index e1500aa5f5..fddf7ab98d 100644
--- a/openpype/hosts/houdini/api/__init__.py
+++ b/openpype/hosts/houdini/api/__init__.py
@@ -24,8 +24,7 @@ from .lib import (
lsattrs,
read,
- maintained_selection,
- unique_name
+ maintained_selection
)
@@ -51,8 +50,7 @@ __all__ = [
"lsattrs",
"read",
- "maintained_selection",
- "unique_name"
+ "maintained_selection"
]
# Backwards API compatibility
diff --git a/openpype/hosts/houdini/api/lib.py b/openpype/hosts/houdini/api/lib.py
index 72f1c8e71f..bd41618856 100644
--- a/openpype/hosts/houdini/api/lib.py
+++ b/openpype/hosts/houdini/api/lib.py
@@ -99,65 +99,6 @@ def get_id_required_nodes():
return list(nodes)
-def get_additional_data(container):
- """Not implemented yet!"""
- return container
-
-
-def set_parameter_callback(node, parameter, language, callback):
- """Link a callback to a parameter of a node
-
- Args:
- node(hou.Node): instance of the nodee
- parameter(str): name of the parameter
- language(str): name of the language, e.g.: python
- callback(str): command which needs to be triggered
-
- Returns:
- None
-
- """
-
- template_grp = node.parmTemplateGroup()
- template = template_grp.find(parameter)
- if not template:
- return
-
- script_language = (hou.scriptLanguage.Python if language == "python" else
- hou.scriptLanguage.Hscript)
-
- template.setScriptCallbackLanguage(script_language)
- template.setScriptCallback(callback)
-
- template.setTags({"script_callback": callback,
- "script_callback_language": language.lower()})
-
- # Replace the existing template with the adjusted one
- template_grp.replace(parameter, template)
-
- node.setParmTemplateGroup(template_grp)
-
-
-def set_parameter_callbacks(node, parameter_callbacks):
- """Set callbacks for multiple parameters of a node
-
- Args:
- node(hou.Node): instance of a hou.Node
- parameter_callbacks(dict): collection of parameter and callback data
- example: {"active" :
- {"language": "python",
- "callback": "print('hello world)'"}
- }
- Returns:
- None
- """
- for parameter, data in parameter_callbacks.items():
- language = data["language"]
- callback = data["callback"]
-
- set_parameter_callback(node, parameter, language, callback)
-
-
def get_output_parameter(node):
"""Return the render output parameter name of the given node
@@ -189,19 +130,6 @@ def get_output_parameter(node):
raise TypeError("Node type '%s' not supported" % node_type)
-@contextmanager
-def attribute_values(node, data):
-
- previous_attrs = {key: node.parm(key).eval() for key in data.keys()}
- try:
- node.setParms(data)
- yield
- except Exception as exc:
- pass
- finally:
- node.setParms(previous_attrs)
-
-
def set_scene_fps(fps):
hou.setFps(fps)
@@ -349,10 +277,6 @@ def render_rop(ropnode):
raise RuntimeError("Render failed: {0}".format(exc))
-def children_as_string(node):
- return [c.name() for c in node.children()]
-
-
def imprint(node, data):
"""Store attributes with value on a node
@@ -473,53 +397,6 @@ def read(node):
parameter in node.spareParms()}
-def unique_name(name, format="%03d", namespace="", prefix="", suffix="",
- separator="_"):
- """Return unique `name`
-
- The function takes into consideration an optional `namespace`
- and `suffix`. The suffix is included in evaluating whether a
- name exists - such as `name` + "_GRP" - but isn't included
- in the returned value.
-
- If a namespace is provided, only names within that namespace
- are considered when evaluating whether the name is unique.
-
- Arguments:
- format (str, optional): The `name` is given a number, this determines
- how this number is formatted. Defaults to a padding of 2.
- E.g. my_name01, my_name02.
- namespace (str, optional): Only consider names within this namespace.
- suffix (str, optional): Only consider names with this suffix.
-
- Example:
- >>> name = hou.node("/obj").createNode("geo", name="MyName")
- >>> assert hou.node("/obj/MyName")
- True
- >>> unique = unique_name(name)
- >>> assert hou.node("/obj/{}".format(unique))
- False
-
- """
-
- iteration = 1
-
- parts = [prefix, name, format % iteration, suffix]
- if namespace:
- parts.insert(0, namespace)
-
- unique = separator.join(parts)
- children = children_as_string(hou.node("/obj"))
- while unique in children:
- iteration += 1
- unique = separator.join(parts)
-
- if suffix:
- return unique[:-len(suffix)]
-
- return unique
-
-
@contextmanager
def maintained_selection():
"""Maintain selection during context
@@ -542,3 +419,37 @@ def maintained_selection():
if previous_selection:
for node in previous_selection:
node.setSelected(on=True)
+
+
+def reset_framerange():
+ """Set frame range to current asset"""
+
+ asset_name = api.Session["AVALON_ASSET"]
+ asset = io.find_one({"name": asset_name, "type": "asset"})
+
+ frame_start = asset["data"].get("frameStart")
+ frame_end = asset["data"].get("frameEnd")
+ # Backwards compatibility
+ if frame_start is None or frame_end is None:
+ frame_start = asset["data"].get("edit_in")
+ frame_end = asset["data"].get("edit_out")
+
+ if frame_start is None or frame_end is None:
+ log.warning("No edit information found for %s" % asset_name)
+ return
+
+ handles = asset["data"].get("handles") or 0
+ handle_start = asset["data"].get("handleStart")
+ if handle_start is None:
+ handle_start = handles
+
+ handle_end = asset["data"].get("handleEnd")
+ if handle_end is None:
+ handle_end = handles
+
+ frame_start -= int(handle_start)
+ frame_end += int(handle_end)
+
+ hou.playbar.setFrameRange(frame_start, frame_end)
+ hou.playbar.setPlaybackRange(frame_start, frame_end)
+ hou.setFrame(frame_start)
diff --git a/openpype/hosts/houdini/api/pipeline.py b/openpype/hosts/houdini/api/pipeline.py
index c3dbdc5ef5..7d4e58efb7 100644
--- a/openpype/hosts/houdini/api/pipeline.py
+++ b/openpype/hosts/houdini/api/pipeline.py
@@ -4,17 +4,24 @@ import logging
import contextlib
import hou
+import hdefereval
import pyblish.api
import avalon.api
from avalon.pipeline import AVALON_CONTAINER_ID
from avalon.lib import find_submodule
+from openpype.pipeline import (
+ LegacyCreator,
+ register_loader_plugin_path,
+)
import openpype.hosts.houdini
from openpype.hosts.houdini.api import lib
from openpype.lib import (
- any_outdated
+ register_event_callback,
+ emit_event,
+ any_outdated,
)
from .lib import get_asset_fps
@@ -46,15 +53,15 @@ def install():
pyblish.api.register_host("hpython")
pyblish.api.register_plugin_path(PUBLISH_PATH)
- avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH)
- avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH)
+ register_loader_plugin_path(LOAD_PATH)
+ avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH)
log.info("Installing callbacks ... ")
- # avalon.on("init", on_init)
- avalon.api.before("save", before_save)
- avalon.api.on("save", on_save)
- avalon.api.on("open", on_open)
- avalon.api.on("new", on_new)
+ # register_event_callback("init", on_init)
+ register_event_callback("before.save", before_save)
+ register_event_callback("save", on_save)
+ register_event_callback("open", on_open)
+ register_event_callback("new", on_new)
pyblish.api.register_callback(
"instanceToggled", on_pyblish_instance_toggled
@@ -66,9 +73,10 @@ def install():
sys.path.append(hou_pythonpath)
- # Set asset FPS for the empty scene directly after launch of Houdini
- # so it initializes into the correct scene FPS
- _set_asset_fps()
+ # Set asset settings for the empty scene directly after launch of Houdini
+ # so it initializes into the correct scene FPS, Frame Range, etc.
+ # todo: make sure this doesn't trigger when opening with last workfile
+ _set_context_settings()
def uninstall():
@@ -99,13 +107,13 @@ def _register_callbacks():
def on_file_event_callback(event):
if event == hou.hipFileEventType.AfterLoad:
- avalon.api.emit("open", [event])
+ emit_event("open")
elif event == hou.hipFileEventType.AfterSave:
- avalon.api.emit("save", [event])
+ emit_event("save")
elif event == hou.hipFileEventType.BeforeSave:
- avalon.api.emit("before_save", [event])
+ emit_event("before.save")
elif event == hou.hipFileEventType.AfterClear:
- avalon.api.emit("new", [event])
+ emit_event("new")
def get_main_window():
@@ -227,11 +235,11 @@ def ls():
yield data
-def before_save(*args):
+def before_save():
return lib.validate_fps()
-def on_save(*args):
+def on_save():
log.info("Running callback on save..")
@@ -240,7 +248,7 @@ def on_save(*args):
lib.set_id(node, new_id, overwrite=False)
-def on_open(*args):
+def on_open():
if not hou.isUIAvailable():
log.debug("Batch mode detected, ignoring `on_open` callbacks..")
@@ -277,20 +285,51 @@ def on_open(*args):
dialog.show()
-def on_new(_):
+def on_new():
"""Set project resolution and fps when create a new file"""
+
+ if hou.hipFile.isLoadingHipFile():
+ # This event also triggers when Houdini opens a file due to the
+ # new event being registered to 'afterClear'. As such we can skip
+ # 'new' logic if the user is opening a file anyway
+ log.debug("Skipping on new callback due to scene being opened.")
+ return
+
log.info("Running callback on new..")
- _set_asset_fps()
+ _set_context_settings()
+
+ # It seems that the current frame always gets reset to frame 1 on
+ # new scene. So we enforce current frame to be at the start of the playbar
+ # with execute deferred
+ def _enforce_start_frame():
+ start = hou.playbar.playbackRange()[0]
+ hou.setFrame(start)
+
+ hdefereval.executeDeferred(_enforce_start_frame)
-def _set_asset_fps():
- """Set Houdini scene FPS to the default required for current asset"""
+def _set_context_settings():
+ """Apply the project settings from the project definition
+
+ Settings can be overwritten by an asset if the asset.data contains
+ any information regarding those settings.
+
+ Examples of settings:
+ fps
+ resolution
+ renderer
+
+ Returns:
+ None
+ """
# Set new scene fps
fps = get_asset_fps()
print("Setting scene FPS to %i" % fps)
lib.set_scene_fps(fps)
+ lib.reset_framerange()
+
def on_pyblish_instance_toggled(instance, new_value, old_value):
"""Toggle saver tool passthrough states on instance toggles."""
diff --git a/openpype/hosts/houdini/api/plugin.py b/openpype/hosts/houdini/api/plugin.py
index 4967d01d43..2bbb65aa05 100644
--- a/openpype/hosts/houdini/api/plugin.py
+++ b/openpype/hosts/houdini/api/plugin.py
@@ -2,11 +2,12 @@
"""Houdini specific Avalon/Pyblish plugin definitions."""
import sys
import six
-import avalon.api
-from avalon.api import CreatorError
import hou
-from openpype.api import PypeCreatorMixin
+from openpype.pipeline import (
+ CreatorError,
+ LegacyCreator
+)
from .lib import imprint
@@ -14,7 +15,7 @@ class OpenPypeCreatorError(CreatorError):
pass
-class Creator(PypeCreatorMixin, avalon.api.Creator):
+class Creator(LegacyCreator):
"""Creator plugin to create instances in Houdini
To support the wide range of node types for render output (Alembic, VDB,
diff --git a/openpype/hosts/houdini/plugins/load/actions.py b/openpype/hosts/houdini/plugins/load/actions.py
index 6e9410ff58..63d74c39a5 100644
--- a/openpype/hosts/houdini/plugins/load/actions.py
+++ b/openpype/hosts/houdini/plugins/load/actions.py
@@ -2,10 +2,10 @@
"""
-from avalon import api
+from openpype.pipeline import load
-class SetFrameRangeLoader(api.Loader):
+class SetFrameRangeLoader(load.LoaderPlugin):
"""Set Houdini frame range"""
families = [
@@ -29,8 +29,8 @@ class SetFrameRangeLoader(api.Loader):
version = context["version"]
version_data = version.get("data", {})
- start = version_data.get("startFrame", None)
- end = version_data.get("endFrame", None)
+ start = version_data.get("frameStart", None)
+ end = version_data.get("frameEnd", None)
if start is None or end is None:
print(
@@ -43,7 +43,7 @@ class SetFrameRangeLoader(api.Loader):
hou.playbar.setPlaybackRange(start, end)
-class SetFrameRangeWithHandlesLoader(api.Loader):
+class SetFrameRangeWithHandlesLoader(load.LoaderPlugin):
"""Set Maya frame range including pre- and post-handles"""
families = [
@@ -67,8 +67,8 @@ class SetFrameRangeWithHandlesLoader(api.Loader):
version = context["version"]
version_data = version.get("data", {})
- start = version_data.get("startFrame", None)
- end = version_data.get("endFrame", None)
+ start = version_data.get("frameStart", None)
+ end = version_data.get("frameEnd", None)
if start is None or end is None:
print(
@@ -78,9 +78,8 @@ class SetFrameRangeWithHandlesLoader(api.Loader):
return
# Include handles
- handles = version_data.get("handles", 0)
- start -= handles
- end += handles
+ start -= version_data.get("handleStart", 0)
+ end += version_data.get("handleEnd", 0)
hou.playbar.setFrameRange(start, end)
hou.playbar.setPlaybackRange(start, end)
diff --git a/openpype/hosts/houdini/plugins/load/load_alembic.py b/openpype/hosts/houdini/plugins/load/load_alembic.py
index eaab81f396..0214229d5a 100644
--- a/openpype/hosts/houdini/plugins/load/load_alembic.py
+++ b/openpype/hosts/houdini/plugins/load/load_alembic.py
@@ -1,10 +1,12 @@
import os
-from avalon import api
-
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
from openpype.hosts.houdini.api import pipeline
-class AbcLoader(api.Loader):
+class AbcLoader(load.LoaderPlugin):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["model", "animation", "pointcache", "gpuCache"]
@@ -90,7 +92,7 @@ class AbcLoader(api.Loader):
return
# Update the file path
- file_path = api.get_representation_path(representation)
+ file_path = get_representation_path(representation)
file_path = file_path.replace("\\", "/")
alembic_node.setParms({"fileName": file_path})
diff --git a/openpype/hosts/houdini/plugins/load/load_ass.py b/openpype/hosts/houdini/plugins/load/load_ass.py
index 8c272044ec..0144bbaefd 100644
--- a/openpype/hosts/houdini/plugins/load/load_ass.py
+++ b/openpype/hosts/houdini/plugins/load/load_ass.py
@@ -1,11 +1,15 @@
import os
-from avalon import api
-from avalon.houdini import pipeline
import clique
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
+
+from openpype.hosts.houdini.api import pipeline
-class AssLoader(api.Loader):
+class AssLoader(load.LoaderPlugin):
"""Load .ass with Arnold Procedural"""
families = ["ass"]
@@ -88,7 +92,7 @@ class AssLoader(api.Loader):
def update(self, container, representation):
# Update the file path
- file_path = api.get_representation_path(representation)
+ file_path = get_representation_path(representation)
file_path = file_path.replace("\\", "/")
procedural = container["node"]
diff --git a/openpype/hosts/houdini/plugins/load/load_camera.py b/openpype/hosts/houdini/plugins/load/load_camera.py
index 8916d3b9b7..ef57d115da 100644
--- a/openpype/hosts/houdini/plugins/load/load_camera.py
+++ b/openpype/hosts/houdini/plugins/load/load_camera.py
@@ -1,4 +1,7 @@
-from avalon import api
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
from openpype.hosts.houdini.api import pipeline
@@ -74,7 +77,7 @@ def transfer_non_default_values(src, dest, ignore=None):
dest_parm.setFromParm(parm)
-class CameraLoader(api.Loader):
+class CameraLoader(load.LoaderPlugin):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["camera"]
@@ -129,7 +132,7 @@ class CameraLoader(api.Loader):
node = container["node"]
# Update the file path
- file_path = api.get_representation_path(representation)
+ file_path = get_representation_path(representation)
file_path = file_path.replace("\\", "/")
# Update attributes
diff --git a/openpype/hosts/houdini/plugins/load/load_hda.py b/openpype/hosts/houdini/plugins/load/load_hda.py
index f5f2fb7481..2438570c6e 100644
--- a/openpype/hosts/houdini/plugins/load/load_hda.py
+++ b/openpype/hosts/houdini/plugins/load/load_hda.py
@@ -1,10 +1,13 @@
# -*- coding: utf-8 -*-
-from avalon import api
-
+import os
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
from openpype.hosts.houdini.api import pipeline
-class HdaLoader(api.Loader):
+class HdaLoader(load.LoaderPlugin):
"""Load Houdini Digital Asset file."""
families = ["hda"]
@@ -15,7 +18,6 @@ class HdaLoader(api.Loader):
color = "orange"
def load(self, context, name=None, namespace=None, data=None):
- import os
import hou
# Format file name, Houdini only wants forward slashes
@@ -49,7 +51,7 @@ class HdaLoader(api.Loader):
import hou
hda_node = container["node"]
- file_path = api.get_representation_path(representation)
+ file_path = get_representation_path(representation)
file_path = file_path.replace("\\", "/")
hou.hda.installFile(file_path)
defs = hda_node.type().allInstalledDefinitions()
diff --git a/openpype/hosts/houdini/plugins/load/load_image.py b/openpype/hosts/houdini/plugins/load/load_image.py
index 39f583677b..bd9ea3eee3 100644
--- a/openpype/hosts/houdini/plugins/load/load_image.py
+++ b/openpype/hosts/houdini/plugins/load/load_image.py
@@ -1,6 +1,9 @@
import os
-from avalon import api
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
from openpype.hosts.houdini.api import lib, pipeline
import hou
@@ -37,7 +40,7 @@ def get_image_avalon_container():
return image_container
-class ImageLoader(api.Loader):
+class ImageLoader(load.LoaderPlugin):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["colorbleed.imagesequence"]
@@ -87,7 +90,7 @@ class ImageLoader(api.Loader):
node = container["node"]
# Update the file path
- file_path = api.get_representation_path(representation)
+ file_path = get_representation_path(representation)
file_path = file_path.replace("\\", "/")
file_path = self._get_file_sequence(file_path)
diff --git a/openpype/hosts/houdini/plugins/load/load_usd_layer.py b/openpype/hosts/houdini/plugins/load/load_usd_layer.py
index 0d4378b480..d803e6abfe 100644
--- a/openpype/hosts/houdini/plugins/load/load_usd_layer.py
+++ b/openpype/hosts/houdini/plugins/load/load_usd_layer.py
@@ -1,8 +1,11 @@
-from avalon import api
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
from openpype.hosts.houdini.api import lib, pipeline
-class USDSublayerLoader(api.Loader):
+class USDSublayerLoader(load.LoaderPlugin):
"""Sublayer USD file in Solaris"""
families = [
@@ -57,7 +60,7 @@ class USDSublayerLoader(api.Loader):
node = container["node"]
# Update the file path
- file_path = api.get_representation_path(representation)
+ file_path = get_representation_path(representation)
file_path = file_path.replace("\\", "/")
# Update attributes
diff --git a/openpype/hosts/houdini/plugins/load/load_usd_reference.py b/openpype/hosts/houdini/plugins/load/load_usd_reference.py
index 0edd8d9af6..fdb443f4cf 100644
--- a/openpype/hosts/houdini/plugins/load/load_usd_reference.py
+++ b/openpype/hosts/houdini/plugins/load/load_usd_reference.py
@@ -1,8 +1,11 @@
-from avalon import api
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
from openpype.hosts.houdini.api import lib, pipeline
-class USDReferenceLoader(api.Loader):
+class USDReferenceLoader(load.LoaderPlugin):
"""Reference USD file in Solaris"""
families = [
@@ -57,7 +60,7 @@ class USDReferenceLoader(api.Loader):
node = container["node"]
# Update the file path
- file_path = api.get_representation_path(representation)
+ file_path = get_representation_path(representation)
file_path = file_path.replace("\\", "/")
# Update attributes
diff --git a/openpype/hosts/houdini/plugins/load/load_vdb.py b/openpype/hosts/houdini/plugins/load/load_vdb.py
index 40aa7a1d18..06bb9e45e4 100644
--- a/openpype/hosts/houdini/plugins/load/load_vdb.py
+++ b/openpype/hosts/houdini/plugins/load/load_vdb.py
@@ -1,11 +1,14 @@
import os
import re
-from avalon import api
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
from openpype.hosts.houdini.api import pipeline
-class VdbLoader(api.Loader):
+class VdbLoader(load.LoaderPlugin):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["vdbcache"]
@@ -96,7 +99,7 @@ class VdbLoader(api.Loader):
return
# Update the file path
- file_path = api.get_representation_path(representation)
+ file_path = get_representation_path(representation)
file_path = self.format_path(file_path)
file_node.setParms({"fileName": file_path})
diff --git a/openpype/hosts/houdini/plugins/load/show_usdview.py b/openpype/hosts/houdini/plugins/load/show_usdview.py
index f23974094e..8066615181 100644
--- a/openpype/hosts/houdini/plugins/load/show_usdview.py
+++ b/openpype/hosts/houdini/plugins/load/show_usdview.py
@@ -1,7 +1,7 @@
-from avalon import api
+from openpype.pipeline import load
-class ShowInUsdview(api.Loader):
+class ShowInUsdview(load.LoaderPlugin):
"""Open USD file in usdview"""
families = ["colorbleed.usd"]
diff --git a/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py b/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py
index 645bd05d4b..3e842ae766 100644
--- a/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py
+++ b/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py
@@ -7,6 +7,7 @@ from collections import deque
import pyblish.api
import openpype.api
+from openpype.pipeline import get_representation_path
import openpype.hosts.houdini.api.usd as hou_usdlib
from openpype.hosts.houdini.api.lib import render_rop
@@ -308,7 +309,7 @@ class ExtractUSDLayered(openpype.api.Extractor):
self.log.debug("No existing representation..")
return False
- old_file = api.get_representation_path(representation)
+ old_file = get_representation_path(representation)
if not os.path.exists(old_file):
return False
diff --git a/openpype/hosts/houdini/plugins/publish/save_scene.py b/openpype/hosts/houdini/plugins/publish/save_scene.py
index 1b12efa603..fe5962fbd3 100644
--- a/openpype/hosts/houdini/plugins/publish/save_scene.py
+++ b/openpype/hosts/houdini/plugins/publish/save_scene.py
@@ -2,26 +2,14 @@ import pyblish.api
import avalon.api
-class SaveCurrentScene(pyblish.api.InstancePlugin):
+class SaveCurrentScene(pyblish.api.ContextPlugin):
"""Save current scene"""
label = "Save current file"
- order = pyblish.api.IntegratorOrder - 0.49
+ order = pyblish.api.ExtractorOrder - 0.49
hosts = ["houdini"]
- families = ["usdrender",
- "redshift_rop"]
- targets = ["local"]
- def process(self, instance):
-
- # This should be a ContextPlugin, but this is a workaround
- # for a bug in pyblish to run once for a family: issue #250
- context = instance.context
- key = "__hasRun{}".format(self.__class__.__name__)
- if context.data.get(key, False):
- return
- else:
- context.data[key] = True
+ def process(self, context):
# Filename must not have changed since collecting
host = avalon.api.registered_host()
diff --git a/openpype/hosts/houdini/plugins/publish/save_scene_deadline.py b/openpype/hosts/houdini/plugins/publish/save_scene_deadline.py
deleted file mode 100644
index a0efd0610c..0000000000
--- a/openpype/hosts/houdini/plugins/publish/save_scene_deadline.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import pyblish.api
-
-
-class SaveCurrentSceneDeadline(pyblish.api.ContextPlugin):
- """Save current scene"""
-
- label = "Save current file"
- order = pyblish.api.IntegratorOrder - 0.49
- hosts = ["houdini"]
- targets = ["deadline"]
-
- def process(self, context):
- import hou
-
- assert (
- context.data["currentFile"] == hou.hipFile.path()
- ), "Collected filename from current scene name."
-
- if hou.hipFile.hasUnsavedChanges():
- self.log.info("Saving current file..")
- hou.hipFile.save(save_to_recent_files=True)
- else:
- self.log.debug("No unsaved changes, skipping file save..")
diff --git a/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py b/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py
index 8fe1b44b7a..3e17d3e8de 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py
@@ -65,7 +65,7 @@ class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin):
cls.log.debug("Checking with path attribute: %s" % path_attr)
# Check if the primitive attribute exists
- frame = instance.data.get("startFrame", 0)
+ frame = instance.data.get("frameStart", 0)
geo = output.geometryAtFrame(frame)
# If there are no primitives on the start frame then it might be
diff --git a/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py b/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py
index 17c9da837a..8d7e3b611f 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py
@@ -38,7 +38,7 @@ class ValidateAlembicInputNode(pyblish.api.InstancePlugin):
cls.log.warning("No geometry output node found, skipping check..")
return
- frame = instance.data.get("startFrame", 0)
+ frame = instance.data.get("frameStart", 0)
geo = node.geometryAtFrame(frame)
invalid = False
diff --git a/openpype/hosts/houdini/plugins/publish/validate_output_node.py b/openpype/hosts/houdini/plugins/publish/validate_output_node.py
deleted file mode 100644
index 0b60ab5c48..0000000000
--- a/openpype/hosts/houdini/plugins/publish/validate_output_node.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import pyblish.api
-
-
-class ValidateOutputNode(pyblish.api.InstancePlugin):
- """Validate the instance SOP Output Node.
-
- This will ensure:
- - The SOP Path is set.
- - The SOP Path refers to an existing object.
- - The SOP Path node is a SOP node.
- - The SOP Path node has at least one input connection (has an input)
- - The SOP Path has geometry data.
-
- """
-
- order = pyblish.api.ValidatorOrder
- families = ["pointcache", "vdbcache"]
- hosts = ["houdini"]
- label = "Validate Output Node"
-
- def process(self, instance):
-
- invalid = self.get_invalid(instance)
- if invalid:
- raise RuntimeError(
- "Output node(s) `%s` are incorrect. "
- "See plug-in log for details." % invalid
- )
-
- @classmethod
- def get_invalid(cls, instance):
-
- import hou
-
- output_node = instance.data["output_node"]
-
- if output_node is None:
- node = instance[0]
- cls.log.error(
- "SOP Output node in '%s' does not exist. "
- "Ensure a valid SOP output path is set." % node.path()
- )
-
- return [node.path()]
-
- # Output node must be a Sop node.
- if not isinstance(output_node, hou.SopNode):
- cls.log.error(
- "Output node %s is not a SOP node. "
- "SOP Path must point to a SOP node, "
- "instead found category type: %s"
- % (output_node.path(), output_node.type().category().name())
- )
- return [output_node.path()]
-
- # For the sake of completeness also assert the category type
- # is Sop to avoid potential edge case scenarios even though
- # the isinstance check above should be stricter than this category
- assert output_node.type().category().name() == "Sop", (
- "Output node %s is not of category Sop. This is a bug.."
- % output_node.path()
- )
-
- # Check if output node has incoming connections
- if not output_node.inputConnections():
- cls.log.error(
- "Output node `%s` has no incoming connections"
- % output_node.path()
- )
- return [output_node.path()]
-
- # Ensure the output node has at least Geometry data
- if not output_node.geometry():
- cls.log.error(
- "Output node `%s` has no geometry data." % output_node.path()
- )
- return [output_node.path()]
diff --git a/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py b/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py
index 3c15532be8..1eb36763bb 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py
@@ -51,7 +51,7 @@ class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin):
cls.log.debug("Checking for attribute: %s" % path_attr)
# Check if the primitive attribute exists
- frame = instance.data.get("startFrame", 0)
+ frame = instance.data.get("frameStart", 0)
geo = output.geometryAtFrame(frame)
# If there are no primitives on the current frame then we can't
diff --git a/openpype/hosts/houdini/startup/MainMenuCommon.xml b/openpype/hosts/houdini/startup/MainMenuCommon.xml
index b8c7f93d76..abfa3f136e 100644
--- a/openpype/hosts/houdini/startup/MainMenuCommon.xml
+++ b/openpype/hosts/houdini/startup/MainMenuCommon.xml
@@ -66,6 +66,14 @@ host_tools.show_workfiles(parent)
]]>
+
+
+
+
+
diff --git a/openpype/hosts/houdini/startup/scripts/123.py b/openpype/hosts/houdini/startup/python2.7libs/pythonrc.py
similarity index 100%
rename from openpype/hosts/houdini/startup/scripts/123.py
rename to openpype/hosts/houdini/startup/python2.7libs/pythonrc.py
diff --git a/openpype/hosts/houdini/startup/scripts/houdinicore.py b/openpype/hosts/houdini/startup/python3.7libs/pythonrc.py
similarity index 100%
rename from openpype/hosts/houdini/startup/scripts/houdinicore.py
rename to openpype/hosts/houdini/startup/python3.7libs/pythonrc.py
diff --git a/openpype/hosts/maya/api/__init__.py b/openpype/hosts/maya/api/__init__.py
index 9ea798e927..5d76bf0f04 100644
--- a/openpype/hosts/maya/api/__init__.py
+++ b/openpype/hosts/maya/api/__init__.py
@@ -10,12 +10,6 @@ from .pipeline import (
ls,
containerise,
-
- lock,
- unlock,
- is_locked,
- lock_ignored,
-
)
from .plugin import (
Creator,
@@ -38,11 +32,9 @@ from .lib import (
read,
apply_shaders,
- without_extension,
maintained_selection,
suspended_refresh,
- unique_name,
unique_namespace,
)
@@ -54,11 +46,6 @@ __all__ = [
"ls",
"containerise",
- "lock",
- "unlock",
- "is_locked",
- "lock_ignored",
-
"Creator",
"Loader",
@@ -76,11 +63,9 @@ __all__ = [
"lsattrs",
"read",
- "unique_name",
"unique_namespace",
"apply_shaders",
- "without_extension",
"maintained_selection",
"suspended_refresh",
diff --git a/openpype/hosts/maya/api/commands.py b/openpype/hosts/maya/api/commands.py
index c774afcc12..a1e0be2cfe 100644
--- a/openpype/hosts/maya/api/commands.py
+++ b/openpype/hosts/maya/api/commands.py
@@ -37,17 +37,17 @@ class ToolWindows:
def edit_shader_definitions():
- from avalon.tools import lib
from Qt import QtWidgets
from openpype.hosts.maya.api.shader_definition_editor import (
ShaderDefinitionsEditor
)
+ from openpype.tools.utils import qt_app_context
top_level_widgets = QtWidgets.QApplication.topLevelWidgets()
main_window = next(widget for widget in top_level_widgets
if widget.objectName() == "MayaWindow")
- with lib.application():
+ with qt_app_context():
window = ToolWindows.get_window("shader_definition_editor")
if not window:
window = ShaderDefinitionsEditor(parent=main_window)
diff --git a/openpype/hosts/maya/api/customize.py b/openpype/hosts/maya/api/customize.py
index 37fd543315..683e6b24b0 100644
--- a/openpype/hosts/maya/api/customize.py
+++ b/openpype/hosts/maya/api/customize.py
@@ -5,7 +5,7 @@ import logging
from functools import partial
-import maya.cmds as mc
+import maya.cmds as cmds
import maya.mel as mel
from openpype.api import resources
@@ -30,9 +30,9 @@ def override_component_mask_commands():
log.info("Installing override_component_mask_commands..")
# Get all object mask buttons
- buttons = mc.formLayout("objectMaskIcons",
- query=True,
- childArray=True)
+ buttons = cmds.formLayout("objectMaskIcons",
+ query=True,
+ childArray=True)
# Skip the triangle list item
buttons = [btn for btn in buttons if btn != "objPickMenuLayout"]
@@ -43,14 +43,14 @@ def override_component_mask_commands():
# toggle the others based on whether any of the buttons
# was remaining active after the toggle, if not then
# enable all
- if mc.getModifiers() == 4: # = CTRL
+ if cmds.getModifiers() == 4: # = CTRL
state = True
- active = [mc.iconTextCheckBox(btn, query=True, value=True) for btn
- in buttons]
+ active = [cmds.iconTextCheckBox(btn, query=True, value=True)
+ for btn in buttons]
if any(active):
- mc.selectType(allObjects=False)
+ cmds.selectType(allObjects=False)
else:
- mc.selectType(allObjects=True)
+ cmds.selectType(allObjects=True)
# Replace #1 with the current button state
cmd = raw_command.replace(" #1", " {}".format(int(state)))
@@ -63,13 +63,13 @@ def override_component_mask_commands():
# try to implement the fix. (This also allows us to
# "uninstall" the behavior later)
if btn not in COMPONENT_MASK_ORIGINAL:
- original = mc.iconTextCheckBox(btn, query=True, cc=True)
+ original = cmds.iconTextCheckBox(btn, query=True, cc=True)
COMPONENT_MASK_ORIGINAL[btn] = original
# Assign the special callback
original = COMPONENT_MASK_ORIGINAL[btn]
new_fn = partial(on_changed_callback, original)
- mc.iconTextCheckBox(btn, edit=True, cc=new_fn)
+ cmds.iconTextCheckBox(btn, edit=True, cc=new_fn)
def override_toolbox_ui():
@@ -78,25 +78,36 @@ def override_toolbox_ui():
parent_widget = get_main_window()
# Ensure the maya web icon on toolbox exists
- web_button = "ToolBox|MainToolboxLayout|mayaWebButton"
- if not mc.iconTextButton(web_button, query=True, exists=True):
+ button_names = [
+ # Maya 2022.1+ with maya.cmds.iconTextStaticLabel
+ "ToolBox|MainToolboxLayout|mayaHomeToolboxButton",
+ # Older with maya.cmds.iconTextButton
+ "ToolBox|MainToolboxLayout|mayaWebButton"
+ ]
+ for name in button_names:
+ if cmds.control(name, query=True, exists=True):
+ web_button = name
+ break
+ else:
+ # Button does not exist
+ log.warning("Can't find Maya Home/Web button to override toolbox ui..")
return
- mc.iconTextButton(web_button, edit=True, visible=False)
+ cmds.control(web_button, edit=True, visible=False)
# real = 32, but 36 with padding - according to toolbox mel script
icon_size = 36
parent = web_button.rsplit("|", 1)[0]
# Ensure the parent is a formLayout
- if not mc.objectTypeUI(parent) == "formLayout":
+ if not cmds.objectTypeUI(parent) == "formLayout":
return
# Create our controls
controls = []
controls.append(
- mc.iconTextButton(
+ cmds.iconTextButton(
"pype_toolbox_lookmanager",
annotation="Look Manager",
label="Look Manager",
@@ -109,7 +120,7 @@ def override_toolbox_ui():
)
controls.append(
- mc.iconTextButton(
+ cmds.iconTextButton(
"pype_toolbox_workfiles",
annotation="Work Files",
label="Work Files",
@@ -124,7 +135,7 @@ def override_toolbox_ui():
)
controls.append(
- mc.iconTextButton(
+ cmds.iconTextButton(
"pype_toolbox_loader",
annotation="Loader",
label="Loader",
@@ -139,7 +150,7 @@ def override_toolbox_ui():
)
controls.append(
- mc.iconTextButton(
+ cmds.iconTextButton(
"pype_toolbox_manager",
annotation="Inventory",
label="Inventory",
@@ -159,7 +170,7 @@ def override_toolbox_ui():
for i, control in enumerate(controls):
previous = controls[i - 1] if i > 0 else web_button
- mc.formLayout(parent, edit=True,
- attachControl=[control, "bottom", 0, previous],
- attachForm=([control, "left", 1],
- [control, "right", 1]))
+ cmds.formLayout(parent, edit=True,
+ attachControl=[control, "bottom", 0, previous],
+ attachForm=([control, "left", 1],
+ [control, "right", 1]))
diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py
index 09d1a68223..9f97eef2f1 100644
--- a/openpype/hosts/maya/api/lib.py
+++ b/openpype/hosts/maya/api/lib.py
@@ -2,14 +2,12 @@
import os
import sys
-import re
import platform
import uuid
import math
import json
import logging
-import itertools
import contextlib
from collections import OrderedDict, defaultdict
from math import ceil
@@ -19,10 +17,16 @@ import bson
from maya import cmds, mel
import maya.api.OpenMaya as om
-from avalon import api, io, pipeline
+from avalon import api, io
from openpype import lib
from openpype.api import get_anatomy_settings
+from openpype.pipeline import (
+ discover_loader_plugins,
+ loaders_from_representation,
+ get_representation_path,
+ load_container,
+)
from .commands import reset_frame_range
@@ -154,73 +158,59 @@ def maintained_selection():
cmds.select(clear=True)
-def unique_name(name, format="%02d", namespace="", prefix="", suffix=""):
- """Return unique `name`
-
- The function takes into consideration an optional `namespace`
- and `suffix`. The suffix is included in evaluating whether a
- name exists - such as `name` + "_GRP" - but isn't included
- in the returned value.
-
- If a namespace is provided, only names within that namespace
- are considered when evaluating whether the name is unique.
-
- Arguments:
- format (str, optional): The `name` is given a number, this determines
- how this number is formatted. Defaults to a padding of 2.
- E.g. my_name01, my_name02.
- namespace (str, optional): Only consider names within this namespace.
- suffix (str, optional): Only consider names with this suffix.
-
- Example:
- >>> name = cmds.createNode("transform", name="MyName")
- >>> cmds.objExists(name)
- True
- >>> unique = unique_name(name)
- >>> cmds.objExists(unique)
- False
-
- """
-
- iteration = 1
- unique = prefix + (name + format % iteration) + suffix
-
- while cmds.objExists(namespace + ":" + unique):
- iteration += 1
- unique = prefix + (name + format % iteration) + suffix
-
- if suffix:
- return unique[:-len(suffix)]
-
- return unique
-
-
def unique_namespace(namespace, format="%02d", prefix="", suffix=""):
"""Return unique namespace
- Similar to :func:`unique_name` but evaluating namespaces
- as opposed to object names.
-
Arguments:
namespace (str): Name of namespace to consider
format (str, optional): Formatting of the given iteration number
suffix (str, optional): Only consider namespaces with this suffix.
+ >>> unique_namespace("bar")
+ # bar01
+ >>> unique_namespace(":hello")
+ # :hello01
+ >>> unique_namespace("bar:", suffix="_NS")
+ # bar01_NS:
+
"""
+ def current_namespace():
+ current = cmds.namespaceInfo(currentNamespace=True,
+ absoluteName=True)
+ # When inside a namespace Maya adds no trailing :
+ if not current.endswith(":"):
+ current += ":"
+ return current
+
+ # Always check against the absolute namespace root
+ # There's no clash with :x if we're defining namespace :a:x
+ ROOT = ":" if namespace.startswith(":") else current_namespace()
+
+ # Strip trailing `:` tokens since we might want to add a suffix
+ start = ":" if namespace.startswith(":") else ""
+ end = ":" if namespace.endswith(":") else ""
+ namespace = namespace.strip(":")
+ if ":" in namespace:
+ # Split off any nesting that we don't uniqify anyway.
+ parents, namespace = namespace.rsplit(":", 1)
+ start += parents + ":"
+ ROOT += start
+
+ def exists(n):
+ # Check for clash with nodes and namespaces
+ fullpath = ROOT + n
+ return cmds.objExists(fullpath) or cmds.namespace(exists=fullpath)
+
iteration = 1
- unique = prefix + (namespace + format % iteration) + suffix
+ while True:
+ nr_namespace = namespace + format % iteration
+ unique = prefix + nr_namespace + suffix
+
+ if not exists(unique):
+ return start + unique + end
- # The `existing` set does not just contain the namespaces but *all* nodes
- # within "current namespace". We need all because the namespace could
- # also clash with a node name. To be truly unique and valid one needs to
- # check against all.
- existing = set(cmds.namespaceInfo(listNamespace=True))
- while unique in existing:
iteration += 1
- unique = prefix + (namespace + format % iteration) + suffix
-
- return unique
def read(node):
@@ -282,155 +272,10 @@ def float_round(num, places=0, direction=ceil):
def pairwise(iterable):
"""s -> (s0,s1), (s2,s3), (s4, s5), ..."""
+ from six.moves import zip
+
a = iter(iterable)
- return itertools.izip(a, a)
-
-
-def unique(name):
- assert isinstance(name, string_types), "`name` must be string"
-
- while cmds.objExists(name):
- matches = re.findall(r"\d+$", name)
-
- if matches:
- match = matches[-1]
- name = name.rstrip(match)
- number = int(match) + 1
- else:
- number = 1
-
- name = name + str(number)
-
- return name
-
-
-def uv_from_element(element):
- """Return the UV coordinate of given 'element'
-
- Supports components, meshes, nurbs.
-
- """
-
- supported = ["mesh", "nurbsSurface"]
-
- uv = [0.5, 0.5]
-
- if "." not in element:
- type = cmds.nodeType(element)
- if type == "transform":
- geometry_shape = cmds.listRelatives(element, shapes=True)
-
- if len(geometry_shape) >= 1:
- geometry_shape = geometry_shape[0]
- else:
- return
-
- elif type in supported:
- geometry_shape = element
-
- else:
- cmds.error("Could not do what you wanted..")
- return
- else:
- # If it is indeed a component - get the current Mesh
- try:
- parent = element.split(".", 1)[0]
-
- # Maya is funny in that when the transform of the shape
- # of the component element has children, the name returned
- # by that elementection is the shape. Otherwise, it is
- # the transform. So lets see what type we're dealing with here.
- if cmds.nodeType(parent) in supported:
- geometry_shape = parent
- else:
- geometry_shape = cmds.listRelatives(parent, shapes=1)[0]
-
- if not geometry_shape:
- cmds.error("Skipping %s: Could not find shape." % element)
- return
-
- if len(cmds.ls(geometry_shape)) > 1:
- cmds.warning("Multiple shapes with identical "
- "names found. This might not work")
-
- except TypeError as e:
- cmds.warning("Skipping %s: Didn't find a shape "
- "for component elementection. %s" % (element, e))
- return
-
- try:
- type = cmds.nodeType(geometry_shape)
-
- if type == "nurbsSurface":
- # If a surfacePoint is elementected on a nurbs surface
- root, u, v = element.rsplit("[", 2)
- uv = [float(u[:-1]), float(v[:-1])]
-
- if type == "mesh":
- # -----------
- # Average the U and V values
- # ===========
- uvs = cmds.polyListComponentConversion(element, toUV=1)
- if not uvs:
- cmds.warning("Couldn't derive any UV's from "
- "component, reverting to default U and V")
- raise TypeError
-
- # Flatten list of Uv's as sometimes it returns
- # neighbors like this [2:3] instead of [2], [3]
- flattened = []
-
- for uv in uvs:
- flattened.extend(cmds.ls(uv, flatten=True))
-
- uvs = flattened
-
- sumU = 0
- sumV = 0
- for uv in uvs:
- try:
- u, v = cmds.polyEditUV(uv, query=True)
- except Exception:
- cmds.warning("Couldn't find any UV coordinated, "
- "reverting to default U and V")
- raise TypeError
-
- sumU += u
- sumV += v
-
- averagedU = sumU / len(uvs)
- averagedV = sumV / len(uvs)
-
- uv = [averagedU, averagedV]
- except TypeError:
- pass
-
- return uv
-
-
-def shape_from_element(element):
- """Return shape of given 'element'
-
- Supports components, meshes, and surfaces
-
- """
-
- try:
- # Get either shape or transform, based on element-type
- node = cmds.ls(element, objectsOnly=True)[0]
- except Exception:
- cmds.warning("Could not find node in %s" % element)
- return None
-
- if cmds.nodeType(node) == 'transform':
- try:
- return cmds.listRelatives(node, shapes=True)[0]
- except Exception:
- cmds.warning("Could not find shape in %s" % element)
- return None
-
- else:
- return node
+ return zip(a, a)
def export_alembic(nodes,
@@ -516,7 +361,8 @@ def collect_animation_data(fps=False):
data = OrderedDict()
data["frameStart"] = start
data["frameEnd"] = end
- data["handles"] = 0
+ data["handleStart"] = 0
+ data["handleEnd"] = 0
data["step"] = 1.0
if fps:
@@ -577,115 +423,6 @@ def imprint(node, data):
cmds.setAttr(node + "." + key, value, **set_type)
-def serialise_shaders(nodes):
- """Generate a shader set dictionary
-
- Arguments:
- nodes (list): Absolute paths to nodes
-
- Returns:
- dictionary of (shader: id) pairs
-
- Schema:
- {
- "shader1": ["id1", "id2"],
- "shader2": ["id3", "id1"]
- }
-
- Example:
- {
- "Bazooka_Brothers01_:blinn4SG": [
- "f9520572-ac1d-11e6-b39e-3085a99791c9.f[4922:5001]",
- "f9520572-ac1d-11e6-b39e-3085a99791c9.f[4587:4634]",
- "f9520572-ac1d-11e6-b39e-3085a99791c9.f[1120:1567]",
- "f9520572-ac1d-11e6-b39e-3085a99791c9.f[4251:4362]"
- ],
- "lambert2SG": [
- "f9520571-ac1d-11e6-9dbb-3085a99791c9"
- ]
- }
-
- """
-
- valid_nodes = cmds.ls(
- nodes,
- long=True,
- recursive=True,
- showType=True,
- objectsOnly=True,
- type="transform"
- )
-
- meshes_by_id = {}
- for mesh in valid_nodes:
- shapes = cmds.listRelatives(valid_nodes[0],
- shapes=True,
- fullPath=True) or list()
-
- if shapes:
- shape = shapes[0]
- if not cmds.nodeType(shape):
- continue
-
- try:
- id_ = cmds.getAttr(mesh + ".mbID")
-
- if id_ not in meshes_by_id:
- meshes_by_id[id_] = list()
-
- meshes_by_id[id_].append(mesh)
-
- except ValueError:
- continue
-
- meshes_by_shader = dict()
- for mesh in meshes_by_id.values():
- shape = cmds.listRelatives(mesh,
- shapes=True,
- fullPath=True) or list()
-
- for shader in cmds.listConnections(shape,
- type="shadingEngine") or list():
-
- # Objects in this group are those that haven't got
- # any shaders. These are expected to be managed
- # elsewhere, such as by the default model loader.
- if shader == "initialShadingGroup":
- continue
-
- if shader not in meshes_by_shader:
- meshes_by_shader[shader] = list()
-
- shaded = cmds.sets(shader, query=True) or list()
- meshes_by_shader[shader].extend(shaded)
-
- shader_by_id = {}
- for shader, shaded in meshes_by_shader.items():
-
- if shader not in shader_by_id:
- shader_by_id[shader] = list()
-
- for mesh in shaded:
-
- # Enable shader assignment to faces.
- name = mesh.split(".f[")[0]
-
- transform = name
- if cmds.objectType(transform) == "mesh":
- transform = cmds.listRelatives(name, parent=True)[0]
-
- try:
- id_ = cmds.getAttr(transform + ".mbID")
- shader_by_id[shader].append(mesh.replace(name, id_))
- except KeyError:
- continue
-
- # Remove duplicates
- shader_by_id[shader] = list(set(shader_by_id[shader]))
-
- return shader_by_id
-
-
def lsattr(attr, value=None):
"""Return nodes matching `key` and `value`
@@ -764,17 +501,6 @@ def lsattrs(attrs):
return list(matches)
-@contextlib.contextmanager
-def without_extension():
- """Use cmds.file with defaultExtensions=False"""
- previous_setting = cmds.file(defaultExtensions=True, query=True)
- try:
- cmds.file(defaultExtensions=False)
- yield
- finally:
- cmds.file(defaultExtensions=previous_setting)
-
-
@contextlib.contextmanager
def attribute_values(attr_values):
"""Remaps node attributes to values during context.
@@ -853,26 +579,6 @@ def evaluation(mode="off"):
cmds.evaluationManager(mode=original)
-@contextlib.contextmanager
-def no_refresh():
- """Temporarily disables Maya's UI updates
-
- Note:
- This only disabled the main pane and will sometimes still
- trigger updates in torn off panels.
-
- """
-
- pane = _get_mel_global('gMainPane')
- state = cmds.paneLayout(pane, query=True, manage=True)
- cmds.paneLayout(pane, edit=True, manage=False)
-
- try:
- yield
- finally:
- cmds.paneLayout(pane, edit=True, manage=state)
-
-
@contextlib.contextmanager
def empty_sets(sets, force=False):
"""Remove all members of the sets during the context"""
@@ -1539,15 +1245,6 @@ def extract_alembic(file,
return file
-def maya_temp_folder():
- scene_dir = os.path.dirname(cmds.file(query=True, sceneName=True))
- tmp_dir = os.path.abspath(os.path.join(scene_dir, "..", "tmp"))
- if not os.path.isdir(tmp_dir):
- os.makedirs(tmp_dir)
-
- return tmp_dir
-
-
# region ID
def get_id_required_nodes(referenced_nodes=False, nodes=None):
"""Filter out any node which are locked (reference) or readOnly
@@ -1732,22 +1429,6 @@ def set_id(node, unique_id, overwrite=False):
cmds.setAttr(attr, unique_id, type="string")
-def remove_id(node):
- """Remove the id attribute from the input node.
-
- Args:
- node (str): The node name
-
- Returns:
- bool: Whether an id attribute was deleted
-
- """
- if cmds.attributeQuery("cbId", node=node, exists=True):
- cmds.deleteAttr("{}.cbId".format(node))
- return True
- return False
-
-
# endregion ID
def get_reference_node(path):
"""
@@ -1905,21 +1586,21 @@ def assign_look_by_version(nodes, version_id):
log.info("Using look for the first time ..")
# Load file
- loaders = api.loaders_from_representation(api.discover(api.Loader),
- representation_id)
+ _loaders = discover_loader_plugins()
+ loaders = loaders_from_representation(_loaders, representation_id)
Loader = next((i for i in loaders if i.__name__ == "LookLoader"), None)
if Loader is None:
raise RuntimeError("Could not find LookLoader, this is a bug")
# Reference the look file
with maintained_selection():
- container_node = pipeline.load(Loader, look_representation)
+ container_node = load_container(Loader, look_representation)
# Get container members
shader_nodes = get_container_members(container_node)
# Load relationships
- shader_relation = api.get_representation_path(json_representation)
+ shader_relation = get_representation_path(json_representation)
with open(shader_relation, "r") as f:
relationships = json.load(f)
@@ -2423,6 +2104,7 @@ def reset_scene_resolution():
set_scene_resolution(width, height, pixelAspect)
+
def set_context_settings():
"""Apply the project settings from the project definition
@@ -2881,7 +2563,7 @@ def get_attr_in_layer(attr, layer):
def fix_incompatible_containers():
- """Return whether the current scene has any outdated content"""
+ """Backwards compatibility: old containers to use new ReferenceLoader"""
host = api.registered_host()
for container in host.ls():
@@ -3120,7 +2802,7 @@ class RenderSetupListObserver:
cmds.delete(render_layer_set_name)
-class RenderSetupItemObserver():
+class RenderSetupItemObserver:
"""Handle changes in render setup items."""
def __init__(self, item):
@@ -3312,7 +2994,27 @@ def set_colorspace():
"""
project_name = os.getenv("AVALON_PROJECT")
imageio = get_anatomy_settings(project_name)["imageio"]["maya"]
- root_dict = imageio["colorManagementPreference"]
+
+ # Maya 2022+ introduces new OCIO v2 color management settings that
+ # can override the old color management preferences. OpenPype has
+ # separate settings for both so we fall back when necessary.
+ use_ocio_v2 = imageio["colorManagementPreference_v2"]["enabled"]
+ required_maya_version = 2022
+ maya_version = int(cmds.about(version=True))
+ maya_supports_ocio_v2 = maya_version >= required_maya_version
+ if use_ocio_v2 and not maya_supports_ocio_v2:
+ # Fallback to legacy behavior with a warning
+ log.warning("Color Management Preference v2 is enabled but not "
+ "supported by current Maya version: {} (< {}). Falling "
+ "back to legacy settings.".format(
+ maya_version, required_maya_version)
+ )
+ use_ocio_v2 = False
+
+ if use_ocio_v2:
+ root_dict = imageio["colorManagementPreference_v2"]
+ else:
+ root_dict = imageio["colorManagementPreference"]
if not isinstance(root_dict, dict):
msg = "set_colorspace(): argument should be dictionary"
@@ -3320,11 +3022,12 @@ def set_colorspace():
log.debug(">> root_dict: {}".format(root_dict))
- # first enable color management
+ # enable color management
cmds.colorManagementPrefs(e=True, cmEnabled=True)
cmds.colorManagementPrefs(e=True, ocioRulesEnabled=True)
- # second set config path
+ # set config path
+ custom_ocio_config = False
if root_dict.get("configFilePath"):
unresolved_path = root_dict["configFilePath"]
ocio_paths = unresolved_path[platform.system().lower()]
@@ -3341,22 +3044,56 @@ def set_colorspace():
cmds.colorManagementPrefs(e=True, cmConfigFileEnabled=True)
log.debug("maya '{}' changed to: {}".format(
"configFilePath", resolved_path))
- root_dict.pop("configFilePath")
+ custom_ocio_config = True
else:
cmds.colorManagementPrefs(e=True, cmConfigFileEnabled=False)
- cmds.colorManagementPrefs(e=True, configFilePath="" )
+ cmds.colorManagementPrefs(e=True, configFilePath="")
- # third set rendering space and view transform
- renderSpace = root_dict["renderSpace"]
- cmds.colorManagementPrefs(e=True, renderingSpaceName=renderSpace)
- viewTransform = root_dict["viewTransform"]
- cmds.colorManagementPrefs(e=True, viewTransformName=viewTransform)
+ # If no custom OCIO config file was set we make sure that Maya 2022+
+ # either chooses between Maya's newer default v2 or legacy config based
+ # on OpenPype setting to use ocio v2 or not.
+ if maya_supports_ocio_v2 and not custom_ocio_config:
+ if use_ocio_v2:
+ # Use Maya 2022+ default OCIO v2 config
+ log.info("Setting default Maya OCIO v2 config")
+ cmds.colorManagementPrefs(edit=True, configFilePath="")
+ else:
+ # Set the Maya default config file path
+ log.info("Setting default Maya OCIO v1 legacy config")
+ cmds.colorManagementPrefs(edit=True, configFilePath="legacy")
+
+ # set color spaces for rendering space and view transforms
+ def _colormanage(**kwargs):
+ """Wrapper around `cmds.colorManagementPrefs`.
+
+ This logs errors instead of raising an error so color management
+ settings get applied as much as possible.
+
+ """
+ assert len(kwargs) == 1, "Must receive one keyword argument"
+ try:
+ cmds.colorManagementPrefs(edit=True, **kwargs)
+ log.debug("Setting Color Management Preference: {}".format(kwargs))
+ except RuntimeError as exc:
+ log.error(exc)
+
+ if use_ocio_v2:
+ _colormanage(renderingSpaceName=root_dict["renderSpace"])
+ _colormanage(displayName=root_dict["displayName"])
+ _colormanage(viewName=root_dict["viewName"])
+ else:
+ _colormanage(renderingSpaceName=root_dict["renderSpace"])
+ if maya_supports_ocio_v2:
+ _colormanage(viewName=root_dict["viewTransform"])
+ _colormanage(displayName="legacy")
+ else:
+ _colormanage(viewTransformName=root_dict["viewTransform"])
@contextlib.contextmanager
def root_parent(nodes):
# type: (list) -> list
- """Context manager to un-parent provided nodes and return then back."""
+ """Context manager to un-parent provided nodes and return them back."""
import pymel.core as pm # noqa
node_parents = []
diff --git a/openpype/hosts/maya/api/menu.json b/openpype/hosts/maya/api/menu.json
deleted file mode 100644
index a2efd5233c..0000000000
--- a/openpype/hosts/maya/api/menu.json
+++ /dev/null
@@ -1,924 +0,0 @@
-[
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\save_scene_incremental.py",
- "sourcetype": "file",
- "title": "# Version Up",
- "tooltip": "Incremental save with a specific format"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\open_current_folder.py",
- "sourcetype": "file",
- "title": "Open working folder..",
- "tooltip": "Show current scene in Explorer"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\avalon\\launch_manager.py",
- "sourcetype": "file",
- "title": "# Project Manager",
- "tooltip": "Add assets to the project"
- },
- {
- "type": "action",
- "command": "from openpype.tools.assetcreator import app as assetcreator; assetcreator.show(context='maya')",
- "sourcetype": "python",
- "title": "Asset Creator",
- "tooltip": "Open the Asset Creator"
- },
- {
- "type": "separator"
- },
- {
- "type": "menu",
- "title": "Modeling",
- "items": [
- {
- "type": "action",
- "command": "import easyTreezSource; reload(easyTreezSource); easyTreezSource.easyTreez()",
- "sourcetype": "python",
- "tags": ["modeling", "trees", "generate", "create", "plants"],
- "title": "EasyTreez",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\modeling\\separateMeshPerShader.py",
- "sourcetype": "file",
- "tags": ["modeling", "separateMeshPerShader"],
- "title": "# Separate Mesh Per Shader",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\modeling\\polyDetachSeparate.py",
- "sourcetype": "file",
- "tags": ["modeling", "poly", "detach", "separate"],
- "title": "# Polygon Detach and Separate",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\modeling\\polySelectEveryNthEdgeUI.py",
- "sourcetype": "file",
- "tags": ["modeling", "select", "nth", "edge", "ui"],
- "title": "# Select Every Nth Edge"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\modeling\\djPFXUVs.py",
- "sourcetype": "file",
- "tags": ["modeling", "djPFX", "UVs"],
- "title": "# dj PFX UVs",
- "tooltip": ""
- }
- ]
- },
- {
- "type": "menu",
- "title": "Rigging",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\advancedSkeleton.py",
- "sourcetype": "file",
- "tags": [
- "rigging",
- "autorigger",
- "advanced",
- "skeleton",
- "advancedskeleton",
- "file"
- ],
- "title": "Advanced Skeleton"
- }
- ]
- },
- {
- "type": "menu",
- "title": "Shading",
- "items": [
- {
- "type": "menu",
- "title": "# VRay",
- "items": [
- {
- "type": "action",
- "title": "# Import Proxies",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\vrayImportProxies.py",
- "sourcetype": "file",
- "tags": ["shading", "vray", "import", "proxies"],
- "tooltip": ""
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "title": "# Select All GES",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\selectAllGES.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "select All GES"]
- },
- {
- "type": "action",
- "title": "# Select All GES Under Selection",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\selectAllGESUnderSelection.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "select", "all", "GES"]
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "title": "# Selection To VRay Mesh",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\selectionToVrayMesh.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "selection", "vraymesh"]
- },
- {
- "type": "action",
- "title": "# Add VRay Round Edges Attribute",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\addVrayRoundEdgesAttribute.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "round edges", "attribute"]
- },
- {
- "type": "action",
- "title": "# Add Gamma",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\vrayAddGamma.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "add gamma"]
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\select_vraymesh_materials_with_unconnected_shader_slots.py",
- "sourcetype": "file",
- "title": "# Select Unconnected Shader Materials",
- "tags": [
- "shading",
- "vray",
- "select",
- "vraymesh",
- "materials",
- "unconnected shader slots"
- ],
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\vrayMergeSimilarVRayMeshMaterials.py",
- "sourcetype": "file",
- "title": "# Merge Similar VRay Mesh Materials",
- "tags": [
- "shading",
- "vray",
- "Merge",
- "VRayMesh",
- "Materials"
- ],
- "tooltip": ""
- },
- {
- "type": "action",
- "title": "# Create Two Sided Material",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\vrayCreate2SidedMtlForSelectedMtlRenamed.py",
- "sourcetype": "file",
- "tooltip": "Creates two sided material for selected material and renames it",
- "tags": ["shading", "vray", "two sided", "material"]
- },
- {
- "type": "action",
- "title": "# Create Two Sided Material For Selected",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\vrayCreate2SidedMtlForSelectedMtl.py",
- "sourcetype": "file",
- "tooltip": "Select material to create a two sided version from it",
- "tags": [
- "shading",
- "vray",
- "Create2SidedMtlForSelectedMtl.py"
- ]
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "title": "# Add OpenSubdiv Attribute",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\addVrayOpenSubdivAttribute.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": [
- "shading",
- "vray",
- "add",
- "open subdiv",
- "attribute"
- ]
- },
- {
- "type": "action",
- "title": "# Remove OpenSubdiv Attribute",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\removeVrayOpenSubdivAttribute.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": [
- "shading",
- "vray",
- "remove",
- "opensubdiv",
- "attributee"
- ]
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "title": "# Add Subdivision Attribute",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\addVraySubdivisionAttribute.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": [
- "shading",
- "vray",
- "addVraySubdivisionAttribute"
- ]
- },
- {
- "type": "action",
- "title": "# Remove Subdivision Attribute.py",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\removeVraySubdivisionAttribute.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": [
- "shading",
- "vray",
- "remove",
- "subdivision",
- "attribute"
- ]
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "title": "# Add Vray Object Ids",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\addVrayObjectIds.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "add", "object id"]
- },
- {
- "type": "action",
- "title": "# Add Vray Material Ids",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\addVrayMaterialIds.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "addVrayMaterialIds.py"]
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "title": "# Set Physical DOF Depth",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\vrayPhysicalDOFSetDepth.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "physical", "DOF ", "Depth"]
- },
- {
- "type": "action",
- "title": "# Magic Vray Proxy UI",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\magicVrayProxyUI.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "magicVrayProxyUI"]
- }
- ]
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\pyblish\\lighting\\set_filename_prefix.py",
- "sourcetype": "file",
- "tags": [
- "shading",
- "lookdev",
- "assign",
- "shaders",
- "prefix",
- "filename",
- "render"
- ],
- "title": "# Set filename prefix",
- "tooltip": "Set the render file name prefix."
- },
- {
- "type": "action",
- "command": "import mayalookassigner; mayalookassigner.show()",
- "sourcetype": "python",
- "tags": ["shading", "look", "assign", "shaders", "auto"],
- "title": "Look Manager",
- "tooltip": "Open the Look Manager UI for look assignment"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\LightLinkUi.py",
- "sourcetype": "file",
- "tags": ["shading", "light", "link", "ui"],
- "title": "# Light Link UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vdviewer_ui.py",
- "sourcetype": "file",
- "tags": [
- "shading",
- "look",
- "vray",
- "displacement",
- "shaders",
- "auto"
- ],
- "title": "# VRay Displ Viewer",
- "tooltip": "Open the VRay Displacement Viewer, select and control the content of the set"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\setTexturePreviewToCLRImage.py",
- "sourcetype": "file",
- "tags": ["shading", "CLRImage", "textures", "preview"],
- "title": "# Set Texture Preview To CLRImage",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\fixDefaultShaderSetBehavior.py",
- "sourcetype": "file",
- "tags": ["shading", "fix", "DefaultShaderSet", "Behavior"],
- "title": "# Fix Default Shader Set Behavior",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\fixSelectedShapesReferenceAssignments.py",
- "sourcetype": "file",
- "tags": [
- "shading",
- "fix",
- "Selected",
- "Shapes",
- "Reference",
- "Assignments"
- ],
- "title": "# Fix Shapes Reference Assignments",
- "tooltip": "Select shapes to fix the reference assignments"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\selectLambert1Members.py",
- "sourcetype": "file",
- "tags": ["shading", "selectLambert1Members"],
- "title": "# Select Lambert1 Members",
- "tooltip": "Selects all objects which have the Lambert1 shader assigned"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\selectShapesWithoutShader.py",
- "sourcetype": "file",
- "tags": ["shading", "selectShapesWithoutShader"],
- "title": "# Select Shapes Without Shader",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\fixRenderLayerOutAdjustmentErrors.py",
- "sourcetype": "file",
- "tags": ["shading", "fixRenderLayerOutAdjustmentErrors"],
- "title": "# Fix RenderLayer Out Adjustment Errors",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\fix_renderlayer_missing_node_override.py",
- "sourcetype": "file",
- "tags": [
- "shading",
- "renderlayer",
- "missing",
- "reference",
- "switch",
- "layer"
- ],
- "title": "# Fix RenderLayer Missing Referenced Nodes Overrides",
- "tooltip": ""
- },
- {
- "type": "action",
- "title": "# Image 2 Tiled EXR",
- "command": "$OPENPYPE_SCRIPTS\\shading\\open_img2exr.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "exr"]
- }
- ]
- },
- {
- "type": "menu",
- "title": "# Rendering",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\pyblish\\open_deadline_submission_settings.py",
- "sourcetype": "file",
- "tags": ["settings", "deadline", "globals", "render"],
- "title": "# DL Submission Settings UI",
- "tooltip": "Open the Deadline Submission Settings UI"
- }
- ]
- },
- {
- "type": "menu",
- "title": "Animation",
- "items": [
- {
- "type": "menu",
- "title": "# Attributes",
- "tooltip": "",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\animation\\attributes\\copyValues.py",
- "sourcetype": "file",
- "tags": ["animation", "copy", "attributes"],
- "title": "# Copy Values",
- "tooltip": "Copy attribute values"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\animation\\attributes\\copyInConnections.py",
- "sourcetype": "file",
- "tags": [
- "animation",
- "copy",
- "attributes",
- "connections",
- "incoming"
- ],
- "title": "# Copy In Connections",
- "tooltip": "Copy incoming connections"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\animation\\attributes\\copyOutConnections.py",
- "sourcetype": "file",
- "tags": [
- "animation",
- "copy",
- "attributes",
- "connections",
- "out"
- ],
- "title": "# Copy Out Connections",
- "tooltip": "Copy outcoming connections"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\animation\\attributes\\copyTransformLocal.py",
- "sourcetype": "file",
- "tags": [
- "animation",
- "copy",
- "attributes",
- "transforms",
- "local"
- ],
- "title": "# Copy Local Transforms",
- "tooltip": "Copy local transforms"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\animation\\attributes\\copyTransformMatrix.py",
- "sourcetype": "file",
- "tags": [
- "animation",
- "copy",
- "attributes",
- "transforms",
- "matrix"
- ],
- "title": "# Copy Matrix Transforms",
- "tooltip": "Copy Matrix transforms"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\animation\\attributes\\copyTransformUI.py",
- "sourcetype": "file",
- "tags": [
- "animation",
- "copy",
- "attributes",
- "transforms",
- "UI"
- ],
- "title": "# Copy Transforms UI",
- "tooltip": "Open the Copy Transforms UI"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\animation\\attributes\\simpleCopyUI.py",
- "sourcetype": "file",
- "tags": [
- "animation",
- "copy",
- "attributes",
- "transforms",
- "UI",
- "simple"
- ],
- "title": "# Simple Copy UI",
- "tooltip": "Open the simple Copy Transforms UI"
- }
- ]
- },
- {
- "type": "menu",
- "title": "# Optimize",
- "tooltip": "Optimization scripts",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\animation\\optimize\\toggleFreezeHierarchy.py",
- "sourcetype": "file",
- "tags": ["animation", "hierarchy", "toggle", "freeze"],
- "title": "# Toggle Freeze Hierarchy",
- "tooltip": "Freeze and unfreeze hierarchy"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\animation\\optimize\\toggleParallelNucleus.py",
- "sourcetype": "file",
- "tags": ["animation", "nucleus", "toggle", "parallel"],
- "title": "# Toggle Parallel Nucleus",
- "tooltip": "Toggle parallel nucleus"
- }
- ]
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\bakeSelectedToWorldSpace.py",
- "tags": ["animation", "bake", "selection", "worldspace.py"],
- "title": "# Bake Selected To Worldspace",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\timeStepper.py",
- "tags": ["animation", "time", "stepper"],
- "title": "# Time Stepper",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\capture_ui.py",
- "tags": [
- "animation",
- "capture",
- "ui",
- "screen",
- "movie",
- "image"
- ],
- "title": "# Capture UI",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\simplePlayblastUI.py",
- "tags": ["animation", "simple", "playblast", "ui"],
- "title": "# Simple Playblast UI",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\tweenMachineUI.py",
- "tags": ["animation", "tween", "machine"],
- "title": "# Tween Machine UI",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\selectAllAnimationCurves.py",
- "tags": ["animation", "select", "curves"],
- "title": "# Select All Animation Curves",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\pathAnimation.py",
- "tags": ["animation", "path", "along"],
- "title": "# Path Animation",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\offsetSelectedObjectsUI.py",
- "tags": ["animation", "offsetSelectedObjectsUI.py"],
- "title": "# Offset Selected Objects UI",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\key_amplifier_ui.py",
- "tags": ["animation", "key", "amplifier"],
- "title": "# Key Amplifier UI",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\anim_scene_optimizer.py",
- "tags": ["animation", "anim_scene_optimizer.py"],
- "title": "# Anim_Scene_Optimizer",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\zvParentMaster.py",
- "tags": ["animation", "zvParentMaster.py"],
- "title": "# ZV Parent Master",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\animLibrary.py",
- "tags": ["animation", "studiolibrary.py"],
- "title": "Anim Library",
- "type": "action"
- }
- ]
- },
- {
- "type": "menu",
- "title": "# Layout",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\alignDistributeUI.py",
- "sourcetype": "file",
- "tags": ["layout", "align", "Distribute", "UI"],
- "title": "# Align Distribute UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\alignSimpleUI.py",
- "sourcetype": "file",
- "tags": ["layout", "align", "UI", "Simple"],
- "title": "# Align Simple UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\center_locator.py",
- "sourcetype": "file",
- "tags": ["layout", "center", "locator"],
- "title": "# Center Locator",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\average_locator.py",
- "sourcetype": "file",
- "tags": ["layout", "average", "locator"],
- "title": "# Average Locator",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\selectWithinProximityUI.py",
- "sourcetype": "file",
- "tags": ["layout", "select", "proximity", "ui"],
- "title": "# Select Within Proximity UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\dupCurveUI.py",
- "sourcetype": "file",
- "tags": ["layout", "Duplicate", "Curve", "UI"],
- "title": "# Duplicate Curve UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\randomDeselectUI.py",
- "sourcetype": "file",
- "tags": ["layout", "random", "Deselect", "UI"],
- "title": "# Random Deselect UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\multiReferencerUI.py",
- "sourcetype": "file",
- "tags": ["layout", "multi", "reference"],
- "title": "# Multi Referencer UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\duplicateOffsetUI.py",
- "sourcetype": "file",
- "tags": ["layout", "duplicate", "offset", "UI"],
- "title": "# Duplicate Offset UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\spPaint3d.py",
- "sourcetype": "file",
- "tags": ["layout", "spPaint3d", "paint", "tool"],
- "title": "# SP Paint 3d",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\randomizeUI.py",
- "sourcetype": "file",
- "tags": ["layout", "randomize", "UI"],
- "title": "# Randomize UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\distributeWithinObjectUI.py",
- "sourcetype": "file",
- "tags": ["layout", "distribute", "ObjectUI", "within"],
- "title": "# Distribute Within Object UI",
- "tooltip": ""
- }
- ]
- },
- {
- "type": "menu",
- "title": "# Particles",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\instancerToObjects.py",
- "sourcetype": "file",
- "tags": ["particles", "instancerToObjects"],
- "title": "# Instancer To Objects",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\instancerToObjectsInstances.py",
- "sourcetype": "file",
- "tags": ["particles", "instancerToObjectsInstances"],
- "title": "# Instancer To Objects Instances",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\instancerToObjectsInstancesWithAnimation.py",
- "sourcetype": "file",
- "tags": [
- "particles",
- "instancerToObjectsInstancesWithAnimation"
- ],
- "title": "# Instancer To Objects Instances With Animation",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\instancerToObjectsWithAnimation.py",
- "sourcetype": "file",
- "tags": ["particles", "instancerToObjectsWithAnimation"],
- "title": "# Instancer To Objects With Animation",
- "tooltip": ""
- }
- ]
- },
- {
- "type": "menu",
- "title": "Cleanup",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\repair_faulty_containers.py",
- "sourcetype": "file",
- "tags": ["cleanup", "repair", "containers"],
- "title": "# Find and Repair Containers",
- "tooltip": ""
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\removeNamespaces.py",
- "sourcetype": "file",
- "tags": ["cleanup", "remove", "namespaces"],
- "title": "# Remove Namespaces",
- "tooltip": "Remove all namespaces"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\remove_user_defined_attributes.py",
- "sourcetype": "file",
- "tags": ["cleanup", "remove_user_defined_attributes"],
- "title": "# Remove User Defined Attributes",
- "tooltip": "Remove all user-defined attributes from all nodes"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\removeUnknownNodes.py",
- "sourcetype": "file",
- "tags": ["cleanup", "removeUnknownNodes"],
- "title": "# Remove Unknown Nodes",
- "tooltip": "Remove all unknown nodes"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\removeUnloadedReferences.py",
- "sourcetype": "file",
- "tags": ["cleanup", "removeUnloadedReferences"],
- "title": "# Remove Unloaded References",
- "tooltip": "Remove all unloaded references"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\removeReferencesFailedEdits.py",
- "sourcetype": "file",
- "tags": ["cleanup", "removeReferencesFailedEdits"],
- "title": "# Remove References Failed Edits",
- "tooltip": "Remove failed edits for all references"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\remove_unused_looks.py",
- "sourcetype": "file",
- "tags": ["cleanup", "removeUnusedLooks"],
- "title": "# Remove Unused Looks",
- "tooltip": "Remove all loaded yet unused Avalon look containers"
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\uniqifyNodeNames.py",
- "sourcetype": "file",
- "tags": ["cleanup", "uniqifyNodeNames"],
- "title": "# Uniqify Node Names",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\autoRenameFileNodes.py",
- "sourcetype": "file",
- "tags": ["cleanup", "auto", "rename", "filenodes"],
- "title": "# Auto Rename File Nodes",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\update_asset_id.py",
- "sourcetype": "file",
- "tags": ["cleanup", "update", "database", "asset", "id"],
- "title": "# Update Asset ID",
- "tooltip": "Will replace the Colorbleed ID with a new one (asset ID : Unique number)"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\ccRenameReplace.py",
- "sourcetype": "file",
- "tags": ["cleanup", "rename", "ui"],
- "title": "Renamer",
- "tooltip": "Rename UI"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\renameShapesToTransform.py",
- "sourcetype": "file",
- "tags": ["cleanup", "renameShapesToTransform"],
- "title": "# Rename Shapes To Transform",
- "tooltip": ""
- }
- ]
- }
-]
diff --git a/openpype/hosts/maya/api/menu.py b/openpype/hosts/maya/api/menu.py
index b1934c757d..5f0fc39bf3 100644
--- a/openpype/hosts/maya/api/menu.py
+++ b/openpype/hosts/maya/api/menu.py
@@ -36,7 +36,7 @@ def install():
return
def deferred():
- from avalon.tools import publish
+ pyblish_icon = host_tools.get_pyblish_icon()
parent_widget = get_main_window()
cmds.menu(
MENU_NAME,
@@ -80,7 +80,7 @@ def install():
command=lambda *args: host_tools.show_publish(
parent=parent_widget
),
- image=publish.ICON
+ image=pyblish_icon
)
cmds.menuItem(
diff --git a/openpype/hosts/maya/api/menu_backup.json b/openpype/hosts/maya/api/menu_backup.json
deleted file mode 100644
index e2a558aedc..0000000000
--- a/openpype/hosts/maya/api/menu_backup.json
+++ /dev/null
@@ -1,1567 +0,0 @@
-[
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\save_scene_incremental.py",
- "sourcetype": "file",
- "title": "Version Up",
- "tooltip": "Incremental save with a specific format"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\show_current_scene_in_explorer.py",
- "sourcetype": "file",
- "title": "Explore current scene..",
- "tooltip": "Show current scene in Explorer"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\avalon\\launch_manager.py",
- "sourcetype": "file",
- "title": "Project Manager",
- "tooltip": "Add assets to the project"
- },
- {
- "type": "separator"
- },
- {
- "type": "menu",
- "title": "Modeling",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\modeling\\duplicate_normalized.py",
- "sourcetype": "file",
- "tags": ["modeling", "duplicate", "normalized"],
- "title": "Duplicate Normalized",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\modeling\\transferUVs.py",
- "sourcetype": "file",
- "tags": ["modeling", "transfer", "uv"],
- "title": "Transfer UVs",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\modeling\\mirrorSymmetry.py",
- "sourcetype": "file",
- "tags": ["modeling", "mirror", "symmetry"],
- "title": "Mirror Symmetry",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\modeling\\selectOutlineUI.py",
- "sourcetype": "file",
- "tags": ["modeling", "select", "outline", "ui"],
- "title": "Select Outline UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\modeling\\polyDeleteOtherUVSets.py",
- "sourcetype": "file",
- "tags": ["modeling", "polygon", "uvset", "delete"],
- "title": "Polygon Delete Other UV Sets",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\modeling\\polyCombineQuick.py",
- "sourcetype": "file",
- "tags": ["modeling", "combine", "polygon", "quick"],
- "title": "Polygon Combine Quick",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\modeling\\separateMeshPerShader.py",
- "sourcetype": "file",
- "tags": ["modeling", "separateMeshPerShader"],
- "title": "Separate Mesh Per Shader",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\modeling\\polyDetachSeparate.py",
- "sourcetype": "file",
- "tags": ["modeling", "poly", "detach", "separate"],
- "title": "Polygon Detach and Separate",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\modeling\\polyRelaxVerts.py",
- "sourcetype": "file",
- "tags": ["modeling", "relax", "verts"],
- "title": "Polygon Relax Vertices",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\modeling\\polySelectEveryNthEdgeUI.py",
- "sourcetype": "file",
- "tags": ["modeling", "select", "nth", "edge", "ui"],
- "title": "Select Every Nth Edge"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\modeling\\djPFXUVs.py",
- "sourcetype": "file",
- "tags": ["modeling", "djPFX", "UVs"],
- "title": "dj PFX UVs",
- "tooltip": ""
- }
- ]
- },
- {
- "type": "menu",
- "title": "Rigging",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\addCurveBetween.py",
- "sourcetype": "file",
- "tags": ["rigging", "addCurveBetween", "file"],
- "title": "Add Curve Between"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\averageSkinWeights.py",
- "sourcetype": "file",
- "tags": ["rigging", "average", "skin weights", "file"],
- "title": "Average Skin Weights"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\cbSmoothSkinWeightUI.py",
- "sourcetype": "file",
- "tags": ["rigging", "cbSmoothSkinWeightUI", "file"],
- "title": "CB Smooth Skin Weight UI"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\channelBoxManagerUI.py",
- "sourcetype": "file",
- "tags": ["rigging", "channelBoxManagerUI", "file"],
- "title": "Channel Box Manager UI"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\characterAutorigger.py",
- "sourcetype": "file",
- "tags": ["rigging", "characterAutorigger", "file"],
- "title": "Character Auto Rigger"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\connectUI.py",
- "sourcetype": "file",
- "tags": ["rigging", "connectUI", "file"],
- "title": "Connect UI"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\copySkinWeightsLocal.py",
- "sourcetype": "file",
- "tags": ["rigging", "copySkinWeightsLocal", "file"],
- "title": "Copy Skin Weights Local"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\createCenterLocator.py",
- "sourcetype": "file",
- "tags": ["rigging", "createCenterLocator", "file"],
- "title": "Create Center Locator"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\freezeTransformToGroup.py",
- "sourcetype": "file",
- "tags": ["rigging", "freezeTransformToGroup", "file"],
- "title": "Freeze Transform To Group"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\groupSelected.py",
- "sourcetype": "file",
- "tags": ["rigging", "groupSelected", "file"],
- "title": "Group Selected"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\ikHandlePoleVectorLocator.py",
- "sourcetype": "file",
- "tags": ["rigging", "ikHandlePoleVectorLocator", "file"],
- "title": "IK Handle Pole Vector Locator"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\jointOrientUI.py",
- "sourcetype": "file",
- "tags": ["rigging", "jointOrientUI", "file"],
- "title": "Joint Orient UI"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\jointsOnCurve.py",
- "sourcetype": "file",
- "tags": ["rigging", "jointsOnCurve", "file"],
- "title": "Joints On Curve"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\resetBindSelectedSkinJoints.py",
- "sourcetype": "file",
- "tags": ["rigging", "resetBindSelectedSkinJoints", "file"],
- "title": "Reset Bind Selected Skin Joints"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\selectSkinclusterJointsFromSelectedComponents.py",
- "sourcetype": "file",
- "tags": [
- "rigging",
- "selectSkinclusterJointsFromSelectedComponents",
- "file"
- ],
- "title": "Select Skincluster Joints From Selected Components"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\selectSkinclusterJointsFromSelectedMesh.py",
- "sourcetype": "file",
- "tags": [
- "rigging",
- "selectSkinclusterJointsFromSelectedMesh",
- "file"
- ],
- "title": "Select Skincluster Joints From Selected Mesh"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\setJointLabels.py",
- "sourcetype": "file",
- "tags": ["rigging", "setJointLabels", "file"],
- "title": "Set Joint Labels"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\setJointOrientationFromCurrentRotation.py",
- "sourcetype": "file",
- "tags": [
- "rigging",
- "setJointOrientationFromCurrentRotation",
- "file"
- ],
- "title": "Set Joint Orientation From Current Rotation"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\setSelectedJointsOrientationZero.py",
- "sourcetype": "file",
- "tags": ["rigging", "setSelectedJointsOrientationZero", "file"],
- "title": "Set Selected Joints Orientation Zero"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\mirrorCurveShape.py",
- "sourcetype": "file",
- "tags": ["rigging", "mirrorCurveShape", "file"],
- "title": "Mirror Curve Shape"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\setRotationOrderUI.py",
- "sourcetype": "file",
- "tags": ["rigging", "setRotationOrderUI", "file"],
- "title": "Set Rotation Order UI"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\paintItNowUI.py",
- "sourcetype": "file",
- "tags": ["rigging", "paintItNowUI", "file"],
- "title": "Paint It Now UI"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\parentScaleConstraint.py",
- "sourcetype": "file",
- "tags": ["rigging", "parentScaleConstraint", "file"],
- "title": "Parent Scale Constraint"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\quickSetWeightsUI.py",
- "sourcetype": "file",
- "tags": ["rigging", "quickSetWeightsUI", "file"],
- "title": "Quick Set Weights UI"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\rapidRig.py",
- "sourcetype": "file",
- "tags": ["rigging", "rapidRig", "file"],
- "title": "Rapid Rig"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\regenerate_blendshape_targets.py",
- "sourcetype": "file",
- "tags": ["rigging", "regenerate_blendshape_targets", "file"],
- "title": "Regenerate Blendshape Targets"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\removeRotationAxis.py",
- "sourcetype": "file",
- "tags": ["rigging", "removeRotationAxis", "file"],
- "title": "Remove Rotation Axis"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\resetBindSelectedMeshes.py",
- "sourcetype": "file",
- "tags": ["rigging", "resetBindSelectedMeshes", "file"],
- "title": "Reset Bind Selected Meshes"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\simpleControllerOnSelection.py",
- "sourcetype": "file",
- "tags": ["rigging", "simpleControllerOnSelection", "file"],
- "title": "Simple Controller On Selection"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\simpleControllerOnSelectionHierarchy.py",
- "sourcetype": "file",
- "tags": [
- "rigging",
- "simpleControllerOnSelectionHierarchy",
- "file"
- ],
- "title": "Simple Controller On Selection Hierarchy"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\superRelativeCluster.py",
- "sourcetype": "file",
- "tags": ["rigging", "superRelativeCluster", "file"],
- "title": "Super Relative Cluster"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\tfSmoothSkinWeight.py",
- "sourcetype": "file",
- "tags": ["rigging", "tfSmoothSkinWeight", "file"],
- "title": "TF Smooth Skin Weight"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\toggleIntermediates.py",
- "sourcetype": "file",
- "tags": ["rigging", "toggleIntermediates", "file"],
- "title": "Toggle Intermediates"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\toggleSegmentScaleCompensate.py",
- "sourcetype": "file",
- "tags": ["rigging", "toggleSegmentScaleCompensate", "file"],
- "title": "Toggle Segment Scale Compensate"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\rigging\\toggleSkinclusterDeformNormals.py",
- "sourcetype": "file",
- "tags": ["rigging", "toggleSkinclusterDeformNormals", "file"],
- "title": "Toggle Skincluster Deform Normals"
- }
- ]
- },
- {
- "type": "menu",
- "title": "Shading",
- "items": [
- {
- "type": "menu",
- "title": "VRay",
- "items": [
- {
- "type": "action",
- "title": "Import Proxies",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\vrayImportProxies.py",
- "sourcetype": "file",
- "tags": ["shading", "vray", "import", "proxies"],
- "tooltip": ""
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "title": "Select All GES",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\selectAllGES.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "select All GES"]
- },
- {
- "type": "action",
- "title": "Select All GES Under Selection",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\selectAllGESUnderSelection.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "select", "all", "GES"]
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "title": "Selection To VRay Mesh",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\selectionToVrayMesh.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "selection", "vraymesh"]
- },
- {
- "type": "action",
- "title": "Add VRay Round Edges Attribute",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\addVrayRoundEdgesAttribute.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "round edges", "attribute"]
- },
- {
- "type": "action",
- "title": "Add Gamma",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\vrayAddGamma.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "add gamma"]
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\select_vraymesh_materials_with_unconnected_shader_slots.py",
- "sourcetype": "file",
- "title": "Select Unconnected Shader Materials",
- "tags": [
- "shading",
- "vray",
- "select",
- "vraymesh",
- "materials",
- "unconnected shader slots"
- ],
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\vrayMergeSimilarVRayMeshMaterials.py",
- "sourcetype": "file",
- "title": "Merge Similar VRay Mesh Materials",
- "tags": [
- "shading",
- "vray",
- "Merge",
- "VRayMesh",
- "Materials"
- ],
- "tooltip": ""
- },
- {
- "type": "action",
- "title": "Create Two Sided Material",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\vrayCreate2SidedMtlForSelectedMtlRenamed.py",
- "sourcetype": "file",
- "tooltip": "Creates two sided material for selected material and renames it",
- "tags": ["shading", "vray", "two sided", "material"]
- },
- {
- "type": "action",
- "title": "Create Two Sided Material For Selected",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\vrayCreate2SidedMtlForSelectedMtl.py",
- "sourcetype": "file",
- "tooltip": "Select material to create a two sided version from it",
- "tags": [
- "shading",
- "vray",
- "Create2SidedMtlForSelectedMtl.py"
- ]
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "title": "Add OpenSubdiv Attribute",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\addVrayOpenSubdivAttribute.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": [
- "shading",
- "vray",
- "add",
- "open subdiv",
- "attribute"
- ]
- },
- {
- "type": "action",
- "title": "Remove OpenSubdiv Attribute",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\removeVrayOpenSubdivAttribute.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": [
- "shading",
- "vray",
- "remove",
- "opensubdiv",
- "attributee"
- ]
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "title": "Add Subdivision Attribute",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\addVraySubdivisionAttribute.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": [
- "shading",
- "vray",
- "addVraySubdivisionAttribute"
- ]
- },
- {
- "type": "action",
- "title": "Remove Subdivision Attribute.py",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\removeVraySubdivisionAttribute.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": [
- "shading",
- "vray",
- "remove",
- "subdivision",
- "attribute"
- ]
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "title": "Add Vray Object Ids",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\addVrayObjectIds.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "add", "object id"]
- },
- {
- "type": "action",
- "title": "Add Vray Material Ids",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\addVrayMaterialIds.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "addVrayMaterialIds.py"]
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "title": "Set Physical DOF Depth",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\vrayPhysicalDOFSetDepth.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "physical", "DOF ", "Depth"]
- },
- {
- "type": "action",
- "title": "Magic Vray Proxy UI",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vray\\magicVrayProxyUI.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "magicVrayProxyUI"]
- }
- ]
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\pyblish\\lighting\\set_filename_prefix.py",
- "sourcetype": "file",
- "tags": [
- "shading",
- "lookdev",
- "assign",
- "shaders",
- "prefix",
- "filename",
- "render"
- ],
- "title": "Set filename prefix",
- "tooltip": "Set the render file name prefix."
- },
- {
- "type": "action",
- "command": "import mayalookassigner; mayalookassigner.show()",
- "sourcetype": "python",
- "tags": ["shading", "look", "assign", "shaders", "auto"],
- "title": "Look Manager",
- "tooltip": "Open the Look Manager UI for look assignment"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\LightLinkUi.py",
- "sourcetype": "file",
- "tags": ["shading", "light", "link", "ui"],
- "title": "Light Link UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\vdviewer_ui.py",
- "sourcetype": "file",
- "tags": [
- "shading",
- "look",
- "vray",
- "displacement",
- "shaders",
- "auto"
- ],
- "title": "VRay Displ Viewer",
- "tooltip": "Open the VRay Displacement Viewer, select and control the content of the set"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\setTexturePreviewToCLRImage.py",
- "sourcetype": "file",
- "tags": ["shading", "CLRImage", "textures", "preview"],
- "title": "Set Texture Preview To CLRImage",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\fixDefaultShaderSetBehavior.py",
- "sourcetype": "file",
- "tags": ["shading", "fix", "DefaultShaderSet", "Behavior"],
- "title": "Fix Default Shader Set Behavior",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\fixSelectedShapesReferenceAssignments.py",
- "sourcetype": "file",
- "tags": [
- "shading",
- "fix",
- "Selected",
- "Shapes",
- "Reference",
- "Assignments"
- ],
- "title": "Fix Shapes Reference Assignments",
- "tooltip": "Select shapes to fix the reference assignments"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\selectLambert1Members.py",
- "sourcetype": "file",
- "tags": ["shading", "selectLambert1Members"],
- "title": "Select Lambert1 Members",
- "tooltip": "Selects all objects which have the Lambert1 shader assigned"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\selectShapesWithoutShader.py",
- "sourcetype": "file",
- "tags": ["shading", "selectShapesWithoutShader"],
- "title": "Select Shapes Without Shader",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\fixRenderLayerOutAdjustmentErrors.py",
- "sourcetype": "file",
- "tags": ["shading", "fixRenderLayerOutAdjustmentErrors"],
- "title": "Fix RenderLayer Out Adjustment Errors",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\shading\\fix_renderlayer_missing_node_override.py",
- "sourcetype": "file",
- "tags": [
- "shading",
- "renderlayer",
- "missing",
- "reference",
- "switch",
- "layer"
- ],
- "title": "Fix RenderLayer Missing Referenced Nodes Overrides",
- "tooltip": ""
- },
- {
- "type": "action",
- "title": "Image 2 Tiled EXR",
- "command": "$OPENPYPE_SCRIPTS\\shading\\open_img2exr.py",
- "sourcetype": "file",
- "tooltip": "",
- "tags": ["shading", "vray", "exr"]
- }
- ]
- },
- {
- "type": "menu",
- "title": "Rendering",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\pyblish\\open_deadline_submission_settings.py",
- "sourcetype": "file",
- "tags": ["settings", "deadline", "globals", "render"],
- "title": "DL Submission Settings UI",
- "tooltip": "Open the Deadline Submission Settings UI"
- }
- ]
- },
- {
- "type": "menu",
- "title": "Animation",
- "items": [
- {
- "type": "menu",
- "title": "Attributes",
- "tooltip": "",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\animation\\attributes\\copyValues.py",
- "sourcetype": "file",
- "tags": ["animation", "copy", "attributes"],
- "title": "Copy Values",
- "tooltip": "Copy attribute values"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\animation\\attributes\\copyInConnections.py",
- "sourcetype": "file",
- "tags": [
- "animation",
- "copy",
- "attributes",
- "connections",
- "incoming"
- ],
- "title": "Copy In Connections",
- "tooltip": "Copy incoming connections"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\animation\\attributes\\copyOutConnections.py",
- "sourcetype": "file",
- "tags": [
- "animation",
- "copy",
- "attributes",
- "connections",
- "out"
- ],
- "title": "Copy Out Connections",
- "tooltip": "Copy outcoming connections"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\animation\\attributes\\copyTransformLocal.py",
- "sourcetype": "file",
- "tags": [
- "animation",
- "copy",
- "attributes",
- "transforms",
- "local"
- ],
- "title": "Copy Local Transforms",
- "tooltip": "Copy local transforms"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\animation\\attributes\\copyTransformMatrix.py",
- "sourcetype": "file",
- "tags": [
- "animation",
- "copy",
- "attributes",
- "transforms",
- "matrix"
- ],
- "title": "Copy Matrix Transforms",
- "tooltip": "Copy Matrix transforms"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\animation\\attributes\\copyTransformUI.py",
- "sourcetype": "file",
- "tags": [
- "animation",
- "copy",
- "attributes",
- "transforms",
- "UI"
- ],
- "title": "Copy Transforms UI",
- "tooltip": "Open the Copy Transforms UI"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\animation\\attributes\\simpleCopyUI.py",
- "sourcetype": "file",
- "tags": [
- "animation",
- "copy",
- "attributes",
- "transforms",
- "UI",
- "simple"
- ],
- "title": "Simple Copy UI",
- "tooltip": "Open the simple Copy Transforms UI"
- }
- ]
- },
- {
- "type": "menu",
- "title": "Optimize",
- "tooltip": "Optimization scripts",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\animation\\optimize\\toggleFreezeHierarchy.py",
- "sourcetype": "file",
- "tags": ["animation", "hierarchy", "toggle", "freeze"],
- "title": "Toggle Freeze Hierarchy",
- "tooltip": "Freeze and unfreeze hierarchy"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\animation\\optimize\\toggleParallelNucleus.py",
- "sourcetype": "file",
- "tags": ["animation", "nucleus", "toggle", "parallel"],
- "title": "Toggle Parallel Nucleus",
- "tooltip": "Toggle parallel nucleus"
- }
- ]
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\bakeSelectedToWorldSpace.py",
- "tags": ["animation", "bake", "selection", "worldspace.py"],
- "title": "Bake Selected To Worldspace",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\timeStepper.py",
- "tags": ["animation", "time", "stepper"],
- "title": "Time Stepper",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\capture_ui.py",
- "tags": [
- "animation",
- "capture",
- "ui",
- "screen",
- "movie",
- "image"
- ],
- "title": "Capture UI",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\simplePlayblastUI.py",
- "tags": ["animation", "simple", "playblast", "ui"],
- "title": "Simple Playblast UI",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\tweenMachineUI.py",
- "tags": ["animation", "tween", "machine"],
- "title": "Tween Machine UI",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\selectAllAnimationCurves.py",
- "tags": ["animation", "select", "curves"],
- "title": "Select All Animation Curves",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\pathAnimation.py",
- "tags": ["animation", "path", "along"],
- "title": "Path Animation",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\offsetSelectedObjectsUI.py",
- "tags": ["animation", "offsetSelectedObjectsUI.py"],
- "title": "Offset Selected Objects UI",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\key_amplifier_ui.py",
- "tags": ["animation", "key", "amplifier"],
- "title": "Key Amplifier UI",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\anim_scene_optimizer.py",
- "tags": ["animation", "anim_scene_optimizer.py"],
- "title": "Anim_Scene_Optimizer",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\zvParentMaster.py",
- "tags": ["animation", "zvParentMaster.py"],
- "title": "ZV Parent Master",
- "type": "action"
- },
- {
- "sourcetype": "file",
- "command": "$OPENPYPE_SCRIPTS\\animation\\poseLibrary.py",
- "tags": ["animation", "poseLibrary.py"],
- "title": "Pose Library",
- "type": "action"
- }
- ]
- },
- {
- "type": "menu",
- "title": "Layout",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\alignDistributeUI.py",
- "sourcetype": "file",
- "tags": ["layout", "align", "Distribute", "UI"],
- "title": "Align Distribute UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\alignSimpleUI.py",
- "sourcetype": "file",
- "tags": ["layout", "align", "UI", "Simple"],
- "title": "Align Simple UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\center_locator.py",
- "sourcetype": "file",
- "tags": ["layout", "center", "locator"],
- "title": "Center Locator",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\average_locator.py",
- "sourcetype": "file",
- "tags": ["layout", "average", "locator"],
- "title": "Average Locator",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\selectWithinProximityUI.py",
- "sourcetype": "file",
- "tags": ["layout", "select", "proximity", "ui"],
- "title": "Select Within Proximity UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\dupCurveUI.py",
- "sourcetype": "file",
- "tags": ["layout", "Duplicate", "Curve", "UI"],
- "title": "Duplicate Curve UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\randomDeselectUI.py",
- "sourcetype": "file",
- "tags": ["layout", "random", "Deselect", "UI"],
- "title": "Random Deselect UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\multiReferencerUI.py",
- "sourcetype": "file",
- "tags": ["layout", "multi", "reference"],
- "title": "Multi Referencer UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\duplicateOffsetUI.py",
- "sourcetype": "file",
- "tags": ["layout", "duplicate", "offset", "UI"],
- "title": "Duplicate Offset UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\spPaint3d.py",
- "sourcetype": "file",
- "tags": ["layout", "spPaint3d", "paint", "tool"],
- "title": "SP Paint 3d",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\randomizeUI.py",
- "sourcetype": "file",
- "tags": ["layout", "randomize", "UI"],
- "title": "Randomize UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\layout\\distributeWithinObjectUI.py",
- "sourcetype": "file",
- "tags": ["layout", "distribute", "ObjectUI", "within"],
- "title": "Distribute Within Object UI",
- "tooltip": ""
- }
- ]
- },
- {
- "type": "menu",
- "title": "Particles",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\instancerToObjects.py",
- "sourcetype": "file",
- "tags": ["particles", "instancerToObjects"],
- "title": "Instancer To Objects",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\instancerToObjectsInstances.py",
- "sourcetype": "file",
- "tags": ["particles", "instancerToObjectsInstances"],
- "title": "Instancer To Objects Instances",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\objectsToParticlesAndInstancerCleanSource.py",
- "sourcetype": "file",
- "tags": [
- "particles",
- "objects",
- "Particles",
- "Instancer",
- "Clean",
- "Source"
- ],
- "title": "Objects To Particles & Instancer - Clean Source",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\particleComponentsToLocators.py",
- "sourcetype": "file",
- "tags": ["particles", "components", "locators"],
- "title": "Particle Components To Locators",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\objectsToParticlesAndInstancer.py",
- "sourcetype": "file",
- "tags": ["particles", "objects", "particles", "instancer"],
- "title": "Objects To Particles And Instancer",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\spawnParticlesOnMesh.py",
- "sourcetype": "file",
- "tags": ["particles", "spawn", "on", "mesh"],
- "title": "Spawn Particles On Mesh",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\instancerToObjectsInstancesWithAnimation.py",
- "sourcetype": "file",
- "tags": [
- "particles",
- "instancerToObjectsInstancesWithAnimation"
- ],
- "title": "Instancer To Objects Instances With Animation",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\objectsToParticles.py",
- "sourcetype": "file",
- "tags": ["particles", "objectsToParticles"],
- "title": "Objects To Particles",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\add_particle_cacheFile_attrs.py",
- "sourcetype": "file",
- "tags": ["particles", "add_particle_cacheFile_attrs"],
- "title": "Add Particle CacheFile Attributes",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\mergeParticleSystems.py",
- "sourcetype": "file",
- "tags": ["particles", "mergeParticleSystems"],
- "title": "Merge Particle Systems",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\particlesToLocators.py",
- "sourcetype": "file",
- "tags": ["particles", "particlesToLocators"],
- "title": "Particles To Locators",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\instancerToObjectsWithAnimation.py",
- "sourcetype": "file",
- "tags": ["particles", "instancerToObjectsWithAnimation"],
- "title": "Instancer To Objects With Animation",
- "tooltip": ""
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\mayaReplicateHoudiniTool.py",
- "sourcetype": "file",
- "tags": [
- "particles",
- "houdini",
- "houdiniTool",
- "houdiniEngine"
- ],
- "title": "Replicate Houdini Tool",
- "tooltip": ""
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\clearInitialState.py",
- "sourcetype": "file",
- "tags": ["particles", "clearInitialState"],
- "title": "Clear Initial State",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\particles\\killSelectedParticles.py",
- "sourcetype": "file",
- "tags": ["particles", "killSelectedParticles"],
- "title": "Kill Selected Particles",
- "tooltip": ""
- }
- ]
- },
- {
- "type": "menu",
- "title": "Yeti",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\yeti\\yeti_rig_manager.py",
- "sourcetype": "file",
- "tags": ["yeti", "rig", "fur", "manager"],
- "title": "Open Yeti Rig Manager",
- "tooltip": ""
- }
- ]
- },
- {
- "type": "menu",
- "title": "Cleanup",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\repair_faulty_containers.py",
- "sourcetype": "file",
- "tags": ["cleanup", "repair", "containers"],
- "title": "Find and Repair Containers",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\selectByType.py",
- "sourcetype": "file",
- "tags": ["cleanup", "selectByType"],
- "title": "Select By Type",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\selectIntermediateObjects.py",
- "sourcetype": "file",
- "tags": ["cleanup", "selectIntermediateObjects"],
- "title": "Select Intermediate Objects",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\selectNonUniqueNames.py",
- "sourcetype": "file",
- "tags": ["cleanup", "select", "non unique", "names"],
- "title": "Select Non Unique Names",
- "tooltip": ""
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\removeNamespaces.py",
- "sourcetype": "file",
- "tags": ["cleanup", "remove", "namespaces"],
- "title": "Remove Namespaces",
- "tooltip": "Remove all namespaces"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\remove_user_defined_attributes.py",
- "sourcetype": "file",
- "tags": ["cleanup", "remove_user_defined_attributes"],
- "title": "Remove User Defined Attributes",
- "tooltip": "Remove all user-defined attributes from all nodes"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\removeUnknownNodes.py",
- "sourcetype": "file",
- "tags": ["cleanup", "removeUnknownNodes"],
- "title": "Remove Unknown Nodes",
- "tooltip": "Remove all unknown nodes"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\removeUnloadedReferences.py",
- "sourcetype": "file",
- "tags": ["cleanup", "removeUnloadedReferences"],
- "title": "Remove Unloaded References",
- "tooltip": "Remove all unloaded references"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\removeReferencesFailedEdits.py",
- "sourcetype": "file",
- "tags": ["cleanup", "removeReferencesFailedEdits"],
- "title": "Remove References Failed Edits",
- "tooltip": "Remove failed edits for all references"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\remove_unused_looks.py",
- "sourcetype": "file",
- "tags": ["cleanup", "removeUnusedLooks"],
- "title": "Remove Unused Looks",
- "tooltip": "Remove all loaded yet unused Avalon look containers"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\deleteGhostIntermediateObjects.py",
- "sourcetype": "file",
- "tags": ["cleanup", "deleteGhostIntermediateObjects"],
- "title": "Delete Ghost Intermediate Objects",
- "tooltip": ""
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\resetViewportCache.py",
- "sourcetype": "file",
- "tags": ["cleanup", "reset", "viewport", "cache"],
- "title": "Reset Viewport Cache",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\uniqifyNodeNames.py",
- "sourcetype": "file",
- "tags": ["cleanup", "uniqifyNodeNames"],
- "title": "Uniqify Node Names",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\autoRenameFileNodes.py",
- "sourcetype": "file",
- "tags": ["cleanup", "auto", "rename", "filenodes"],
- "title": "Auto Rename File Nodes",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\update_asset_id.py",
- "sourcetype": "file",
- "tags": ["cleanup", "update", "database", "asset", "id"],
- "title": "Update Asset ID",
- "tooltip": "Will replace the Colorbleed ID with a new one (asset ID : Unique number)"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\colorbleedRename.py",
- "sourcetype": "file",
- "tags": ["cleanup", "rename", "ui"],
- "title": "Colorbleed Renamer",
- "tooltip": "Colorbleed Rename UI"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\renameShapesToTransform.py",
- "sourcetype": "file",
- "tags": ["cleanup", "renameShapesToTransform"],
- "title": "Rename Shapes To Transform",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\reorderUI.py",
- "sourcetype": "file",
- "tags": ["cleanup", "reorderUI"],
- "title": "Reorder UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\cleanup\\pastedCleaner.py",
- "sourcetype": "file",
- "tags": ["cleanup", "pastedCleaner"],
- "title": "Pasted Cleaner",
- "tooltip": ""
- }
- ]
- },
- {
- "type": "menu",
- "title": "Others",
- "items": [
- {
- "type": "menu",
- "sourcetype": "file",
- "title": "Yeti",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\yeti\\cache_selected_yeti_nodes.py",
- "sourcetype": "file",
- "tags": ["others", "yeti", "cache", "selected"],
- "title": "Cache Selected Yeti Nodes",
- "tooltip": ""
- }
- ]
- },
- {
- "type": "menu",
- "title": "Hair",
- "tooltip": "",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\hair\\recolorHairCurrentCurve",
- "sourcetype": "file",
- "tags": ["others", "selectSoftSelection"],
- "title": "Select Soft Selection",
- "tooltip": ""
- }
- ]
- },
- {
- "type": "menu",
- "command": "$OPENPYPE_SCRIPTS\\others\\display",
- "sourcetype": "file",
- "tags": ["others", "display"],
- "title": "Display",
- "items": [
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\display\\wireframeSelectedObjects.py",
- "sourcetype": "file",
- "tags": ["others", "wireframe", "selected", "objects"],
- "title": "Wireframe Selected Objects",
- "tooltip": ""
- }
- ]
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\archiveSceneUI.py",
- "sourcetype": "file",
- "tags": ["others", "archiveSceneUI"],
- "title": "Archive Scene UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\getSimilarMeshes.py",
- "sourcetype": "file",
- "tags": ["others", "getSimilarMeshes"],
- "title": "Get Similar Meshes",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\createBoundingBoxEachSelected.py",
- "sourcetype": "file",
- "tags": ["others", "createBoundingBoxEachSelected"],
- "title": "Create BoundingBox Each Selected",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\curveFromPositionEveryFrame.py",
- "sourcetype": "file",
- "tags": ["others", "curveFromPositionEveryFrame"],
- "title": "Curve From Position",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\instanceLeafSmartTransform.py",
- "sourcetype": "file",
- "tags": ["others", "instance", "leaf", "smart", "transform"],
- "title": "Instance Leaf Smart Transform",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\instanceSmartTransform.py",
- "sourcetype": "file",
- "tags": ["others", "instance", "smart", "transform"],
- "title": "Instance Smart Transform",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\randomizeUVShellsSelectedObjects.py",
- "sourcetype": "file",
- "tags": ["others", "randomizeUVShellsSelectedObjects"],
- "title": "Randomize UV Shells",
- "tooltip": "Select objects before running action"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\centerPivotGroup.py",
- "sourcetype": "file",
- "tags": ["others", "centerPivotGroup"],
- "title": "Center Pivot Group",
- "tooltip": ""
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\locatorsOnSelectedFaces.py",
- "sourcetype": "file",
- "tags": ["others", "locatorsOnSelectedFaces"],
- "title": "Locators On Selected Faces",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\locatorsOnEdgeSelectionPrompt.py",
- "sourcetype": "file",
- "tags": ["others", "locatorsOnEdgeSelectionPrompt"],
- "title": "Locators On Edge Selection Prompt",
- "tooltip": ""
- },
- {
- "type": "separator"
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\copyDeformers.py",
- "sourcetype": "file",
- "tags": ["others", "copyDeformers"],
- "title": "Copy Deformers",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\selectInReferenceEditor.py",
- "sourcetype": "file",
- "tags": ["others", "selectInReferenceEditor"],
- "title": "Select In Reference Editor",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\selectConstrainingObject.py",
- "sourcetype": "file",
- "tags": ["others", "selectConstrainingObject"],
- "title": "Select Constraining Object",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\deformerSetRelationsUI.py",
- "sourcetype": "file",
- "tags": ["others", "deformerSetRelationsUI"],
- "title": "Deformer Set Relations UI",
- "tooltip": ""
- },
- {
- "type": "action",
- "command": "$OPENPYPE_SCRIPTS\\others\\recreateBaseNodesForAllLatticeNodes.py",
- "sourcetype": "file",
- "tags": ["others", "recreate", "base", "nodes", "lattice"],
- "title": "Recreate Base Nodes For Lattice Nodes",
- "tooltip": ""
- }
- ]
- }
-]
diff --git a/openpype/hosts/maya/api/pipeline.py b/openpype/hosts/maya/api/pipeline.py
index 14e8f4eb45..5cdc3ff4fd 100644
--- a/openpype/hosts/maya/api/pipeline.py
+++ b/openpype/hosts/maya/api/pipeline.py
@@ -2,7 +2,6 @@ import os
import sys
import errno
import logging
-import contextlib
from maya import utils, cmds, OpenMaya
import maya.api.OpenMaya as om
@@ -15,8 +14,17 @@ from avalon.pipeline import AVALON_CONTAINER_ID
import openpype.hosts.maya
from openpype.tools.utils import host_tools
-from openpype.lib import any_outdated
+from openpype.lib import (
+ any_outdated,
+ register_event_callback,
+ emit_event
+)
from openpype.lib.path_tools import HostDirmap
+from openpype.pipeline import (
+ LegacyCreator,
+ register_loader_plugin_path,
+ deregister_loader_plugin_path,
+)
from openpype.hosts.maya.lib import copy_workspace_mel
from . import menu, lib
@@ -49,13 +57,13 @@ def install():
pyblish.api.register_host("mayapy")
pyblish.api.register_host("maya")
- avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH)
- avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH)
+ register_loader_plugin_path(LOAD_PATH)
+ avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH)
avalon.api.register_plugin_path(avalon.api.InventoryAction, INVENTORY_PATH)
log.info(PUBLISH_PATH)
log.info("Installing callbacks ... ")
- avalon.api.on("init", on_init)
+ register_event_callback("init", on_init)
# Callbacks below are not required for headless mode, the `init` however
# is important to load referenced Alembics correctly at rendertime.
@@ -69,12 +77,12 @@ def install():
menu.install()
- avalon.api.on("save", on_save)
- avalon.api.on("open", on_open)
- avalon.api.on("new", on_new)
- avalon.api.before("save", on_before_save)
- avalon.api.on("taskChanged", on_task_changed)
- avalon.api.on("before.workfile.save", before_workfile_save)
+ register_event_callback("save", on_save)
+ register_event_callback("open", on_open)
+ register_event_callback("new", on_new)
+ register_event_callback("before.save", on_before_save)
+ register_event_callback("taskChanged", on_task_changed)
+ register_event_callback("workfile.save.before", before_workfile_save)
def _set_project():
@@ -137,7 +145,7 @@ def _register_callbacks():
def _on_maya_initialized(*args):
- avalon.api.emit("init", args)
+ emit_event("init")
if cmds.about(batch=True):
log.warning("Running batch mode ...")
@@ -148,15 +156,15 @@ def _on_maya_initialized(*args):
def _on_scene_new(*args):
- avalon.api.emit("new", args)
+ emit_event("new")
def _on_scene_save(*args):
- avalon.api.emit("save", args)
+ emit_event("save")
def _on_scene_open(*args):
- avalon.api.emit("open", args)
+ emit_event("open")
def _before_scene_save(return_code, client_data):
@@ -166,7 +174,10 @@ def _before_scene_save(return_code, client_data):
# in order to block the operation.
OpenMaya.MScriptUtil.setBool(return_code, True)
- avalon.api.emit("before_save", [return_code, client_data])
+ emit_event(
+ "before.save",
+ {"return_code": return_code}
+ )
def uninstall():
@@ -175,8 +186,8 @@ def uninstall():
pyblish.api.deregister_host("mayapy")
pyblish.api.deregister_host("maya")
- avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH)
- avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH)
+ deregister_loader_plugin_path(LOAD_PATH)
+ avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH)
avalon.api.deregister_plugin_path(
avalon.api.InventoryAction, INVENTORY_PATH
)
@@ -184,76 +195,6 @@ def uninstall():
menu.uninstall()
-def lock():
- """Lock scene
-
- Add an invisible node to your Maya scene with the name of the
- current file, indicating that this file is "locked" and cannot
- be modified any further.
-
- """
-
- if not cmds.objExists("lock"):
- with lib.maintained_selection():
- cmds.createNode("objectSet", name="lock")
- cmds.addAttr("lock", ln="basename", dataType="string")
-
- # Permanently hide from outliner
- cmds.setAttr("lock.verticesOnlySet", True)
-
- fname = cmds.file(query=True, sceneName=True)
- basename = os.path.basename(fname)
- cmds.setAttr("lock.basename", basename, type="string")
-
-
-def unlock():
- """Permanently unlock a locked scene
-
- Doesn't throw an error if scene is already unlocked.
-
- """
-
- try:
- cmds.delete("lock")
- except ValueError:
- pass
-
-
-def is_locked():
- """Query whether current scene is locked"""
- fname = cmds.file(query=True, sceneName=True)
- basename = os.path.basename(fname)
-
- if self._ignore_lock:
- return False
-
- try:
- return cmds.getAttr("lock.basename") == basename
- except ValueError:
- return False
-
-
-@contextlib.contextmanager
-def lock_ignored():
- """Context manager for temporarily ignoring the lock of a scene
-
- The purpose of this function is to enable locking a scene and
- saving it with the lock still in place.
-
- Example:
- >>> with lock_ignored():
- ... pass # Do things without lock
-
- """
-
- self._ignore_lock = True
-
- try:
- yield
- finally:
- self._ignore_lock = False
-
-
def parse_container(container):
"""Return the container node's full container data.
@@ -413,7 +354,7 @@ def containerise(name,
return container
-def on_init(_):
+def on_init():
log.info("Running callback on init..")
def safe_deferred(fn):
@@ -454,12 +395,12 @@ def on_init(_):
safe_deferred(override_toolbox_ui)
-def on_before_save(return_code, _):
+def on_before_save():
"""Run validation for scene's FPS prior to saving"""
return lib.validate_fps()
-def on_save(_):
+def on_save():
"""Automatically add IDs to new nodes
Any transform of a mesh, without an existing ID, is given one
@@ -477,7 +418,7 @@ def on_save(_):
lib.set_id(node, new_id, overwrite=False)
-def on_open(_):
+def on_open():
"""On scene open let's assume the containers have changed."""
from Qt import QtWidgets
@@ -525,7 +466,7 @@ def on_open(_):
dialog.show()
-def on_new(_):
+def on_new():
"""Set project resolution and fps when create a new file"""
log.info("Running callback on new..")
with lib.suspended_refresh():
@@ -541,7 +482,7 @@ def on_new(_):
lib.set_context_settings()
-def on_task_changed(*args):
+def on_task_changed():
"""Wrapped function of app initialize and maya's on task changed"""
# Run
menu.update_menu_task_label()
@@ -579,7 +520,7 @@ def on_task_changed(*args):
def before_workfile_save(event):
- workdir_path = event.workdir_path
+ workdir_path = event["workdir_path"]
if workdir_path:
copy_workspace_mel(workdir_path)
diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py
index bdb8fcf13a..84379bc145 100644
--- a/openpype/hosts/maya/api/plugin.py
+++ b/openpype/hosts/maya/api/plugin.py
@@ -2,9 +2,14 @@ import os
from maya import cmds
-from avalon import api
-from avalon.vendor import qargparse
-from openpype.api import PypeCreatorMixin
+import qargparse
+
+from avalon.pipeline import AVALON_CONTAINER_ID
+from openpype.pipeline import (
+ LegacyCreator,
+ LoaderPlugin,
+ get_representation_path,
+)
from .pipeline import containerise
from . import lib
@@ -77,7 +82,7 @@ def get_reference_node_parents(ref):
return parents
-class Creator(PypeCreatorMixin, api.Creator):
+class Creator(LegacyCreator):
defaults = ['Main']
def process(self):
@@ -93,7 +98,7 @@ class Creator(PypeCreatorMixin, api.Creator):
return instance
-class Loader(api.Loader):
+class Loader(LoaderPlugin):
hosts = ["maya"]
@@ -168,16 +173,18 @@ class ReferenceLoader(Loader):
return
ref_node = get_reference_node(nodes, self.log)
- loaded_containers.append(containerise(
+ container = containerise(
name=name,
namespace=namespace,
nodes=[ref_node],
context=context,
loader=self.__class__.__name__
- ))
-
+ )
+ loaded_containers.append(container)
+ self._organize_containers(nodes, container)
c += 1
namespace = None
+
return loaded_containers
def process_reference(self, context, name, namespace, data):
@@ -190,7 +197,7 @@ class ReferenceLoader(Loader):
node = container["objectName"]
- path = api.get_representation_path(representation)
+ path = get_representation_path(representation)
# Get reference node from container members
members = get_container_members(node)
@@ -243,6 +250,8 @@ class ReferenceLoader(Loader):
self.log.warning("Ignoring file read error:\n%s", exc)
+ self._organize_containers(content, container["objectName"])
+
# Reapply alembic settings.
if representation["name"] == "abc" and alembic_data:
alembic_nodes = cmds.ls(
@@ -280,7 +289,6 @@ class ReferenceLoader(Loader):
to remove from scene.
"""
-
from maya import cmds
node = container["objectName"]
@@ -310,3 +318,14 @@ class ReferenceLoader(Loader):
deleteNamespaceContent=True)
except RuntimeError:
pass
+
+ @staticmethod
+ def _organize_containers(nodes, container):
+ # type: (list, str) -> None
+ """Put containers in loaded data to correct hierarchy."""
+ for node in nodes:
+ id_attr = "{}.id".format(node)
+ if not cmds.attributeQuery("id", node=node, exists=True):
+ continue
+ if cmds.getAttr(id_attr) == AVALON_CONTAINER_ID:
+ cmds.sets(node, forceElement=container)
diff --git a/openpype/hosts/maya/api/setdress.py b/openpype/hosts/maya/api/setdress.py
index 1a7c3933a1..96a9700b88 100644
--- a/openpype/hosts/maya/api/setdress.py
+++ b/openpype/hosts/maya/api/setdress.py
@@ -8,7 +8,15 @@ import copy
import six
from maya import cmds
-from avalon import api, io
+from avalon import io
+from openpype.pipeline import (
+ discover_loader_plugins,
+ loaders_from_representation,
+ load_container,
+ update_container,
+ remove_container,
+ get_representation_path,
+)
from openpype.hosts.maya.api.lib import (
matrix_equals,
unique_namespace
@@ -120,12 +128,13 @@ def load_package(filepath, name, namespace=None):
root = "{}:{}".format(namespace, name)
containers = []
- all_loaders = api.discover(api.Loader)
+ all_loaders = discover_loader_plugins()
for representation_id, instances in data.items():
# Find the compatible loaders
- loaders = api.loaders_from_representation(all_loaders,
- representation_id)
+ loaders = loaders_from_representation(
+ all_loaders, representation_id
+ )
for instance in instances:
container = _add(instance=instance,
@@ -180,9 +189,11 @@ def _add(instance, representation_id, loaders, namespace, root="|"):
instance['loader'], instance)
raise RuntimeError("Loader is missing.")
- container = api.load(Loader,
- representation_id,
- namespace=instance['namespace'])
+ container = load_container(
+ Loader,
+ representation_id,
+ namespace=instance['namespace']
+ )
# Get the root from the loaded container
loaded_root = get_container_transforms({"objectName": container},
@@ -320,13 +331,13 @@ def update_package(set_container, representation):
"type": "representation"
})
- current_file = api.get_representation_path(current_representation)
+ current_file = get_representation_path(current_representation)
assert current_file.endswith(".json")
with open(current_file, "r") as fp:
current_data = json.load(fp)
# Load the new package data
- new_file = api.get_representation_path(representation)
+ new_file = get_representation_path(representation)
assert new_file.endswith(".json")
with open(new_file, "r") as fp:
new_data = json.load(fp)
@@ -460,12 +471,12 @@ def update_scene(set_container, containers, current_data, new_data, new_file):
# considered as new element and added afterwards.
processed_containers.pop()
processed_namespaces.remove(container_ns)
- api.remove(container)
+ remove_container(container)
continue
# Check whether the conversion can be done by the Loader.
# They *must* use the same asset, subset and Loader for
- # `api.update` to make sense.
+ # `update_container` to make sense.
old = io.find_one({
"_id": io.ObjectId(representation_current)
})
@@ -479,20 +490,21 @@ def update_scene(set_container, containers, current_data, new_data, new_file):
continue
new_version = new["context"]["version"]
- api.update(container, version=new_version)
+ update_container(container, version=new_version)
else:
# Remove this container because it's not in the new data
log.warning("Removing content: %s", container_ns)
- api.remove(container)
+ remove_container(container)
# Add new assets
- all_loaders = api.discover(api.Loader)
+ all_loaders = discover_loader_plugins()
for representation_id, instances in new_data.items():
# Find the compatible loaders
- loaders = api.loaders_from_representation(all_loaders,
- representation_id)
+ loaders = loaders_from_representation(
+ all_loaders, representation_id
+ )
for instance in instances:
# Already processed in update functionality
@@ -517,7 +529,7 @@ def update_scene(set_container, containers, current_data, new_data, new_file):
def compare_representations(old, new):
"""Check if the old representation given can be updated
- Due to limitations of the `api.update` function we cannot allow
+ Due to limitations of the `update_container` function we cannot allow
differences in the following data:
* Representation name (extension)
diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py
index 97a190d57d..9002ae3876 100644
--- a/openpype/hosts/maya/plugins/create/create_render.py
+++ b/openpype/hosts/maya/plugins/create/create_render.py
@@ -19,9 +19,9 @@ from openpype.api import (
get_project_settings,
get_asset)
from openpype.modules import ModulesManager
+from openpype.pipeline import CreatorError
from avalon.api import Session
-from avalon.api import CreatorError
class CreateRender(plugin.Creator):
@@ -253,7 +253,7 @@ class CreateRender(plugin.Creator):
# get pools
pool_names = []
- self.server_aliases = self.deadline_servers.keys()
+ self.server_aliases = list(self.deadline_servers.keys())
self.data["deadlineServers"] = self.server_aliases
self.data["suspendPublishJob"] = False
self.data["review"] = True
@@ -286,15 +286,12 @@ class CreateRender(plugin.Creator):
raise RuntimeError("Both Deadline and Muster are enabled")
if deadline_enabled:
- # if default server is not between selected, use first one for
- # initial list of pools.
try:
deadline_url = self.deadline_servers["default"]
except KeyError:
- deadline_url = [
- self.deadline_servers[k]
- for k in self.deadline_servers.keys()
- ][0]
+ # if 'default' server is not between selected,
+ # use first one for initial list of pools.
+ deadline_url = next(iter(self.deadline_servers.values()))
pool_names = self._get_deadline_pools(deadline_url)
diff --git a/openpype/hosts/maya/plugins/create/create_vrayscene.py b/openpype/hosts/maya/plugins/create/create_vrayscene.py
index f2096d902e..fa9c59e016 100644
--- a/openpype/hosts/maya/plugins/create/create_vrayscene.py
+++ b/openpype/hosts/maya/plugins/create/create_vrayscene.py
@@ -19,10 +19,10 @@ from openpype.api import (
get_project_settings
)
+from openpype.pipeline import CreatorError
from openpype.modules import ModulesManager
from avalon.api import Session
-from avalon.api import CreatorError
class CreateVRayScene(plugin.Creator):
diff --git a/openpype/hosts/maya/plugins/inventory/import_modelrender.py b/openpype/hosts/maya/plugins/inventory/import_modelrender.py
index 119edccb7a..c5d3d0c8f4 100644
--- a/openpype/hosts/maya/plugins/inventory/import_modelrender.py
+++ b/openpype/hosts/maya/plugins/inventory/import_modelrender.py
@@ -1,5 +1,9 @@
import json
-from avalon import api, io, pipeline
+from avalon import api, io
+from openpype.pipeline import (
+ get_representation_context,
+ get_representation_path_from_context,
+)
from openpype.hosts.maya.api.lib import (
maintained_selection,
apply_shaders
@@ -73,11 +77,11 @@ class ImportModelRender(api.InventoryAction):
"name": self.look_data_type,
})
- context = pipeline.get_representation_context(look_repr["_id"])
- maya_file = pipeline.get_representation_path_from_context(context)
+ context = get_representation_context(look_repr["_id"])
+ maya_file = get_representation_path_from_context(context)
- context = pipeline.get_representation_context(json_repr["_id"])
- json_file = pipeline.get_representation_path_from_context(context)
+ context = get_representation_context(json_repr["_id"])
+ json_file = get_representation_path_from_context(context)
# Import the look file
with maintained_selection():
diff --git a/openpype/hosts/maya/plugins/load/actions.py b/openpype/hosts/maya/plugins/load/actions.py
index 1cc7ee0c03..483ad32402 100644
--- a/openpype/hosts/maya/plugins/load/actions.py
+++ b/openpype/hosts/maya/plugins/load/actions.py
@@ -2,14 +2,14 @@
"""
-from avalon import api
+from openpype.pipeline import load
from openpype.hosts.maya.api.lib import (
maintained_selection,
unique_namespace
)
-class SetFrameRangeLoader(api.Loader):
+class SetFrameRangeLoader(load.LoaderPlugin):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["animation",
@@ -43,7 +43,7 @@ class SetFrameRangeLoader(api.Loader):
animationEndTime=end)
-class SetFrameRangeWithHandlesLoader(api.Loader):
+class SetFrameRangeWithHandlesLoader(load.LoaderPlugin):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["animation",
@@ -72,9 +72,8 @@ class SetFrameRangeWithHandlesLoader(api.Loader):
return
# Include handles
- handles = version_data.get("handles", 0)
- start -= handles
- end += handles
+ start -= version_data.get("handleStart", 0)
+ end += version_data.get("handleEnd", 0)
cmds.playbackOptions(minTime=start,
maxTime=end,
@@ -82,7 +81,7 @@ class SetFrameRangeWithHandlesLoader(api.Loader):
animationEndTime=end)
-class ImportMayaLoader(api.Loader):
+class ImportMayaLoader(load.LoaderPlugin):
"""Import action for Maya (unmanaged)
Warning:
diff --git a/openpype/hosts/maya/plugins/load/load_ass.py b/openpype/hosts/maya/plugins/load/load_ass.py
index 18b34d2233..18de4df3b1 100644
--- a/openpype/hosts/maya/plugins/load/load_ass.py
+++ b/openpype/hosts/maya/plugins/load/load_ass.py
@@ -1,8 +1,11 @@
import os
import clique
-from avalon import api
from openpype.api import get_project_settings
+from openpype.pipeline import (
+ load,
+ get_representation_path
+)
import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api.plugin import get_reference_node
from openpype.hosts.maya.api.lib import (
@@ -106,7 +109,7 @@ class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
node = container["objectName"]
representation["context"].pop("frame", None)
- path = api.get_representation_path(representation)
+ path = get_representation_path(representation)
print(path)
# path = self.fname
print(self.fname)
@@ -164,7 +167,7 @@ class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
type="string")
-class AssStandinLoader(api.Loader):
+class AssStandinLoader(load.LoaderPlugin):
"""Load .ASS file as standin"""
families = ["ass"]
@@ -240,7 +243,7 @@ class AssStandinLoader(api.Loader):
import pymel.core as pm
- path = api.get_representation_path(representation)
+ path = get_representation_path(representation)
files_in_path = os.listdir(os.path.split(path)[0])
sequence = 0
diff --git a/openpype/hosts/maya/plugins/load/load_assembly.py b/openpype/hosts/maya/plugins/load/load_assembly.py
index 0151da7253..902f38695c 100644
--- a/openpype/hosts/maya/plugins/load/load_assembly.py
+++ b/openpype/hosts/maya/plugins/load/load_assembly.py
@@ -1,7 +1,10 @@
-from avalon import api
+from openpype.pipeline import (
+ load,
+ remove_container
+)
-class AssemblyLoader(api.Loader):
+class AssemblyLoader(load.LoaderPlugin):
families = ["assembly"]
representations = ["json"]
@@ -48,13 +51,11 @@ class AssemblyLoader(api.Loader):
def update(self, container, representation):
from openpype import setdress
- return setdress.update_package(container,
- representation)
+ return setdress.update_package(container, representation)
def remove(self, container):
"""Remove all sub containers"""
- from avalon import api
from openpype import setdress
import maya.cmds as cmds
@@ -63,7 +64,7 @@ class AssemblyLoader(api.Loader):
for member_container in member_containers:
self.log.info("Removing container %s",
member_container['objectName'])
- api.remove(member_container)
+ remove_container(member_container)
# Remove alembic hierarchy reference
# TODO: Check whether removing all contained references is safe enough
diff --git a/openpype/hosts/maya/plugins/load/load_audio.py b/openpype/hosts/maya/plugins/load/load_audio.py
index 99f1f7c172..d8844ffea6 100644
--- a/openpype/hosts/maya/plugins/load/load_audio.py
+++ b/openpype/hosts/maya/plugins/load/load_audio.py
@@ -1,10 +1,14 @@
from maya import cmds, mel
-from avalon import api, io
+from avalon import io
+from openpype.pipeline import (
+ load,
+ get_representation_path
+)
from openpype.hosts.maya.api.pipeline import containerise
from openpype.hosts.maya.api.lib import unique_namespace
-class AudioLoader(api.Loader):
+class AudioLoader(load.LoaderPlugin):
"""Specific loader of audio."""
families = ["audio"]
@@ -51,7 +55,7 @@ class AudioLoader(api.Loader):
assert audio_node is not None, "Audio node not found."
- path = api.get_representation_path(representation)
+ path = get_representation_path(representation)
audio_node.filename.set(path)
cmds.setAttr(
container["objectName"] + ".representation",
diff --git a/openpype/hosts/maya/plugins/load/load_gpucache.py b/openpype/hosts/maya/plugins/load/load_gpucache.py
index 2e0b7bb810..591e568e4c 100644
--- a/openpype/hosts/maya/plugins/load/load_gpucache.py
+++ b/openpype/hosts/maya/plugins/load/load_gpucache.py
@@ -1,9 +1,13 @@
import os
-from avalon import api
+
+from openpype.pipeline import (
+ load,
+ get_representation_path
+)
from openpype.api import get_project_settings
-class GpuCacheLoader(api.Loader):
+class GpuCacheLoader(load.LoaderPlugin):
"""Load model Alembic as gpuCache"""
families = ["model"]
@@ -73,7 +77,7 @@ class GpuCacheLoader(api.Loader):
import maya.cmds as cmds
- path = api.get_representation_path(representation)
+ path = get_representation_path(representation)
# Update the cache
members = cmds.sets(container['objectName'], query=True)
diff --git a/openpype/hosts/maya/plugins/load/load_image_plane.py b/openpype/hosts/maya/plugins/load/load_image_plane.py
index 8e33f51389..b250986489 100644
--- a/openpype/hosts/maya/plugins/load/load_image_plane.py
+++ b/openpype/hosts/maya/plugins/load/load_image_plane.py
@@ -1,6 +1,10 @@
from Qt import QtWidgets, QtCore
-from avalon import api, io
+from avalon import io
+from openpype.pipeline import (
+ load,
+ get_representation_path
+)
from openpype.hosts.maya.api.pipeline import containerise
from openpype.hosts.maya.api.lib import unique_namespace
@@ -74,7 +78,7 @@ class CameraWindow(QtWidgets.QDialog):
self.close()
-class ImagePlaneLoader(api.Loader):
+class ImagePlaneLoader(load.LoaderPlugin):
"""Specific loader of plate for image planes on selected camera."""
families = ["image", "plate", "render"]
@@ -203,7 +207,7 @@ class ImagePlaneLoader(api.Loader):
assert image_plane_shape is not None, "Image plane not found."
- path = api.get_representation_path(representation)
+ path = get_representation_path(representation)
image_plane_shape.imageName.set(path)
cmds.setAttr(
container["objectName"] + ".representation",
diff --git a/openpype/hosts/maya/plugins/load/load_look.py b/openpype/hosts/maya/plugins/load/load_look.py
index 96c1ecbb20..8f02ed59b8 100644
--- a/openpype/hosts/maya/plugins/load/load_look.py
+++ b/openpype/hosts/maya/plugins/load/load_look.py
@@ -5,7 +5,8 @@ from collections import defaultdict
from Qt import QtWidgets
-from avalon import api, io
+from avalon import io
+from openpype.pipeline import get_representation_path
import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api import lib
from openpype.widgets.message_window import ScrollMessageBox
@@ -77,7 +78,7 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
})
# Load relationships
- shader_relation = api.get_representation_path(json_representation)
+ shader_relation = get_representation_path(json_representation)
with open(shader_relation, "r") as f:
json_data = json.load(f)
diff --git a/openpype/hosts/maya/plugins/load/load_matchmove.py b/openpype/hosts/maya/plugins/load/load_matchmove.py
index abc702cde8..ee3332bd09 100644
--- a/openpype/hosts/maya/plugins/load/load_matchmove.py
+++ b/openpype/hosts/maya/plugins/load/load_matchmove.py
@@ -1,8 +1,8 @@
-from avalon import api
from maya import mel
+from openpype.pipeline import load
-class MatchmoveLoader(api.Loader):
+class MatchmoveLoader(load.LoaderPlugin):
"""
This will run matchmove script to create track in scene.
diff --git a/openpype/hosts/maya/plugins/load/load_redshift_proxy.py b/openpype/hosts/maya/plugins/load/load_redshift_proxy.py
index fd2ae0f1d3..d93a9f02a2 100644
--- a/openpype/hosts/maya/plugins/load/load_redshift_proxy.py
+++ b/openpype/hosts/maya/plugins/load/load_redshift_proxy.py
@@ -5,8 +5,11 @@ import clique
import maya.cmds as cmds
-from avalon import api
from openpype.api import get_project_settings
+from openpype.pipeline import (
+ load,
+ get_representation_path
+)
from openpype.hosts.maya.api.lib import (
namespaced,
maintained_selection,
@@ -15,7 +18,7 @@ from openpype.hosts.maya.api.lib import (
from openpype.hosts.maya.api.pipeline import containerise
-class RedshiftProxyLoader(api.Loader):
+class RedshiftProxyLoader(load.LoaderPlugin):
"""Load Redshift proxy"""
families = ["redshiftproxy"]
@@ -78,7 +81,7 @@ class RedshiftProxyLoader(api.Loader):
rs_meshes = cmds.ls(members, type="RedshiftProxyMesh")
assert rs_meshes, "Cannot find RedshiftProxyMesh in container"
- filename = api.get_representation_path(representation)
+ filename = get_representation_path(representation)
for rs_mesh in rs_meshes:
cmds.setAttr("{}.fileName".format(rs_mesh),
diff --git a/openpype/hosts/maya/plugins/load/load_reference.py b/openpype/hosts/maya/plugins/load/load_reference.py
index 0565b0b95c..04a25f6493 100644
--- a/openpype/hosts/maya/plugins/load/load_reference.py
+++ b/openpype/hosts/maya/plugins/load/load_reference.py
@@ -1,8 +1,10 @@
import os
from maya import cmds
from avalon import api
+
from openpype.api import get_project_settings
from openpype.lib import get_creator_by_name
+from openpype.pipeline import legacy_create
import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api.lib import maintained_selection
@@ -119,10 +121,8 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
if family == "rig":
self._post_process_rig(name, namespace, context, options)
else:
-
if "translate" in options:
cmds.setAttr(group_name + ".t", *options["translate"])
-
return new_nodes
def switch(self, container, representation):
@@ -151,7 +151,7 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
creator_plugin = get_creator_by_name(self.animation_creator_name)
with maintained_selection():
cmds.select([output, controls] + roots, noExpand=True)
- api.create(
+ legacy_create(
creator_plugin,
name=namespace,
asset=asset,
diff --git a/openpype/hosts/maya/plugins/load/load_rendersetup.py b/openpype/hosts/maya/plugins/load/load_rendersetup.py
index efeff2f193..7a2d8b1002 100644
--- a/openpype/hosts/maya/plugins/load/load_rendersetup.py
+++ b/openpype/hosts/maya/plugins/load/load_rendersetup.py
@@ -7,10 +7,13 @@ instance.
"""
import json
-import six
import sys
+import six
-from avalon import api
+from openpype.pipeline import (
+ load,
+ get_representation_path
+)
from openpype.hosts.maya.api import lib
from openpype.hosts.maya.api.pipeline import containerise
@@ -18,7 +21,7 @@ from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
-class RenderSetupLoader(api.Loader):
+class RenderSetupLoader(load.LoaderPlugin):
"""Load json preset for RenderSetup overwriting current one."""
families = ["rendersetup"]
@@ -87,7 +90,7 @@ class RenderSetupLoader(api.Loader):
"Render setup setting will be overwritten by new version. All "
"setting specified by user not included in loaded version "
"will be lost.")
- path = api.get_representation_path(representation)
+ path = get_representation_path(representation)
with open(path, "r") as file:
try:
renderSetup.instance().decode(
diff --git a/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py b/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py
index 3e1d67ae9a..70bd9d22e2 100644
--- a/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py
+++ b/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py
@@ -1,9 +1,10 @@
import os
-from avalon import api
+
from openpype.api import get_project_settings
+from openpype.pipeline import load
-class LoadVDBtoRedShift(api.Loader):
+class LoadVDBtoRedShift(load.LoaderPlugin):
"""Load OpenVDB in a Redshift Volume Shape"""
families = ["vdbcache"]
diff --git a/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py b/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py
index 099c020093..4f14235bfb 100644
--- a/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py
+++ b/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py
@@ -1,6 +1,10 @@
import os
-from avalon import api
+
from openpype.api import get_project_settings
+from openpype.pipeline import (
+ load,
+ get_representation_path
+)
from maya import cmds
@@ -69,7 +73,7 @@ def _fix_duplicate_vvg_callbacks():
matched.add(callback)
-class LoadVDBtoVRay(api.Loader):
+class LoadVDBtoVRay(load.LoaderPlugin):
families = ["vdbcache"]
representations = ["vdb"]
@@ -174,7 +178,7 @@ class LoadVDBtoVRay(api.Loader):
fname = files[0]
else:
# Sequence
- from avalon.vendor import clique
+ import clique
# todo: check support for negative frames as input
collections, remainder = clique.assemble(files)
assert len(collections) == 1, (
@@ -252,7 +256,7 @@ class LoadVDBtoVRay(api.Loader):
def update(self, container, representation):
- path = api.get_representation_path(representation)
+ path = get_representation_path(representation)
# Find VRayVolumeGrid
members = cmds.sets(container['objectName'], query=True)
diff --git a/openpype/hosts/maya/plugins/load/load_vrayproxy.py b/openpype/hosts/maya/plugins/load/load_vrayproxy.py
index ac2fe635b3..5b79b1efb3 100644
--- a/openpype/hosts/maya/plugins/load/load_vrayproxy.py
+++ b/openpype/hosts/maya/plugins/load/load_vrayproxy.py
@@ -9,8 +9,12 @@ import os
import maya.cmds as cmds
-from avalon import api, io
+from avalon import io
from openpype.api import get_project_settings
+from openpype.pipeline import (
+ load,
+ get_representation_path
+)
from openpype.hosts.maya.api.lib import (
maintained_selection,
namespaced,
@@ -19,7 +23,7 @@ from openpype.hosts.maya.api.lib import (
from openpype.hosts.maya.api.pipeline import containerise
-class VRayProxyLoader(api.Loader):
+class VRayProxyLoader(load.LoaderPlugin):
"""Load VRay Proxy with Alembic or VrayMesh."""
families = ["vrayproxy", "model", "pointcache", "animation"]
@@ -100,7 +104,10 @@ class VRayProxyLoader(api.Loader):
assert vraymeshes, "Cannot find VRayMesh in container"
# get all representations for this version
- filename = self._get_abc(representation["parent"]) or api.get_representation_path(representation) # noqa: E501
+ filename = (
+ self._get_abc(representation["parent"])
+ or get_representation_path(representation)
+ )
for vray_mesh in vraymeshes:
cmds.setAttr("{}.fileName".format(vray_mesh),
@@ -185,7 +192,7 @@ class VRayProxyLoader(api.Loader):
if abc_rep:
self.log.debug("Found, we'll link alembic to vray proxy.")
- file_name = api.get_representation_path(abc_rep)
+ file_name = get_representation_path(abc_rep)
self.log.debug("File: {}".format(self.fname))
return file_name
diff --git a/openpype/hosts/maya/plugins/load/load_vrayscene.py b/openpype/hosts/maya/plugins/load/load_vrayscene.py
index 2e85514938..61132088cc 100644
--- a/openpype/hosts/maya/plugins/load/load_vrayscene.py
+++ b/openpype/hosts/maya/plugins/load/load_vrayscene.py
@@ -1,7 +1,11 @@
+# -*- coding: utf-8 -*-
import os
-import maya.cmds as cmds
-from avalon import api
+import maya.cmds as cmds # noqa
from openpype.api import get_project_settings
+from openpype.pipeline import (
+ load,
+ get_representation_path
+)
from openpype.hosts.maya.api.lib import (
maintained_selection,
namespaced,
@@ -10,7 +14,7 @@ from openpype.hosts.maya.api.lib import (
from openpype.hosts.maya.api.pipeline import containerise
-class VRaySceneLoader(api.Loader):
+class VRaySceneLoader(load.LoaderPlugin):
"""Load Vray scene"""
families = ["vrayscene_layer"]
@@ -42,20 +46,20 @@ class VRaySceneLoader(api.Loader):
with maintained_selection():
cmds.namespace(addNamespace=namespace)
with namespaced(namespace, new=False):
- nodes, group_node = self.create_vray_scene(name,
- filename=self.fname)
+ nodes, root_node = self.create_vray_scene(name,
+ filename=self.fname)
self[:] = nodes
if not nodes:
return
# colour the group node
- presets = get_project_settings(os.environ['AVALON_PROJECT'])
- colors = presets['maya']['load']['colors']
+ settings = get_project_settings(os.environ['AVALON_PROJECT'])
+ colors = settings['maya']['load']['colors']
c = colors.get(family)
if c is not None:
- cmds.setAttr("{0}.useOutlinerColor".format(group_node), 1)
- cmds.setAttr("{0}.outlinerColor".format(group_node),
+ cmds.setAttr("{0}.useOutlinerColor".format(root_node), 1)
+ cmds.setAttr("{0}.outlinerColor".format(root_node),
(float(c[0])/255),
(float(c[1])/255),
(float(c[2])/255)
@@ -77,7 +81,7 @@ class VRaySceneLoader(api.Loader):
vraymeshes = cmds.ls(members, type="VRayScene")
assert vraymeshes, "Cannot find VRayScene in container"
- filename = api.get_representation_path(representation)
+ filename = get_representation_path(representation)
for vray_mesh in vraymeshes:
cmds.setAttr("{}.FilePath".format(vray_mesh),
@@ -123,17 +127,21 @@ class VRaySceneLoader(api.Loader):
mesh_node_name = "VRayScene_{}".format(name)
trans = cmds.createNode(
- "transform", name="{}".format(mesh_node_name))
- mesh = cmds.createNode(
- "mesh", name="{}_Shape".format(mesh_node_name), parent=trans)
+ "transform", name=mesh_node_name)
vray_scene = cmds.createNode(
"VRayScene", name="{}_VRSCN".format(mesh_node_name), parent=trans)
+ mesh = cmds.createNode(
+ "mesh", name="{}_Shape".format(mesh_node_name), parent=trans)
cmds.connectAttr(
"{}.outMesh".format(vray_scene), "{}.inMesh".format(mesh))
cmds.setAttr("{}.FilePath".format(vray_scene), filename, type="string")
+ # Lock the shape nodes so the user cannot delete these
+ cmds.lockNode(mesh, lock=True)
+ cmds.lockNode(vray_scene, lock=True)
+
# Create important connections
cmds.connectAttr("time1.outTime",
"{0}.inputTime".format(trans))
@@ -141,11 +149,9 @@ class VRaySceneLoader(api.Loader):
# Connect mesh to initialShadingGroup
cmds.sets([mesh], forceElement="initialShadingGroup")
- group_node = cmds.group(empty=True, name="{}_GRP".format(name))
- cmds.parent(trans, group_node)
- nodes = [trans, vray_scene, mesh, group_node]
+ nodes = [trans, vray_scene, mesh]
# Fix: Force refresh so the mesh shows correctly after creation
cmds.refresh()
- return nodes, group_node
+ return nodes, trans
diff --git a/openpype/hosts/maya/plugins/load/load_yeti_cache.py b/openpype/hosts/maya/plugins/load/load_yeti_cache.py
index dfe75173ac..c64e1c540b 100644
--- a/openpype/hosts/maya/plugins/load/load_yeti_cache.py
+++ b/openpype/hosts/maya/plugins/load/load_yeti_cache.py
@@ -7,13 +7,17 @@ from pprint import pprint
from maya import cmds
-from avalon import api, io
+from avalon import io
from openpype.api import get_project_settings
+from openpype.pipeline import (
+ load,
+ get_representation_path
+)
from openpype.hosts.maya.api import lib
from openpype.hosts.maya.api.pipeline import containerise
-class YetiCacheLoader(api.Loader):
+class YetiCacheLoader(load.LoaderPlugin):
families = ["yeticache", "yetiRig"]
representations = ["fur"]
@@ -121,8 +125,8 @@ class YetiCacheLoader(api.Loader):
"cannot find fursettings representation"
)
- settings_fname = api.get_representation_path(fur_settings)
- path = api.get_representation_path(representation)
+ settings_fname = get_representation_path(fur_settings)
+ path = get_representation_path(representation)
# Get all node data
with open(settings_fname, "r") as fp:
settings = json.load(fp)
diff --git a/openpype/hosts/maya/plugins/publish/collect_look.py b/openpype/hosts/maya/plugins/publish/collect_look.py
index d39750e917..b6a76f1e21 100644
--- a/openpype/hosts/maya/plugins/publish/collect_look.py
+++ b/openpype/hosts/maya/plugins/publish/collect_look.py
@@ -320,7 +320,7 @@ class CollectLook(pyblish.api.InstancePlugin):
# Collect file nodes used by shading engines (if we have any)
files = []
- look_sets = sets.keys()
+ look_sets = list(sets.keys())
shader_attrs = [
"surfaceShader",
"volumeShader",
diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py
index 13ae1924b9..a525b562f3 100644
--- a/openpype/hosts/maya/plugins/publish/collect_render.py
+++ b/openpype/hosts/maya/plugins/publish/collect_render.py
@@ -50,6 +50,7 @@ import maya.app.renderSetup.model.renderSetup as renderSetup
import pyblish.api
from avalon import api
+from openpype.lib import get_formatted_current_time
from openpype.hosts.maya.api.lib_renderproducts import get as get_layer_render_products # noqa: E501
from openpype.hosts.maya.api import lib
@@ -234,13 +235,14 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
publish_meta_path = None
for aov in exp_files:
full_paths = []
- for file in aov[aov.keys()[0]]:
+ aov_first_key = list(aov.keys())[0]
+ for file in aov[aov_first_key]:
full_path = os.path.join(workspace, default_render_file,
file)
full_path = full_path.replace("\\", "/")
full_paths.append(full_path)
publish_meta_path = os.path.dirname(full_path)
- aov_dict[aov.keys()[0]] = full_paths
+ aov_dict[aov_first_key] = full_paths
frame_start_render = int(self.get_render_attribute(
"startFrame", layer=layer_name))
@@ -327,7 +329,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
"family": "renderlayer",
"families": ["renderlayer"],
"asset": asset,
- "time": api.time(),
+ "time": get_formatted_current_time(),
"author": context.data["user"],
# Add source to allow tracing back to the scene from
# which was submitted originally
diff --git a/openpype/hosts/maya/plugins/publish/collect_vrayscene.py b/openpype/hosts/maya/plugins/publish/collect_vrayscene.py
index c1e5d388af..327fc836dc 100644
--- a/openpype/hosts/maya/plugins/publish/collect_vrayscene.py
+++ b/openpype/hosts/maya/plugins/publish/collect_vrayscene.py
@@ -7,6 +7,7 @@ from maya import cmds
import pyblish.api
from avalon import api
+from openpype.lib import get_formatted_current_time
from openpype.hosts.maya.api import lib
@@ -117,7 +118,7 @@ class CollectVrayScene(pyblish.api.InstancePlugin):
"family": "vrayscene_layer",
"families": ["vrayscene_layer"],
"asset": api.Session["AVALON_ASSET"],
- "time": api.time(),
+ "time": get_formatted_current_time(),
"author": context.data["user"],
# Add source to allow tracing back to the scene from
# which was submitted originally
diff --git a/openpype/hosts/maya/plugins/publish/extract_animation.py b/openpype/hosts/maya/plugins/publish/extract_animation.py
index 269972d996..8a8bd67cd8 100644
--- a/openpype/hosts/maya/plugins/publish/extract_animation.py
+++ b/openpype/hosts/maya/plugins/publish/extract_animation.py
@@ -38,12 +38,8 @@ class ExtractAnimation(openpype.api.Extractor):
fullPath=True) or []
# Collect the start and end including handles
- start = instance.data["frameStart"]
- end = instance.data["frameEnd"]
- handles = instance.data.get("handles", 0) or 0
- if handles:
- start -= handles
- end += handles
+ start = instance.data["frameStartHandle"]
+ end = instance.data["frameEndHandle"]
self.log.info("Extracting animation..")
dirname = self.staging_dir(instance)
diff --git a/openpype/hosts/maya/plugins/publish/extract_ass.py b/openpype/hosts/maya/plugins/publish/extract_ass.py
index ab149de700..760f410f91 100644
--- a/openpype/hosts/maya/plugins/publish/extract_ass.py
+++ b/openpype/hosts/maya/plugins/publish/extract_ass.py
@@ -38,13 +38,9 @@ class ExtractAssStandin(openpype.api.Extractor):
self.log.info("Extracting ass sequence")
# Collect the start and end including handles
- start = instance.data.get("frameStart", 1)
- end = instance.data.get("frameEnd", 1)
- handles = instance.data.get("handles", 0)
+ start = instance.data.get("frameStartHandle", 1)
+ end = instance.data.get("frameEndHandle", 1)
step = instance.data.get("step", 0)
- if handles:
- start -= handles
- end += handles
exported_files = cmds.arnoldExportAss(filename=file_path,
selected=True,
diff --git a/openpype/hosts/maya/plugins/publish/extract_camera_alembic.py b/openpype/hosts/maya/plugins/publish/extract_camera_alembic.py
index 806a079940..5ad6b79d5c 100644
--- a/openpype/hosts/maya/plugins/publish/extract_camera_alembic.py
+++ b/openpype/hosts/maya/plugins/publish/extract_camera_alembic.py
@@ -21,17 +21,9 @@ class ExtractCameraAlembic(openpype.api.Extractor):
def process(self, instance):
- # get settings
- framerange = [instance.data.get("frameStart", 1),
- instance.data.get("frameEnd", 1)]
- handle_start = instance.data.get("handleStart", 0)
- handle_end = instance.data.get("handleEnd", 0)
-
- # TODO: deprecated attribute "handles"
-
- if handle_start is None:
- handle_start = instance.data.get("handles", 0)
- handle_end = instance.data.get("handles", 0)
+ # Collect the start and end including handles
+ start = instance.data["frameStartHandle"]
+ end = instance.data["frameEndHandle"]
step = instance.data.get("step", 1.0)
bake_to_worldspace = instance.data("bakeToWorldSpace", True)
@@ -61,10 +53,7 @@ class ExtractCameraAlembic(openpype.api.Extractor):
job_str = ' -selection -dataFormat "ogawa" '
job_str += ' -attrPrefix cb'
- job_str += ' -frameRange {0} {1} '.format(framerange[0]
- - handle_start,
- framerange[1]
- + handle_end)
+ job_str += ' -frameRange {0} {1} '.format(start, end)
job_str += ' -step {0} '.format(step)
if bake_to_worldspace:
diff --git a/openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py b/openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py
index 9d25b147de..49c156f9cd 100644
--- a/openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py
+++ b/openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py
@@ -43,7 +43,8 @@ def grouper(iterable, n, fillvalue=None):
"""
args = [iter(iterable)] * n
- return itertools.izip_longest(fillvalue=fillvalue, *args)
+ from six.moves import zip_longest
+ return zip_longest(fillvalue=fillvalue, *args)
def unlock(plug):
@@ -117,19 +118,9 @@ class ExtractCameraMayaScene(openpype.api.Extractor):
# no preset found
pass
- framerange = [instance.data.get("frameStart", 1),
- instance.data.get("frameEnd", 1)]
- handle_start = instance.data.get("handleStart", 0)
- handle_end = instance.data.get("handleEnd", 0)
-
- # TODO: deprecated attribute "handles"
-
- if handle_start is None:
- handle_start = instance.data.get("handles", 0)
- handle_end = instance.data.get("handles", 0)
-
- range_with_handles = [framerange[0] - handle_start,
- framerange[1] + handle_end]
+ # Collect the start and end including handles
+ start = instance.data["frameStartHandle"]
+ end = instance.data["frameEndHandle"]
step = instance.data.get("step", 1.0)
bake_to_worldspace = instance.data("bakeToWorldSpace", True)
@@ -164,7 +155,7 @@ class ExtractCameraMayaScene(openpype.api.Extractor):
"Performing camera bakes: {}".format(transform))
baked = lib.bake_to_world_space(
transform,
- frame_range=range_with_handles,
+ frame_range=[start, end],
step=step
)
baked_shapes = cmds.ls(baked,
diff --git a/openpype/hosts/maya/plugins/publish/extract_fbx.py b/openpype/hosts/maya/plugins/publish/extract_fbx.py
index 844084b9ab..a2adcb3091 100644
--- a/openpype/hosts/maya/plugins/publish/extract_fbx.py
+++ b/openpype/hosts/maya/plugins/publish/extract_fbx.py
@@ -168,12 +168,8 @@ class ExtractFBX(openpype.api.Extractor):
self.log.info("Export options: {0}".format(options))
# Collect the start and end including handles
- start = instance.data["frameStart"]
- end = instance.data["frameEnd"]
- handles = instance.data.get("handles", 0)
- if handles:
- start -= handles
- end += handles
+ start = instance.data["frameStartHandle"]
+ end = instance.data["frameEndHandle"]
options['bakeComplexStart'] = start
options['bakeComplexEnd'] = end
diff --git a/openpype/hosts/maya/plugins/publish/extract_look.py b/openpype/hosts/maya/plugins/publish/extract_look.py
index fe89038a24..a8893072d0 100644
--- a/openpype/hosts/maya/plugins/publish/extract_look.py
+++ b/openpype/hosts/maya/plugins/publish/extract_look.py
@@ -62,6 +62,7 @@ def maketx(source, destination, *args):
from openpype.lib import get_oiio_tools_path
maketx_path = get_oiio_tools_path("maketx")
+
if not os.path.exists(maketx_path):
print(
"OIIO tool not found in {}".format(maketx_path))
@@ -216,7 +217,7 @@ class ExtractLook(openpype.api.Extractor):
self.log.info("Extract sets (%s) ..." % _scene_type)
lookdata = instance.data["lookData"]
relationships = lookdata["relationships"]
- sets = relationships.keys()
+ sets = list(relationships.keys())
if not sets:
self.log.info("No sets found")
return
diff --git a/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py b/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py
index 9c432cbc67..389995d30c 100644
--- a/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py
+++ b/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py
@@ -6,6 +6,7 @@ from maya import cmds
import openpype.api
from openpype.hosts.maya.api.lib import maintained_selection
+from avalon.pipeline import AVALON_CONTAINER_ID
class ExtractMayaSceneRaw(openpype.api.Extractor):
@@ -57,10 +58,16 @@ class ExtractMayaSceneRaw(openpype.api.Extractor):
else:
members = instance[:]
+ selection = members
+ if set(self.add_for_families).intersection(
+ set(instance.data.get("families", []))) or \
+ instance.data.get("family") in self.add_for_families:
+ selection += self._get_loaded_containers(members)
+
# Perform extraction
self.log.info("Performing extraction ...")
with maintained_selection():
- cmds.select(members, noExpand=True)
+ cmds.select(selection, noExpand=True)
cmds.file(path,
force=True,
typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501
@@ -83,3 +90,33 @@ class ExtractMayaSceneRaw(openpype.api.Extractor):
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))
+
+ @staticmethod
+ def _get_loaded_containers(members):
+ # type: (list) -> list
+ refs_to_include = {
+ cmds.referenceQuery(node, referenceNode=True)
+ for node in members
+ if cmds.referenceQuery(node, isNodeReferenced=True)
+ }
+
+ members_with_refs = refs_to_include.union(members)
+
+ obj_sets = cmds.ls("*.id", long=True, type="objectSet", recursive=True,
+ objectsOnly=True)
+
+ loaded_containers = []
+ for obj_set in obj_sets:
+
+ if not cmds.attributeQuery("id", node=obj_set, exists=True):
+ continue
+
+ id_attr = "{}.id".format(obj_set)
+ if cmds.getAttr(id_attr) != AVALON_CONTAINER_ID:
+ continue
+
+ set_content = set(cmds.sets(obj_set, query=True))
+ if set_content.intersection(members_with_refs):
+ loaded_containers.append(obj_set)
+
+ return loaded_containers
diff --git a/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py b/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py
index 615bc27878..562ca078e1 100644
--- a/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py
+++ b/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py
@@ -28,14 +28,19 @@ class ExtractVRayProxy(openpype.api.Extractor):
if not anim_on:
# Remove animation information because it is not required for
# non-animated subsets
- instance.data.pop("frameStart", None)
- instance.data.pop("frameEnd", None)
+ keys = ["frameStart", "frameEnd",
+ "handleStart", "handleEnd",
+ "frameStartHandle", "frameEndHandle",
+ # Backwards compatibility
+ "handles"]
+ for key in keys:
+ instance.data.pop(key, None)
start_frame = 1
end_frame = 1
else:
- start_frame = instance.data["frameStart"]
- end_frame = instance.data["frameEnd"]
+ start_frame = instance.data["frameStartHandle"]
+ end_frame = instance.data["frameEndHandle"]
vertex_colors = instance.data.get("vertexColors", False)
diff --git a/openpype/hosts/maya/plugins/publish/extract_yeti_cache.py b/openpype/hosts/maya/plugins/publish/extract_yeti_cache.py
index 05fe79ecc5..0d85708789 100644
--- a/openpype/hosts/maya/plugins/publish/extract_yeti_cache.py
+++ b/openpype/hosts/maya/plugins/publish/extract_yeti_cache.py
@@ -29,8 +29,8 @@ class ExtractYetiCache(openpype.api.Extractor):
data_file = os.path.join(dirname, "yeti.fursettings")
# Collect information for writing cache
- start_frame = instance.data.get("frameStart")
- end_frame = instance.data.get("frameEnd")
+ start_frame = instance.data.get("frameStartHandle")
+ end_frame = instance.data.get("frameEndHandle")
preroll = instance.data.get("preroll")
if preroll > 0:
start_frame -= preroll
diff --git a/openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py b/openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py
index 3625d4ab32..5fb9bd98b1 100644
--- a/openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py
+++ b/openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py
@@ -110,9 +110,9 @@ class ValidateAssRelativePaths(pyblish.api.InstancePlugin):
Maya API will return a list of values, which need to be properly
handled to evaluate properly.
"""
- if isinstance(attr_val, types.BooleanType):
+ if isinstance(attr_val, bool):
return attr_val
- elif isinstance(attr_val, (types.ListType, types.GeneratorType)):
+ elif isinstance(attr_val, (list, types.GeneratorType)):
return any(attr_val)
else:
return bool(attr_val)
diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py b/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py
index 5ce422239d..bf95d8ba09 100644
--- a/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py
+++ b/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py
@@ -5,6 +5,8 @@ import math
import maya.api.OpenMaya as om
import pymel.core as pm
+from six.moves import xrange
+
class GetOverlappingUVs(object):
diff --git a/openpype/hosts/maya/plugins/publish/validate_vray_referenced_aovs.py b/openpype/hosts/maya/plugins/publish/validate_vray_referenced_aovs.py
index 6cfbd4049b..7a48c29b7d 100644
--- a/openpype/hosts/maya/plugins/publish/validate_vray_referenced_aovs.py
+++ b/openpype/hosts/maya/plugins/publish/validate_vray_referenced_aovs.py
@@ -82,9 +82,9 @@ class ValidateVrayReferencedAOVs(pyblish.api.InstancePlugin):
bool: cast Maya attribute to Pythons boolean value.
"""
- if isinstance(attr_val, types.BooleanType):
+ if isinstance(attr_val, bool):
return attr_val
- elif isinstance(attr_val, (types.ListType, types.GeneratorType)):
+ elif isinstance(attr_val, (list, types.GeneratorType)):
return any(attr_val)
else:
return bool(attr_val)
diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py
index 6faf6cd108..dba7ec1b85 100644
--- a/openpype/hosts/nuke/api/lib.py
+++ b/openpype/hosts/nuke/api/lib.py
@@ -1,6 +1,5 @@
import os
import re
-import sys
import six
import platform
import contextlib
@@ -679,10 +678,10 @@ def get_render_path(node):
}
nuke_imageio_writes = get_created_node_imageio_setting(**data_preset)
+ host_name = os.environ.get("AVALON_APP")
- application = lib.get_application(os.environ["AVALON_APP_NAME"])
data.update({
- "application": application,
+ "app": host_name,
"nuke_imageio_writes": nuke_imageio_writes
})
@@ -805,18 +804,14 @@ def create_write_node(name, data, input=None, prenodes=None,
'''
imageio_writes = get_created_node_imageio_setting(**data)
- app_manager = ApplicationManager()
- app_name = os.environ.get("AVALON_APP_NAME")
- if app_name:
- app = app_manager.applications.get(app_name)
-
for knob in imageio_writes["knobs"]:
if knob["name"] == "file_type":
representation = knob["value"]
+ host_name = os.environ.get("AVALON_APP")
try:
data.update({
- "app": app.host_name,
+ "app": host_name,
"imageio_writes": imageio_writes,
"representation": representation,
})
diff --git a/openpype/hosts/nuke/api/pipeline.py b/openpype/hosts/nuke/api/pipeline.py
index 8c6c9ca55b..fd2e16b8d3 100644
--- a/openpype/hosts/nuke/api/pipeline.py
+++ b/openpype/hosts/nuke/api/pipeline.py
@@ -14,6 +14,12 @@ from openpype.api import (
BuildWorkfile,
get_current_project_settings
)
+from openpype.lib import register_event_callback
+from openpype.pipeline import (
+ LegacyCreator,
+ register_loader_plugin_path,
+ deregister_loader_plugin_path,
+)
from openpype.tools.utils import host_tools
from .command import viewer_update_and_undo_stop
@@ -97,13 +103,13 @@ def install():
log.info("Registering Nuke plug-ins..")
pyblish.api.register_plugin_path(PUBLISH_PATH)
- avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH)
- avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH)
+ register_loader_plugin_path(LOAD_PATH)
+ avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH)
avalon.api.register_plugin_path(avalon.api.InventoryAction, INVENTORY_PATH)
# Register Avalon event for workfiles loading.
- avalon.api.on("workio.open_file", check_inventory_versions)
- avalon.api.on("taskChanged", change_context_label)
+ register_event_callback("workio.open_file", check_inventory_versions)
+ register_event_callback("taskChanged", change_context_label)
pyblish.api.register_callback(
"instanceToggled", on_pyblish_instance_toggled)
@@ -123,8 +129,8 @@ def uninstall():
log.info("Deregistering Nuke plug-ins..")
pyblish.deregister_host("nuke")
pyblish.api.deregister_plugin_path(PUBLISH_PATH)
- avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH)
- avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH)
+ deregister_loader_plugin_path(LOAD_PATH)
+ avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH)
pyblish.api.deregister_callback(
"instanceToggled", on_pyblish_instance_toggled)
@@ -226,7 +232,7 @@ def _uninstall_menu():
menu.removeItem(item.name())
-def change_context_label(*args):
+def change_context_label():
menubar = nuke.menu("Nuke")
menu = menubar.findItem(MENU_LABEL)
diff --git a/openpype/hosts/nuke/api/plugin.py b/openpype/hosts/nuke/api/plugin.py
index fd754203d4..d0bb45a05d 100644
--- a/openpype/hosts/nuke/api/plugin.py
+++ b/openpype/hosts/nuke/api/plugin.py
@@ -4,11 +4,10 @@ import string
import nuke
-import avalon.api
-
-from openpype.api import (
- get_current_project_settings,
- PypeCreatorMixin
+from openpype.api import get_current_project_settings
+from openpype.pipeline import (
+ LegacyCreator,
+ LoaderPlugin,
)
from .lib import (
Knobby,
@@ -20,7 +19,7 @@ from .lib import (
)
-class OpenPypeCreator(PypeCreatorMixin, avalon.api.Creator):
+class OpenPypeCreator(LegacyCreator):
"""Pype Nuke Creator class wrapper"""
node_color = "0xdfea5dff"
@@ -87,7 +86,7 @@ def get_review_presets_config():
return [str(name) for name, _prop in outputs.items()]
-class NukeLoader(avalon.api.Loader):
+class NukeLoader(LoaderPlugin):
container_id_knob = "containerId"
container_id = None
@@ -152,6 +151,7 @@ class ExporterReview(object):
"""
data = None
+ publish_on_farm = False
def __init__(self,
klass,
@@ -210,6 +210,9 @@ class ExporterReview(object):
if self.multiple_presets:
repre["outputName"] = self.name
+ if self.publish_on_farm:
+ repre["tags"].append("publish_on_farm")
+
self.data["representations"].append(repre)
def get_view_input_process_node(self):
@@ -446,6 +449,9 @@ class ExporterReviewMov(ExporterReview):
return path
def generate_mov(self, farm=False, **kwargs):
+ self.publish_on_farm = farm
+ reformat_node_add = kwargs["reformat_node_add"]
+ reformat_node_config = kwargs["reformat_node_config"]
bake_viewer_process = kwargs["bake_viewer_process"]
bake_viewer_input_process_node = kwargs[
"bake_viewer_input_process"]
@@ -483,6 +489,30 @@ class ExporterReviewMov(ExporterReview):
self.previous_node = r_node
self.log.debug("Read... `{}`".format(self._temp_nodes[subset]))
+ # add reformat node
+ if reformat_node_add:
+ # append reformated tag
+ add_tags.append("reformated")
+
+ rf_node = nuke.createNode("Reformat")
+ for kn_conf in reformat_node_config:
+ _type = kn_conf["type"]
+ k_name = str(kn_conf["name"])
+ k_value = kn_conf["value"]
+
+ # to remove unicode as nuke doesn't like it
+ if _type == "string":
+ k_value = str(kn_conf["value"])
+
+ rf_node[k_name].setValue(k_value)
+
+ # connect
+ rf_node.setInput(0, self.previous_node)
+ self._temp_nodes[subset].append(rf_node)
+ self.previous_node = rf_node
+ self.log.debug(
+ "Reformat... `{}`".format(self._temp_nodes[subset]))
+
# only create colorspace baking if toggled on
if bake_viewer_process:
if bake_viewer_input_process_node:
@@ -537,7 +567,7 @@ class ExporterReviewMov(ExporterReview):
# ---------- end nodes creation
# ---------- render or save to nk
- if farm:
+ if self.publish_on_farm:
nuke.scriptSave()
path_nk = self.save_file()
self.data.update({
@@ -547,11 +577,12 @@ class ExporterReviewMov(ExporterReview):
})
else:
self.render(write_node.name())
- # ---------- generate representation data
- self.get_representation_data(
- tags=["review", "delete"] + add_tags,
- range=True
- )
+
+ # ---------- generate representation data
+ self.get_representation_data(
+ tags=["review", "delete"] + add_tags,
+ range=True
+ )
self.log.debug("Representation... `{}`".format(self.data))
diff --git a/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py b/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py
index 49405fd213..5f834be557 100644
--- a/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py
+++ b/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py
@@ -1,4 +1,4 @@
-from avalon import api, style
+from avalon import api
from openpype.api import Logger
from openpype.hosts.nuke.api.lib import set_avalon_knob_data
@@ -7,7 +7,7 @@ class RepairOldLoaders(api.InventoryAction):
label = "Repair Old Loaders"
icon = "gears"
- color = style.colors.alert
+ color = "#cc0000"
log = Logger.get_logger(__name__)
diff --git a/openpype/hosts/nuke/plugins/load/actions.py b/openpype/hosts/nuke/plugins/load/actions.py
index 07dcf2d8e1..81840b3a38 100644
--- a/openpype/hosts/nuke/plugins/load/actions.py
+++ b/openpype/hosts/nuke/plugins/load/actions.py
@@ -2,13 +2,13 @@
"""
-from avalon import api
from openpype.api import Logger
+from openpype.pipeline import load
log = Logger().get_logger(__name__)
-class SetFrameRangeLoader(api.Loader):
+class SetFrameRangeLoader(load.LoaderPlugin):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["animation",
@@ -42,7 +42,7 @@ class SetFrameRangeLoader(api.Loader):
lib.update_frame_range(start, end)
-class SetFrameRangeWithHandlesLoader(api.Loader):
+class SetFrameRangeWithHandlesLoader(load.LoaderPlugin):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["animation",
diff --git a/openpype/hosts/nuke/plugins/load/load_backdrop.py b/openpype/hosts/nuke/plugins/load/load_backdrop.py
index 6619cfb414..36cec6f4c5 100644
--- a/openpype/hosts/nuke/plugins/load/load_backdrop.py
+++ b/openpype/hosts/nuke/plugins/load/load_backdrop.py
@@ -1,7 +1,11 @@
-from avalon import api, style, io
+from avalon import io
import nuke
import nukescripts
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
from openpype.hosts.nuke.api.lib import (
find_free_space_to_paste_nodes,
maintained_selection,
@@ -14,7 +18,7 @@ from openpype.hosts.nuke.api.commands import viewer_update_and_undo_stop
from openpype.hosts.nuke.api import containerise, update_container
-class LoadBackdropNodes(api.Loader):
+class LoadBackdropNodes(load.LoaderPlugin):
"""Loading Published Backdrop nodes (workfile, nukenodes)"""
representations = ["nk"]
@@ -23,7 +27,7 @@ class LoadBackdropNodes(api.Loader):
label = "Iport Nuke Nodes"
order = 0
icon = "eye"
- color = style.colors.light
+ color = "white"
node_color = "0x7533c1ff"
def load(self, context, name, namespace, data):
@@ -191,7 +195,7 @@ class LoadBackdropNodes(api.Loader):
# get corresponding node
GN = nuke.toNode(container['objectName'])
- file = api.get_representation_path(representation).replace("\\", "/")
+ file = get_representation_path(representation).replace("\\", "/")
context = representation["context"]
name = container['name']
version_data = version.get("data", {})
diff --git a/openpype/hosts/nuke/plugins/load/load_camera_abc.py b/openpype/hosts/nuke/plugins/load/load_camera_abc.py
index 9610940619..fb5f7f8ede 100644
--- a/openpype/hosts/nuke/plugins/load/load_camera_abc.py
+++ b/openpype/hosts/nuke/plugins/load/load_camera_abc.py
@@ -1,6 +1,10 @@
import nuke
-from avalon import api, io
+from avalon import io
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
from openpype.hosts.nuke.api import (
containerise,
update_container,
@@ -11,7 +15,7 @@ from openpype.hosts.nuke.api.lib import (
)
-class AlembicCameraLoader(api.Loader):
+class AlembicCameraLoader(load.LoaderPlugin):
"""
This will load alembic camera into script.
"""
@@ -127,7 +131,7 @@ class AlembicCameraLoader(api.Loader):
data_imprint.update({k: version_data[k]})
# getting file path
- file = api.get_representation_path(representation).replace("\\", "/")
+ file = get_representation_path(representation).replace("\\", "/")
with maintained_selection():
camera_node = nuke.toNode(object_name)
diff --git a/openpype/hosts/nuke/plugins/load/load_clip.py b/openpype/hosts/nuke/plugins/load/load_clip.py
index 21b7a6a816..2b4315a830 100644
--- a/openpype/hosts/nuke/plugins/load/load_clip.py
+++ b/openpype/hosts/nuke/plugins/load/load_clip.py
@@ -1,7 +1,8 @@
import nuke
-from avalon.vendor import qargparse
-from avalon import api, io
+import qargparse
+from avalon import io
+from openpype.pipeline import get_representation_path
from openpype.hosts.nuke.api.lib import (
get_imageio_input_colorspace,
maintained_selection
@@ -41,6 +42,9 @@ class LoadClip(plugin.NukeLoader):
icon = "file-video-o"
color = "white"
+ # Loaded from settings
+ _representations = []
+
script_start = int(nuke.root()["first_frame"].value())
# option gui
@@ -186,7 +190,7 @@ class LoadClip(plugin.NukeLoader):
is_sequence = len(representation["files"]) > 1
read_node = nuke.toNode(container['objectName'])
- file = api.get_representation_path(representation).replace("\\", "/")
+ file = get_representation_path(representation).replace("\\", "/")
start_at_workfile = bool("start at" in read_node['frame_mode'].value())
diff --git a/openpype/hosts/nuke/plugins/load/load_effects.py b/openpype/hosts/nuke/plugins/load/load_effects.py
index f636c6b510..68c3952942 100644
--- a/openpype/hosts/nuke/plugins/load/load_effects.py
+++ b/openpype/hosts/nuke/plugins/load/load_effects.py
@@ -1,7 +1,13 @@
import json
from collections import OrderedDict
import nuke
-from avalon import api, style, io
+
+from avalon import io
+
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
from openpype.hosts.nuke.api import (
containerise,
update_container,
@@ -9,7 +15,7 @@ from openpype.hosts.nuke.api import (
)
-class LoadEffects(api.Loader):
+class LoadEffects(load.LoaderPlugin):
"""Loading colorspace soft effect exported from nukestudio"""
representations = ["effectJson"]
@@ -18,7 +24,7 @@ class LoadEffects(api.Loader):
label = "Load Effects - nodes"
order = 0
icon = "cc"
- color = style.colors.light
+ color = "white"
ignore_attr = ["useLifetime"]
@@ -149,7 +155,7 @@ class LoadEffects(api.Loader):
# get corresponding node
GN = nuke.toNode(container['objectName'])
- file = api.get_representation_path(representation).replace("\\", "/")
+ file = get_representation_path(representation).replace("\\", "/")
name = container['name']
version_data = version.get("data", {})
vname = version.get("name", None)
diff --git a/openpype/hosts/nuke/plugins/load/load_effects_ip.py b/openpype/hosts/nuke/plugins/load/load_effects_ip.py
index 990bce54f1..9c4fd4c2c6 100644
--- a/openpype/hosts/nuke/plugins/load/load_effects_ip.py
+++ b/openpype/hosts/nuke/plugins/load/load_effects_ip.py
@@ -3,7 +3,12 @@ from collections import OrderedDict
import nuke
-from avalon import api, style, io
+from avalon import io
+
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
from openpype.hosts.nuke.api import lib
from openpype.hosts.nuke.api import (
containerise,
@@ -12,7 +17,7 @@ from openpype.hosts.nuke.api import (
)
-class LoadEffectsInputProcess(api.Loader):
+class LoadEffectsInputProcess(load.LoaderPlugin):
"""Loading colorspace soft effect exported from nukestudio"""
representations = ["effectJson"]
@@ -21,7 +26,7 @@ class LoadEffectsInputProcess(api.Loader):
label = "Load Effects - Input Process"
order = 0
icon = "eye"
- color = style.colors.alert
+ color = "#cc0000"
ignore_attr = ["useLifetime"]
def load(self, context, name, namespace, data):
@@ -156,7 +161,7 @@ class LoadEffectsInputProcess(api.Loader):
# get corresponding node
GN = nuke.toNode(container['objectName'])
- file = api.get_representation_path(representation).replace("\\", "/")
+ file = get_representation_path(representation).replace("\\", "/")
name = container['name']
version_data = version.get("data", {})
vname = version.get("name", None)
diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo.py b/openpype/hosts/nuke/plugins/load/load_gizmo.py
index 659977d789..6f2b191be9 100644
--- a/openpype/hosts/nuke/plugins/load/load_gizmo.py
+++ b/openpype/hosts/nuke/plugins/load/load_gizmo.py
@@ -1,5 +1,11 @@
import nuke
-from avalon import api, style, io
+
+from avalon import io
+
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
from openpype.hosts.nuke.api.lib import (
maintained_selection,
get_avalon_knob_data,
@@ -12,7 +18,7 @@ from openpype.hosts.nuke.api import (
)
-class LoadGizmo(api.Loader):
+class LoadGizmo(load.LoaderPlugin):
"""Loading nuke Gizmo"""
representations = ["gizmo"]
@@ -21,7 +27,7 @@ class LoadGizmo(api.Loader):
label = "Load Gizmo"
order = 0
icon = "dropbox"
- color = style.colors.light
+ color = "white"
node_color = "0x75338eff"
def load(self, context, name, namespace, data):
@@ -103,7 +109,7 @@ class LoadGizmo(api.Loader):
# get corresponding node
GN = nuke.toNode(container['objectName'])
- file = api.get_representation_path(representation).replace("\\", "/")
+ file = get_representation_path(representation).replace("\\", "/")
name = container['name']
version_data = version.get("data", {})
vname = version.get("name", None)
diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py b/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py
index 240bfd467d..87bebce15b 100644
--- a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py
+++ b/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py
@@ -1,5 +1,11 @@
-from avalon import api, style, io
import nuke
+
+from avalon import io
+
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
from openpype.hosts.nuke.api.lib import (
maintained_selection,
create_backdrop,
@@ -13,7 +19,7 @@ from openpype.hosts.nuke.api import (
)
-class LoadGizmoInputProcess(api.Loader):
+class LoadGizmoInputProcess(load.LoaderPlugin):
"""Loading colorspace soft effect exported from nukestudio"""
representations = ["gizmo"]
@@ -22,7 +28,7 @@ class LoadGizmoInputProcess(api.Loader):
label = "Load Gizmo - Input Process"
order = 0
icon = "eye"
- color = style.colors.alert
+ color = "#cc0000"
node_color = "0x7533c1ff"
def load(self, context, name, namespace, data):
@@ -109,7 +115,7 @@ class LoadGizmoInputProcess(api.Loader):
# get corresponding node
GN = nuke.toNode(container['objectName'])
- file = api.get_representation_path(representation).replace("\\", "/")
+ file = get_representation_path(representation).replace("\\", "/")
name = container['name']
version_data = version.get("data", {})
vname = version.get("name", None)
diff --git a/openpype/hosts/nuke/plugins/load/load_image.py b/openpype/hosts/nuke/plugins/load/load_image.py
index d36226b139..9a175a0cba 100644
--- a/openpype/hosts/nuke/plugins/load/load_image.py
+++ b/openpype/hosts/nuke/plugins/load/load_image.py
@@ -1,9 +1,12 @@
-import re
import nuke
-from avalon.vendor import qargparse
-from avalon import api, io
+import qargparse
+from avalon import io
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
from openpype.hosts.nuke.api.lib import (
get_imageio_input_colorspace
)
@@ -14,7 +17,7 @@ from openpype.hosts.nuke.api import (
)
-class LoadImage(api.Loader):
+class LoadImage(load.LoaderPlugin):
"""Load still image into Nuke"""
families = [
@@ -33,6 +36,9 @@ class LoadImage(api.Loader):
icon = "image"
color = "white"
+ # Loaded from settings
+ _representations = []
+
node_name_template = "{class_name}_{ext}"
options = [
@@ -162,7 +168,7 @@ class LoadImage(api.Loader):
repr_cont = representation["context"]
- file = api.get_representation_path(representation)
+ file = get_representation_path(representation)
if not file:
repr_id = representation["_id"]
diff --git a/openpype/hosts/nuke/plugins/load/load_matchmove.py b/openpype/hosts/nuke/plugins/load/load_matchmove.py
index 60d5dc026f..f5a90706c7 100644
--- a/openpype/hosts/nuke/plugins/load/load_matchmove.py
+++ b/openpype/hosts/nuke/plugins/load/load_matchmove.py
@@ -1,8 +1,8 @@
-from avalon import api
import nuke
+from openpype.pipeline import load
-class MatchmoveLoader(api.Loader):
+class MatchmoveLoader(load.LoaderPlugin):
"""
This will run matchmove script to create track in script.
"""
diff --git a/openpype/hosts/nuke/plugins/load/load_model.py b/openpype/hosts/nuke/plugins/load/load_model.py
index 2b52bbf00f..e445beca05 100644
--- a/openpype/hosts/nuke/plugins/load/load_model.py
+++ b/openpype/hosts/nuke/plugins/load/load_model.py
@@ -1,5 +1,9 @@
import nuke
-from avalon import api, io
+from avalon import io
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
from openpype.hosts.nuke.api.lib import maintained_selection
from openpype.hosts.nuke.api import (
containerise,
@@ -8,7 +12,7 @@ from openpype.hosts.nuke.api import (
)
-class AlembicModelLoader(api.Loader):
+class AlembicModelLoader(load.LoaderPlugin):
"""
This will load alembic model into script.
"""
@@ -124,7 +128,7 @@ class AlembicModelLoader(api.Loader):
data_imprint.update({k: version_data[k]})
# getting file path
- file = api.get_representation_path(representation).replace("\\", "/")
+ file = get_representation_path(representation).replace("\\", "/")
with maintained_selection():
model_node = nuke.toNode(object_name)
diff --git a/openpype/hosts/nuke/plugins/load/load_script_precomp.py b/openpype/hosts/nuke/plugins/load/load_script_precomp.py
index aa48b631c5..779f101682 100644
--- a/openpype/hosts/nuke/plugins/load/load_script_precomp.py
+++ b/openpype/hosts/nuke/plugins/load/load_script_precomp.py
@@ -1,5 +1,11 @@
import nuke
-from avalon import api, style, io
+
+from avalon import io
+
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
from openpype.hosts.nuke.api.lib import get_avalon_knob_data
from openpype.hosts.nuke.api import (
containerise,
@@ -8,7 +14,7 @@ from openpype.hosts.nuke.api import (
)
-class LinkAsGroup(api.Loader):
+class LinkAsGroup(load.LoaderPlugin):
"""Copy the published file to be pasted at the desired location"""
representations = ["nk"]
@@ -17,7 +23,7 @@ class LinkAsGroup(api.Loader):
label = "Load Precomp"
order = 0
icon = "file"
- color = style.colors.alert
+ color = "#cc0000"
def load(self, context, name, namespace, data):
# for k, v in context.items():
@@ -108,7 +114,7 @@ class LinkAsGroup(api.Loader):
"""
node = nuke.toNode(container['objectName'])
- root = api.get_representation_path(representation).replace("\\", "/")
+ root = get_representation_path(representation).replace("\\", "/")
# Get start frame from version data
version = io.find_one({
diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py
index 5bbc88266a..544b9e04da 100644
--- a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py
+++ b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py
@@ -1,4 +1,5 @@
import os
+import re
import pyblish.api
import openpype
from openpype.hosts.nuke.api import plugin
@@ -25,6 +26,7 @@ class ExtractReviewDataMov(openpype.api.Extractor):
def process(self, instance):
families = instance.data["families"]
task_type = instance.context.data["taskType"]
+ subset = instance.data["subset"]
self.log.info("Creating staging dir...")
if "representations" not in instance.data:
@@ -46,6 +48,7 @@ class ExtractReviewDataMov(openpype.api.Extractor):
for o_name, o_data in self.outputs.items():
f_families = o_data["filter"]["families"]
f_task_types = o_data["filter"]["task_types"]
+ f_subsets = o_data["filter"]["sebsets"]  # NOTE(review): "sebsets" looks like a typo for "subsets" — verify the settings schema uses the same key spelling
# test if family found in context
test_families = any([
@@ -69,11 +72,25 @@ class ExtractReviewDataMov(openpype.api.Extractor):
bool(not f_task_types)
])
+ # test subsets from filter
+ test_subsets = any([
+ # check if any of the subset filter inputs,
+ # converted to a regex pattern, matches the subset name
+ # (strict case sensitivity is kept)
+ bool(next((
+ s for s in f_subsets
+ if re.search(re.compile(s), subset)
+ ), None)),
+ # but if no subset filters were set, count that as a match too
+ bool(not f_subsets)
+ ])
+
# we need all filters to be positive for this
# preset to be activated
test_all = all([
test_families,
- test_task_types
+ test_task_types,
+ test_subsets
])
# if it is not positive then skip this preset
@@ -113,13 +130,22 @@ class ExtractReviewDataMov(openpype.api.Extractor):
})
else:
data = exporter.generate_mov(**o_data)
- generated_repres.extend(data["representations"])
- self.log.info(generated_repres)
+ # add representation generated by exporter
+ generated_repres.extend(data["representations"])
+ self.log.debug(
+ "__ generated_repres: {}".format(generated_repres))
if generated_repres:
# assign to representations
instance.data["representations"] += generated_repres
+ else:
+ instance.data["families"].remove("review")
+ self.log.info((
+ "Removing `review` from families. "
+ "Not available baking profile."
+ ))
+ self.log.debug(instance.data["families"])
self.log.debug(
"_ representations: {}".format(
diff --git a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py
index 50e5f995f4..e917a28046 100644
--- a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py
+++ b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py
@@ -48,8 +48,13 @@ class ExtractSlateFrame(openpype.api.Extractor):
self.log.info(
"StagingDir `{0}`...".format(instance.data["stagingDir"]))
+ frame_start = instance.data["frameStart"]
+ frame_end = instance.data["frameEnd"]
+ handle_start = instance.data["handleStart"]
+ handle_end = instance.data["handleEnd"]
+
frame_length = int(
- instance.data["frameEnd"] - instance.data["frameStart"] + 1
+ (frame_end - frame_start + 1) + (handle_start + handle_end)
)
temporary_nodes = []
diff --git a/openpype/hosts/nuke/plugins/publish/precollect_instances.py b/openpype/hosts/nuke/plugins/publish/precollect_instances.py
index 97ddef0a59..29c706f302 100644
--- a/openpype/hosts/nuke/plugins/publish/precollect_instances.py
+++ b/openpype/hosts/nuke/plugins/publish/precollect_instances.py
@@ -80,7 +80,7 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin):
# Add all nodes in group instances.
if node.Class() == "Group":
# only alter families for render family
- if "write" in families_ak.lower():
+ if families_ak and "write" in families_ak.lower():
target = node["render"].value()
if target == "Use existing frames":
# Local rendering
diff --git a/openpype/hosts/nuke/plugins/publish/precollect_writes.py b/openpype/hosts/nuke/plugins/publish/precollect_writes.py
index 189f28f7c6..85e98db7ed 100644
--- a/openpype/hosts/nuke/plugins/publish/precollect_writes.py
+++ b/openpype/hosts/nuke/plugins/publish/precollect_writes.py
@@ -3,8 +3,9 @@ import re
from pprint import pformat
import nuke
import pyblish.api
+from avalon import io
import openpype.api as pype
-from avalon import io, api
+from openpype.pipeline import get_representation_path
@pyblish.api.log
@@ -182,7 +183,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
if repre_doc:
instance.data["audio"] = [{
"offset": 0,
- "filename": api.get_representation_path(repre_doc)
+ "filename": get_representation_path(repre_doc)
}]
self.log.debug("instance.data: {}".format(pformat(instance.data)))
diff --git a/openpype/hosts/nuke/plugins/publish/validate_read_legacy.py b/openpype/hosts/nuke/plugins/publish/validate_read_legacy.py
index 22a9b3678e..2bf1ff81f8 100644
--- a/openpype/hosts/nuke/plugins/publish/validate_read_legacy.py
+++ b/openpype/hosts/nuke/plugins/publish/validate_read_legacy.py
@@ -1,12 +1,16 @@
import os
-import toml
import nuke
+import toml
import pyblish.api
-from avalon import api
from bson.objectid import ObjectId
+from openpype.pipeline import (
+ discover_loader_plugins,
+ load_container,
+)
+
class RepairReadLegacyAction(pyblish.api.Action):
@@ -49,13 +53,13 @@ class RepairReadLegacyAction(pyblish.api.Action):
loader_name = "LoadMov"
loader_plugin = None
- for Loader in api.discover(api.Loader):
+ for Loader in discover_loader_plugins():
if Loader.__name__ != loader_name:
continue
loader_plugin = Loader
- api.load(
+ load_container(
Loader=loader_plugin,
representation=ObjectId(data["representation"])
)
diff --git a/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py b/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py
index a73bed8edd..08f09f8097 100644
--- a/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py
+++ b/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py
@@ -34,9 +34,9 @@ class ValidateWriteLegacy(pyblish.api.InstancePlugin):
# test if render in family test knob
# and only one item should be available
assert len(family_test) == 1, msg + " > More avalon attributes"
- assert "render" in node[family_test[0]].value(), msg + \
+ assert "render" in node[family_test[0]].value() \
+ or "still" in node[family_test[0]].value(), msg + \
" > Not correct family"
-
# test if `file` knob in node, this way old
# non-group-node write could be detected
assert "file" not in node.knobs(), msg + \
@@ -74,6 +74,8 @@ class ValidateWriteLegacy(pyblish.api.InstancePlugin):
Create_name = "CreateWriteRender"
elif family == "prerender":
Create_name = "CreateWritePrerender"
+ elif family == "still":
+ Create_name = "CreateWriteStill"
# get appropriate plugin class
creator_plugin = None
diff --git a/openpype/hosts/photoshop/api/README.md b/openpype/hosts/photoshop/api/README.md
index b958f53803..80792a4da0 100644
--- a/openpype/hosts/photoshop/api/README.md
+++ b/openpype/hosts/photoshop/api/README.md
@@ -195,11 +195,12 @@ class ExtractImage(openpype.api.Extractor):
#### Loader Plugin
```python
from avalon import api, photoshop
+from openpype.pipeline import load, get_representation_path
stub = photoshop.stub()
-class ImageLoader(api.Loader):
+class ImageLoader(load.LoaderPlugin):
"""Load images
Stores the imported asset in a container named after the asset.
@@ -227,7 +228,7 @@ class ImageLoader(api.Loader):
with photoshop.maintained_selection():
stub.replace_smart_object(
- layer, api.get_representation_path(representation)
+ layer, get_representation_path(representation)
)
stub.imprint(
@@ -245,7 +246,7 @@ https://community.adobe.com/t5/download-install/adobe-extension-debuger-problem/
Add --enable-blink-features=ShadowDOMV0,CustomElementsV0 when starting Chrome
then localhost:8078 (port set in `photoshop\extension\.debug`)
-Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01
+Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01
Or install CEF client from https://github.com/Adobe-CEP/CEP-Resources/tree/master/CEP_9.x
## Resources
diff --git a/openpype/hosts/photoshop/api/__init__.py b/openpype/hosts/photoshop/api/__init__.py
index 4cc2aa2c78..17ea957066 100644
--- a/openpype/hosts/photoshop/api/__init__.py
+++ b/openpype/hosts/photoshop/api/__init__.py
@@ -16,7 +16,6 @@ from .pipeline import (
)
from .plugin import (
PhotoshopLoader,
- Creator,
get_unique_layer_name
)
from .workio import (
@@ -42,11 +41,11 @@ __all__ = [
"list_instances",
"remove_instance",
"install",
+ "uninstall",
"containerise",
# Plugin
"PhotoshopLoader",
- "Creator",
"get_unique_layer_name",
# workfiles
diff --git a/openpype/hosts/photoshop/api/launch_logic.py b/openpype/hosts/photoshop/api/launch_logic.py
index 16a1d23244..0021905cb5 100644
--- a/openpype/hosts/photoshop/api/launch_logic.py
+++ b/openpype/hosts/photoshop/api/launch_logic.py
@@ -14,7 +14,7 @@ from openpype.api import Logger
from openpype.tools.utils import host_tools
from avalon import api
-from avalon.tools.webserver.app import WebServerTool
+from openpype.tools.adobe_webserver.app import WebServerTool
from .ws_stub import PhotoshopServerStub
@@ -175,7 +175,7 @@ class ProcessLauncher(QtCore.QObject):
def start(self):
if self._started:
return
- self.log.info("Started launch logic of AfterEffects")
+ self.log.info("Started launch logic of Photoshop")
self._started = True
self._start_process_timer.start()
diff --git a/openpype/hosts/photoshop/api/pipeline.py b/openpype/hosts/photoshop/api/pipeline.py
index 25983f2471..e814e1ca4d 100644
--- a/openpype/hosts/photoshop/api/pipeline.py
+++ b/openpype/hosts/photoshop/api/pipeline.py
@@ -1,5 +1,4 @@
import os
-import sys
from Qt import QtWidgets
import pyblish.api
@@ -7,6 +6,12 @@ import avalon.api
from avalon import pipeline, io
from openpype.api import Logger
+from openpype.lib import register_event_callback
+from openpype.pipeline import (
+ LegacyCreator,
+ register_loader_plugin_path,
+ deregister_loader_plugin_path,
+)
import openpype.hosts.photoshop
from . import lib
@@ -67,21 +72,21 @@ def install():
pyblish.api.register_host("photoshop")
pyblish.api.register_plugin_path(PUBLISH_PATH)
- avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH)
- avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH)
+ register_loader_plugin_path(LOAD_PATH)
+ avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH)
log.info(PUBLISH_PATH)
pyblish.api.register_callback(
"instanceToggled", on_pyblish_instance_toggled
)
- avalon.api.on("application.launched", on_application_launch)
+ register_event_callback("application.launched", on_application_launch)
def uninstall():
pyblish.api.deregister_plugin_path(PUBLISH_PATH)
- avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH)
- avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH)
+ deregister_loader_plugin_path(LOAD_PATH)
+ avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH)
def ls():
diff --git a/openpype/hosts/photoshop/api/plugin.py b/openpype/hosts/photoshop/api/plugin.py
index e0db67de2c..c80e6bbd06 100644
--- a/openpype/hosts/photoshop/api/plugin.py
+++ b/openpype/hosts/photoshop/api/plugin.py
@@ -1,6 +1,6 @@
import re
-import avalon.api
+from openpype.pipeline import LoaderPlugin
from .launch_logic import stub
@@ -29,41 +29,7 @@ def get_unique_layer_name(layers, asset_name, subset_name):
return "{}_{:0>3d}".format(name, occurrences + 1)
-class PhotoshopLoader(avalon.api.Loader):
+class PhotoshopLoader(LoaderPlugin):
@staticmethod
def get_stub():
return stub()
-
-
-class Creator(avalon.api.Creator):
- """Creator plugin to create instances in Photoshop
-
- A LayerSet is created to support any number of layers in an instance. If
- the selection is used, these layers will be added to the LayerSet.
- """
-
- def process(self):
- # Photoshop can have multiple LayerSets with the same name, which does
- # not work with Avalon.
- msg = "Instance with name \"{}\" already exists.".format(self.name)
- stub = lib.stub() # only after Photoshop is up
- for layer in stub.get_layers():
- if self.name.lower() == layer.Name.lower():
- msg = QtWidgets.QMessageBox()
- msg.setIcon(QtWidgets.QMessageBox.Warning)
- msg.setText(msg)
- msg.exec_()
- return False
-
- # Store selection because adding a group will change selection.
- with lib.maintained_selection():
-
- # Add selection to group.
- if (self.options or {}).get("useSelection"):
- group = stub.group_selected_layers(self.name)
- else:
- group = stub.create_group(self.name)
-
- stub.imprint(group, self.data)
-
- return group
diff --git a/openpype/hosts/photoshop/api/ws_stub.py b/openpype/hosts/photoshop/api/ws_stub.py
index fd8377d4e0..64d89f5420 100644
--- a/openpype/hosts/photoshop/api/ws_stub.py
+++ b/openpype/hosts/photoshop/api/ws_stub.py
@@ -2,12 +2,11 @@
Stub handling connection from server to client.
Used anywhere solution is calling client methods.
"""
-import sys
import json
import attr
from wsrpc_aiohttp import WebSocketAsync
-from avalon.tools.webserver.app import WebServerTool
+from openpype.tools.adobe_webserver.app import WebServerTool
@attr.s
@@ -344,6 +343,28 @@ class PhotoshopServerStub:
)
)
+ def hide_all_others_layers(self, layers):
+ """hides all layers that are not part of the list or that are not
+ children of this list
+
+ Args:
+ layers (list): list of PSItem - highest hierarchy
+ """
+ extract_ids = set([ll.id for ll in self.get_layers_in_layers(layers)])
+
+ self.hide_all_others_layers_ids(extract_ids)
+
+ def hide_all_others_layers_ids(self, extract_ids):
+ """hides all layers that are not part of the list or that are not
+ children of this list
+
+ Args:
+ extract_ids (list): list of integer that should be visible
+ """
+ for layer in self.get_layers():
+ if layer.visible and layer.id not in extract_ids:
+ self.set_visible(layer.id, False)
+
def get_layers_metadata(self):
"""Reads layers metadata from Headline from active document in PS.
(Headline accessible by File > File Info)
diff --git a/openpype/hosts/photoshop/plugins/create/create_image.py b/openpype/hosts/photoshop/plugins/create/create_image.py
index 344a53f47e..a001b5f171 100644
--- a/openpype/hosts/photoshop/plugins/create/create_image.py
+++ b/openpype/hosts/photoshop/plugins/create/create_image.py
@@ -1,9 +1,9 @@
from Qt import QtWidgets
-import openpype.api
+from openpype.pipeline import create
from openpype.hosts.photoshop import api as photoshop
-class CreateImage(openpype.api.Creator):
+class CreateImage(create.LegacyCreator):
"""Image folder for publish."""
name = "imageDefault"
diff --git a/openpype/hosts/photoshop/plugins/load/load_image.py b/openpype/hosts/photoshop/plugins/load/load_image.py
index 3b1cfe9636..0a9421b8f2 100644
--- a/openpype/hosts/photoshop/plugins/load/load_image.py
+++ b/openpype/hosts/photoshop/plugins/load/load_image.py
@@ -1,6 +1,6 @@
import re
-from avalon import api
+from openpype.pipeline import get_representation_path
from openpype.hosts.photoshop import api as photoshop
from openpype.hosts.photoshop.api import get_unique_layer_name
@@ -54,7 +54,7 @@ class ImageLoader(photoshop.PhotoshopLoader):
else: # switching version - keep same name
layer_name = container["namespace"]
- path = api.get_representation_path(representation)
+ path = get_representation_path(representation)
with photoshop.maintained_selection():
stub.replace_smart_object(
layer, path, layer_name
diff --git a/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py b/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py
index 6627aded51..5f39121ae1 100644
--- a/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py
+++ b/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py
@@ -1,8 +1,8 @@
import os
-from avalon.pipeline import get_representation_path_from_context
-from avalon.vendor import qargparse
+import qargparse
+from openpype.pipeline import get_representation_path_from_context
from openpype.hosts.photoshop import api as photoshop
from openpype.hosts.photoshop.api import get_unique_layer_name
@@ -92,4 +92,3 @@ class ImageFromSequenceLoader(photoshop.PhotoshopLoader):
def remove(self, container):
"""No update possible, not containerized."""
pass
-
diff --git a/openpype/hosts/photoshop/plugins/load/load_reference.py b/openpype/hosts/photoshop/plugins/load/load_reference.py
index 60142d4a1f..f5f0545d39 100644
--- a/openpype/hosts/photoshop/plugins/load/load_reference.py
+++ b/openpype/hosts/photoshop/plugins/load/load_reference.py
@@ -1,7 +1,6 @@
import re
-from avalon import api
-
+from openpype.pipeline import get_representation_path
from openpype.hosts.photoshop import api as photoshop
from openpype.hosts.photoshop.api import get_unique_layer_name
@@ -55,7 +54,7 @@ class ReferenceLoader(photoshop.PhotoshopLoader):
else: # switching version - keep same name
layer_name = container["namespace"]
- path = api.get_representation_path(representation)
+ path = get_representation_path(representation)
with photoshop.maintained_selection():
stub.replace_smart_object(
layer, path, layer_name
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py b/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py
index c1ae88fbbb..7d44d55a80 100644
--- a/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py
+++ b/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py
@@ -38,10 +38,15 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin):
def process(self, context):
self.log.info("CollectColorCodedInstances")
- self.log.debug("mapping:: {}".format(self.color_code_mapping))
+ batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA")
+ if (os.environ.get("IS_TEST") and
+ (not batch_dir or not os.path.exists(batch_dir))):
+ self.log.debug("Automatic testing, no batch data, skipping")
+ return
existing_subset_names = self._get_existing_subset_names(context)
- asset_name, task_name, variant = self._parse_batch()
+
+ asset_name, task_name, variant = self._parse_batch(batch_dir)
stub = photoshop.stub()
layers = stub.get_layers()
@@ -125,9 +130,8 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin):
return existing_subset_names
- def _parse_batch(self):
+ def _parse_batch(self, batch_dir):
"""Parses asset_name, task_name, variant from batch manifest."""
- batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA")
task_data = None
if batch_dir and os.path.exists(batch_dir):
task_data = parse_json(os.path.join(batch_dir,
diff --git a/openpype/hosts/photoshop/plugins/publish/extract_image.py b/openpype/hosts/photoshop/plugins/publish/extract_image.py
index beb904215b..04ce77ee34 100644
--- a/openpype/hosts/photoshop/plugins/publish/extract_image.py
+++ b/openpype/hosts/photoshop/plugins/publish/extract_image.py
@@ -26,7 +26,6 @@ class ExtractImage(openpype.api.Extractor):
with photoshop.maintained_selection():
self.log.info("Extracting %s" % str(list(instance)))
with photoshop.maintained_visibility():
- # Hide all other layers.
layer = instance.data.get("layer")
ids = set([layer.id])
add_ids = instance.data.pop("ids", None)
@@ -34,11 +33,7 @@ class ExtractImage(openpype.api.Extractor):
ids.update(set(add_ids))
extract_ids = set([ll.id for ll in stub.
get_layers_in_layers_ids(ids)])
-
- for layer in stub.get_layers():
- # limit unnecessary calls to client
- if layer.visible and layer.id not in extract_ids:
- stub.set_visible(layer.id, False)
+ stub.hide_all_others_layers_ids(extract_ids)
file_basename = os.path.splitext(
stub.get_active_document_name()
diff --git a/openpype/hosts/photoshop/plugins/publish/extract_review.py b/openpype/hosts/photoshop/plugins/publish/extract_review.py
index b6c7e2d189..b8f4470c7b 100644
--- a/openpype/hosts/photoshop/plugins/publish/extract_review.py
+++ b/openpype/hosts/photoshop/plugins/publish/extract_review.py
@@ -1,4 +1,5 @@
import os
+import shutil
import openpype.api
import openpype.lib
@@ -7,7 +8,7 @@ from openpype.hosts.photoshop import api as photoshop
class ExtractReview(openpype.api.Extractor):
"""
- Produce a flattened image file from all 'image' instances.
+ Produce a flattened or sequence image file from all 'image' instances.
If no 'image' instance is created, it produces flattened image from
all visible layers.
@@ -20,54 +21,58 @@ class ExtractReview(openpype.api.Extractor):
# Extract Options
jpg_options = None
mov_options = None
+ make_image_sequence = None
def process(self, instance):
staging_dir = self.staging_dir(instance)
self.log.info("Outputting image to {}".format(staging_dir))
+ fps = instance.data.get("fps", 25)
stub = photoshop.stub()
+ self.output_seq_filename = os.path.splitext(
+ stub.get_active_document_name())[0] + ".%04d.jpg"
- layers = []
- for image_instance in instance.context:
- if image_instance.data["family"] != "image":
- continue
- layers.append(image_instance.data.get("layer"))
+ layers = self._get_layers_from_image_instances(instance)
+ self.log.info("Layers image instance found: {}".format(layers))
- # Perform extraction
- output_image = "{}.jpg".format(
- os.path.splitext(stub.get_active_document_name())[0]
- )
- output_image_path = os.path.join(staging_dir, output_image)
- with photoshop.maintained_visibility():
- if layers:
- # Hide all other layers.
- extract_ids = set([ll.id for ll in stub.
- get_layers_in_layers(layers)])
- self.log.debug("extract_ids {}".format(extract_ids))
- for layer in stub.get_layers():
- # limit unnecessary calls to client
- if layer.visible and layer.id not in extract_ids:
- stub.set_visible(layer.id, False)
+ if self.make_image_sequence and len(layers) > 1:
+ self.log.info("Extract layers to image sequence.")
+ img_list = self._saves_sequences_layers(staging_dir, layers)
- stub.saveAs(output_image_path, 'jpg', True)
+ instance.data["representations"].append({
+ "name": "jpg",
+ "ext": "jpg",
+ "files": img_list,
+ "frameStart": 0,
+ "frameEnd": len(img_list),
+ "fps": fps,
+ "stagingDir": staging_dir,
+ "tags": self.jpg_options['tags'],
+ })
+
+ else:
+ self.log.info("Extract layers to flatten image.")
+ img_list = self._saves_flattened_layers(staging_dir, layers)
+
+ instance.data["representations"].append({
+ "name": "jpg",
+ "ext": "jpg",
+ "files": img_list,
+ "stagingDir": staging_dir,
+ "tags": self.jpg_options['tags']
+ })
ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg")
- instance.data["representations"].append({
- "name": "jpg",
- "ext": "jpg",
- "files": output_image,
- "stagingDir": staging_dir,
- "tags": self.jpg_options['tags']
- })
instance.data["stagingDir"] = staging_dir
# Generate thumbnail.
thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg")
+ self.log.info(f"Generate thumbnail {thumbnail_path}")
args = [
ffmpeg_path,
"-y",
- "-i", output_image_path,
+ "-i", os.path.join(staging_dir, self.output_seq_filename),
"-vf", "scale=300:-1",
"-vframes", "1",
thumbnail_path
@@ -81,14 +86,17 @@ class ExtractReview(openpype.api.Extractor):
"stagingDir": staging_dir,
"tags": ["thumbnail"]
})
+
# Generate mov.
mov_path = os.path.join(staging_dir, "review.mov")
+ self.log.info(f"Generate mov review: {mov_path}")
+ img_number = len(img_list)
args = [
ffmpeg_path,
"-y",
- "-i", output_image_path,
+ "-i", os.path.join(staging_dir, self.output_seq_filename),
"-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2",
- "-vframes", "1",
+ "-vframes", str(img_number),
mov_path
]
output = openpype.lib.run_subprocess(args)
@@ -99,15 +107,86 @@ class ExtractReview(openpype.api.Extractor):
"files": os.path.basename(mov_path),
"stagingDir": staging_dir,
"frameStart": 1,
- "frameEnd": 1,
- "fps": 25,
+ "frameEnd": img_number,
+ "fps": fps,
"preview": True,
"tags": self.mov_options['tags']
})
# Required for extract_review plugin (L222 onwards).
instance.data["frameStart"] = 1
- instance.data["frameEnd"] = 1
+ instance.data["frameEnd"] = img_number
instance.data["fps"] = 25
self.log.info(f"Extracted {instance} to {staging_dir}")
+
+ def _get_image_path_from_instances(self, instance):
+ img_list = []
+
+ for instance in sorted(instance.context):
+ if instance.data["family"] != "image":
+ continue
+
+ for rep in instance.data["representations"]:
+ img_path = os.path.join(
+ rep["stagingDir"],
+ rep["files"]
+ )
+ img_list.append(img_path)
+
+ return img_list
+
+ def _copy_image_to_staging_dir(self, staging_dir, img_list):
+ copy_files = []
+ for i, img_src in enumerate(img_list):
+ img_filename = self.output_seq_filename % i
+ img_dst = os.path.join(staging_dir, img_filename)
+
+ self.log.debug(
+ "Copying file .. {} -> {}".format(img_src, img_dst)
+ )
+ shutil.copy(img_src, img_dst)
+ copy_files.append(img_filename)
+
+ return copy_files
+
+ def _get_layers_from_image_instances(self, instance):
+ layers = []
+ for image_instance in instance.context:
+ if image_instance.data["family"] != "image":
+ continue
+ layers.append(image_instance.data.get("layer"))
+
+ return sorted(layers)
+
+ def _saves_flattened_layers(self, staging_dir, layers):
+ img_filename = self.output_seq_filename % 0
+ output_image_path = os.path.join(staging_dir, img_filename)
+ stub = photoshop.stub()
+
+ with photoshop.maintained_visibility():
+ self.log.info("Extracting {}".format(layers))
+ if layers:
+ stub.hide_all_others_layers(layers)
+
+ stub.saveAs(output_image_path, 'jpg', True)
+
+ return img_filename
+
+ def _saves_sequences_layers(self, staging_dir, layers):
+ stub = photoshop.stub()
+
+ list_img_filename = []
+ with photoshop.maintained_visibility():
+ for i, layer in enumerate(layers):
+ self.log.info("Extracting {}".format(layer))
+
+ img_filename = self.output_seq_filename % i
+ output_image_path = os.path.join(staging_dir, img_filename)
+ list_img_filename.append(img_filename)
+
+ with photoshop.maintained_visibility():
+ stub.hide_all_others_layers([layer])
+ stub.saveAs(output_image_path, 'jpg', True)
+
+ return list_img_filename
diff --git a/openpype/hosts/resolve/api/pipeline.py b/openpype/hosts/resolve/api/pipeline.py
index 2dc5136c8a..fa309e3503 100644
--- a/openpype/hosts/resolve/api/pipeline.py
+++ b/openpype/hosts/resolve/api/pipeline.py
@@ -9,6 +9,11 @@ from avalon import schema
from avalon.pipeline import AVALON_CONTAINER_ID
from pyblish import api as pyblish
from openpype.api import Logger
+from openpype.pipeline import (
+ LegacyCreator,
+ register_loader_plugin_path,
+ deregister_loader_plugin_path,
+)
from . import lib
from . import PLUGINS_DIR
from openpype.tools.utils import host_tools
@@ -41,8 +46,8 @@ def install():
pyblish.register_plugin_path(PUBLISH_PATH)
log.info("Registering DaVinci Resovle plug-ins..")
- avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
- avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
+ register_loader_plugin_path(LOAD_PATH)
+ avalon.register_plugin_path(LegacyCreator, CREATE_PATH)
avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
# register callback for switching publishable
@@ -66,8 +71,8 @@ def uninstall():
pyblish.deregister_plugin_path(PUBLISH_PATH)
log.info("Deregistering DaVinci Resovle plug-ins..")
- avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
- avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
+ deregister_loader_plugin_path(LOAD_PATH)
+ avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH)
avalon.deregister_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
# register callback for switching publishable
diff --git a/openpype/hosts/resolve/api/plugin.py b/openpype/hosts/resolve/api/plugin.py
index 8612cf82ec..8e1436021c 100644
--- a/openpype/hosts/resolve/api/plugin.py
+++ b/openpype/hosts/resolve/api/plugin.py
@@ -1,13 +1,17 @@
import re
import uuid
-from avalon import api
-import openpype.api as pype
-from openpype.hosts import resolve
-from avalon.vendor import qargparse
-from . import lib
+import qargparse
from Qt import QtWidgets, QtCore
+import openpype.api as pype
+from openpype.pipeline import (
+ LegacyCreator,
+ LoaderPlugin,
+)
+from openpype.hosts import resolve
+from . import lib
+
class CreatorWidget(QtWidgets.QDialog):
@@ -289,7 +293,7 @@ class ClipLoader:
""" Initialize object
Arguments:
- cls (avalon.api.Loader): plugin object
+ cls (openpype.pipeline.load.LoaderPlugin): plugin object
context (dict): loader plugin context
options (dict)[optional]: possible keys:
projectBinPath: "path/to/binItem"
@@ -445,7 +449,7 @@ class ClipLoader:
return timeline_item
-class TimelineItemLoader(api.Loader):
+class TimelineItemLoader(LoaderPlugin):
"""A basic SequenceLoader for Resolve
This will implement the basic behavior for a loader to inherit from that
@@ -493,7 +497,7 @@ class TimelineItemLoader(api.Loader):
pass
-class Creator(pype.PypeCreatorMixin, api.Creator):
+class Creator(LegacyCreator):
"""Creator class wrapper
"""
marker_color = "Purple"
diff --git a/openpype/hosts/resolve/api/utils.py b/openpype/hosts/resolve/api/utils.py
index 3dee17cb01..9b3762f328 100644
--- a/openpype/hosts/resolve/api/utils.py
+++ b/openpype/hosts/resolve/api/utils.py
@@ -70,9 +70,9 @@ def get_resolve_module():
sys.exit()
# assign global var and return
bmdvr = bmd.scriptapp("Resolve")
- # bmdvf = bmd.scriptapp("Fusion")
+ bmdvf = bmd.scriptapp("Fusion")
resolve.api.bmdvr = bmdvr
- resolve.api.bmdvf = bmdvr.Fusion()
+ resolve.api.bmdvf = bmdvf
log.info(("Assigning resolve module to "
f"`pype.hosts.resolve.api.bmdvr`: {resolve.api.bmdvr}"))
log.info(("Assigning resolve module to "
diff --git a/openpype/hosts/resolve/plugins/load/load_clip.py b/openpype/hosts/resolve/plugins/load/load_clip.py
index e20384ee6c..71850d95f6 100644
--- a/openpype/hosts/resolve/plugins/load/load_clip.py
+++ b/openpype/hosts/resolve/plugins/load/load_clip.py
@@ -1,11 +1,14 @@
-from avalon import io, api
-from openpype.hosts import resolve
from copy import deepcopy
from importlib import reload
+
+from avalon import io
+from openpype.hosts import resolve
+from openpype.pipeline import get_representation_path
from openpype.hosts.resolve.api import lib, plugin
reload(plugin)
reload(lib)
+
class LoadClip(resolve.TimelineItemLoader):
"""Load a subset to timeline as clip
@@ -99,7 +102,7 @@ class LoadClip(resolve.TimelineItemLoader):
version_name = version.get("name", None)
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
- self.fname = api.get_representation_path(representation)
+ self.fname = get_representation_path(representation)
context["version"] = {"data": version_data}
loader = resolve.ClipLoader(self, context)
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py
index 596a8ccfd2..ea0b6cdf41 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py
@@ -81,14 +81,10 @@ class CollectTextures(pyblish.api.ContextPlugin):
parsed_subset = instance.data["subset"].replace(
instance.data["family"], '')
- fill_pairs = {
+ explicit_data = {
"subset": parsed_subset
}
- fill_pairs = prepare_template_data(fill_pairs)
- workfile_subset = format_template_with_optional_keys(
- fill_pairs, self.workfile_subset_template)
-
processed_instance = False
for repre in instance.data["representations"]:
ext = repre["ext"].replace('.', '')
@@ -102,6 +98,21 @@ class CollectTextures(pyblish.api.ContextPlugin):
if ext in self.main_workfile_extensions or \
ext in self.other_workfile_extensions:
+ formatting_data = self._get_parsed_groups(
+ repre_file,
+ self.input_naming_patterns["workfile"],
+ self.input_naming_groups["workfile"],
+ self.color_space
+ )
+ self.log.info("Parsed groups from workfile "
+ "name '{}': {}".format(repre_file,
+ formatting_data))
+
+ formatting_data.update(explicit_data)
+ fill_pairs = prepare_template_data(formatting_data)
+ workfile_subset = format_template_with_optional_keys(
+ fill_pairs, self.workfile_subset_template)
+
asset_build = self._get_asset_build(
repre_file,
self.input_naming_patterns["workfile"],
@@ -148,11 +159,23 @@ class CollectTextures(pyblish.api.ContextPlugin):
resource_files[workfile_subset].append(item)
if ext in self.texture_extensions:
+ formatting_data = self._get_parsed_groups(
+ repre_file,
+ self.input_naming_patterns["textures"],
+ self.input_naming_groups["textures"],
+ self.color_space
+ )
+
+ self.log.info("Parsed groups from texture "
+ "name '{}': {}".format(repre_file,
+ formatting_data))
+
c_space = self._get_color_space(
repre_file,
self.color_space
)
+ # optional value
channel = self._get_channel_name(
repre_file,
self.input_naming_patterns["textures"],
@@ -160,6 +183,7 @@ class CollectTextures(pyblish.api.ContextPlugin):
self.color_space
)
+ # optional value
shader = self._get_shader_name(
repre_file,
self.input_naming_patterns["textures"],
@@ -167,13 +191,15 @@ class CollectTextures(pyblish.api.ContextPlugin):
self.color_space
)
- formatting_data = {
+ explicit_data = {
"color_space": c_space or '', # None throws exception
"channel": channel or '',
"shader": shader or '',
"subset": parsed_subset or ''
}
+ formatting_data.update(explicit_data)
+
fill_pairs = prepare_template_data(formatting_data)
subset = format_template_with_optional_keys(
fill_pairs, self.texture_subset_template)
@@ -243,6 +269,13 @@ class CollectTextures(pyblish.api.ContextPlugin):
for asset_build, version, subset, family in asset_builds:
if not main_version:
main_version = version
+
+ try:
+ version_int = int(version or main_version or 1)
+ except ValueError:
+ self.log.error("Parsed version {} is not "
+                                      "a number".format(version))
+
new_instance = context.create_instance(subset)
new_instance.data.update(
{
@@ -251,7 +284,7 @@ class CollectTextures(pyblish.api.ContextPlugin):
"label": subset,
"name": subset,
"family": family,
- "version": int(version or main_version or 1),
+ "version": version_int,
"asset_build": asset_build # remove in validator
}
)
@@ -320,13 +353,14 @@ class CollectTextures(pyblish.api.ContextPlugin):
"""
asset_name = "NOT_AVAIL"
- return self._parse(name, input_naming_patterns, input_naming_groups,
- color_spaces, 'asset') or asset_name
+ return (self._parse_key(name, input_naming_patterns,
+ input_naming_groups, color_spaces, 'asset') or
+ asset_name)
def _get_version(self, name, input_naming_patterns, input_naming_groups,
color_spaces):
- found = self._parse(name, input_naming_patterns, input_naming_groups,
- color_spaces, 'version')
+ found = self._parse_key(name, input_naming_patterns,
+ input_naming_groups, color_spaces, 'version')
if found:
return found.replace('v', '')
@@ -336,8 +370,8 @@ class CollectTextures(pyblish.api.ContextPlugin):
def _get_udim(self, name, input_naming_patterns, input_naming_groups,
color_spaces):
"""Parses from 'name' udim value."""
- found = self._parse(name, input_naming_patterns, input_naming_groups,
- color_spaces, 'udim')
+ found = self._parse_key(name, input_naming_patterns,
+ input_naming_groups, color_spaces, 'udim')
if found:
return found
@@ -375,12 +409,15 @@ class CollectTextures(pyblish.api.ContextPlugin):
Unknown format of channel name and color spaces >> cs are known
list - 'color_space' used as a placeholder
"""
- found = self._parse(name, input_naming_patterns, input_naming_groups,
- color_spaces, 'shader')
- if found:
- return found
+ found = None
+ try:
+ found = self._parse_key(name, input_naming_patterns,
+ input_naming_groups, color_spaces,
+ 'shader')
+ except ValueError:
+ self.log.warning("Didn't find shader in {}".format(name))
- self.log.warning("Didn't find shader in {}".format(name))
+ return found
def _get_channel_name(self, name, input_naming_patterns,
input_naming_groups, color_spaces):
@@ -389,15 +426,18 @@ class CollectTextures(pyblish.api.ContextPlugin):
Unknown format of channel name and color spaces >> cs are known
list - 'color_space' used as a placeholder
"""
- found = self._parse(name, input_naming_patterns, input_naming_groups,
- color_spaces, 'channel')
- if found:
- return found
+ found = None
+ try:
+ found = self._parse_key(name, input_naming_patterns,
+ input_naming_groups, color_spaces,
+ 'channel')
+ except ValueError:
+ self.log.warning("Didn't find channel in {}".format(name))
- self.log.warning("Didn't find channel in {}".format(name))
+ return found
- def _parse(self, name, input_naming_patterns, input_naming_groups,
- color_spaces, key):
+ def _parse_key(self, name, input_naming_patterns, input_naming_groups,
+ color_spaces, key):
"""Universal way to parse 'name' with configurable regex groups.
Args:
@@ -411,23 +451,47 @@ class CollectTextures(pyblish.api.ContextPlugin):
Raises:
ValueError - if broken 'input_naming_groups'
"""
+ parsed_groups = self._get_parsed_groups(name,
+ input_naming_patterns,
+ input_naming_groups,
+ color_spaces)
+
+ try:
+ parsed_value = parsed_groups[key]
+ return parsed_value
+ except (IndexError, KeyError):
+ msg = ("'Textures group positions' must " +
+ "have '{}' key".format(key))
+ raise ValueError(msg)
+
+ def _get_parsed_groups(self, name, input_naming_patterns,
+ input_naming_groups, color_spaces):
+ """Universal way to parse 'name' with configurable regex groups.
+
+ Args:
+ name (str): workfile name or texture name
+ input_naming_patterns (list):
+ [workfile_pattern] or [texture_pattern]
+ input_naming_groups (list)
+ ordinal position of regex groups matching to input_naming..
+ color_spaces (list) - predefined color spaces
+
+ Returns:
+ (dict) {group_name:parsed_value}
+ """
for input_pattern in input_naming_patterns:
for cs in color_spaces:
pattern = input_pattern.replace('{color_space}', cs)
regex_result = re.findall(pattern, name)
if regex_result:
- idx = list(input_naming_groups).index(key)
- if idx < 0:
- msg = "input_naming_groups must " +\
- "have '{}' key".format(key)
- raise ValueError(msg)
+ if len(regex_result[0]) == len(input_naming_groups):
+ return dict(zip(input_naming_groups, regex_result[0]))
+ else:
+ self.log.warning("No of parsed groups doesn't match "
+ "no of group labels")
- try:
- parsed_value = regex_result[0][idx]
- return parsed_value
- except IndexError:
- self.log.warning("Wrong index, probably "
- "wrong name {}".format(name))
+ raise ValueError("Name '{}' cannot be parsed by any "
+ "'{}' patterns".format(name, input_naming_patterns))
def _update_representations(self, upd_representations):
"""Frames dont have sense for textures, add collected udims instead."""
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py
index c18de5bc1c..f327895b83 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py
@@ -2,6 +2,9 @@ import os
import pyblish.api
import openpype.api
+from openpype.lib import (
+ get_ffmpeg_tool_path,
+)
from pprint import pformat
@@ -27,7 +30,7 @@ class ExtractTrimVideoAudio(openpype.api.Extractor):
instance.data["representations"] = list()
# get ffmpet path
- ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg")
+ ffmpeg_path = get_ffmpeg_tool_path("ffmpeg")
# get staging dir
staging_dir = self.staging_dir(instance)
@@ -44,7 +47,7 @@ class ExtractTrimVideoAudio(openpype.api.Extractor):
clip_trimed_path = os.path.join(
staging_dir, instance.data["name"] + ext)
# # check video file metadata
- # input_data = plib.ffprobe_streams(video_file_path)[0]
+ # input_data = plib.get_ffprobe_streams(video_file_path)[0]
# self.log.debug(f"__ input_data: `{input_data}`")
start = float(instance.data["clipInH"])
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_editorial_resources.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_editorial_resources.xml
new file mode 100644
index 0000000000..803de6bf11
--- /dev/null
+++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_editorial_resources.xml
@@ -0,0 +1,17 @@
+
+
+
+Missing source video file
+
+## No attached video file found
+
+The process expects a source video file with the same name prefix as the editorial file, located in the same folder.
+(example: `simple_editorial_setup_Layer1.edl` expects `simple_editorial_setup.mp4` in the same folder)
+
+
+### How to repair?
+
+Copy source video file to the folder next to `.edl` file. (On a disk, do not put it into Standalone Publisher.)
+
+
+
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_frame_ranges.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_frame_ranges.xml
new file mode 100644
index 0000000000..933df1c7c5
--- /dev/null
+++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_frame_ranges.xml
@@ -0,0 +1,15 @@
+
+
+
+Invalid frame range
+
+## Invalid frame range
+
+Expected duration or '{duration}' frames set in database, workfile contains only '{found}' frames.
+
+### How to repair?
+
+Modify configuration in the database or tweak frame range in the workfile.
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_shot_duplicates.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_shot_duplicates.xml
new file mode 100644
index 0000000000..77b8727162
--- /dev/null
+++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_shot_duplicates.xml
@@ -0,0 +1,15 @@
+
+
+
+Duplicate shots
+
+## Duplicate shot names
+
+Process contains duplicated shot names '{duplicates_str}'.
+
+### How to repair?
+
+Remove shot duplicates.
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_sources.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_sources.xml
new file mode 100644
index 0000000000..d527d2173e
--- /dev/null
+++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_sources.xml
@@ -0,0 +1,16 @@
+
+
+
+Files not found
+
+## Source files not found
+
+These source files were not found:
+'{files_not_found}'
+
+### How to repair?
+
+Add missing files or run Publish again to collect new publishable files.
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_task_existence.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_task_existence.xml
new file mode 100644
index 0000000000..a943f560d0
--- /dev/null
+++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_task_existence.xml
@@ -0,0 +1,16 @@
+
+
+
+Task not found
+
+## Task not found in database
+
+Process contains tasks that don't exist in database:
+'{task_not_found}'
+
+### How to repair?
+
+Remove the set task, or add the task into the database at the proper place.
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_batch.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_batch.xml
new file mode 100644
index 0000000000..a645df8d02
--- /dev/null
+++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_batch.xml
@@ -0,0 +1,15 @@
+
+
+
+No texture files found
+
+## Batch doesn't contain texture files
+
+Batch must contain at least one texture file.
+
+### How to repair?
+
+Add texture file to the batch or check name if it follows naming convention to match texture files to the batch.
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_has_workfile.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_has_workfile.xml
new file mode 100644
index 0000000000..077987a96d
--- /dev/null
+++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_has_workfile.xml
@@ -0,0 +1,15 @@
+
+
+
+No workfile found
+
+## Batch should contain workfile
+
+It is expected that the publish also contains the workfile that served as a source for the textures.
+
+### How to repair?
+
+Add workfile to the batch, or disable this validator if you do not want workfile published.
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_name.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_name.xml
new file mode 100644
index 0000000000..2610917736
--- /dev/null
+++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_name.xml
@@ -0,0 +1,32 @@
+
+
+
+Asset name not found
+
+## Couldn't parse asset name from a file
+
+Unable to parse asset name from '{file_name}'. File name doesn't match configured naming convention.
+
+### How to repair?
+
+Check Settings: project_settings/standalonepublisher/publish/CollectTextures for naming convention.
+
+
+### __Detailed Info__ (optional)
+
+This error happens when parsing cannot figure out name of asset texture files belong under.
+
+
+
+Missing keys
+
+## Texture file name is missing some required keys
+
+Texture '{file_name}' is missing values for {missing_str} keys.
+
+### How to repair?
+
+Fix name of texture file and Publish again.
+
+
+
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_versions.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_versions.xml
new file mode 100644
index 0000000000..1e536e604f
--- /dev/null
+++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_versions.xml
@@ -0,0 +1,35 @@
+
+
+
+Texture version
+
+## Texture version mismatch with workfile
+
+Workfile '{file_name}' version doesn't match with '{version}' of a texture.
+
+### How to repair?
+
+Rename either the workfile or the texture so that they contain matching versions.
+
+
+### __Detailed Info__ (optional)
+
+This might happen if you are trying to publish textures for older version of workfile (or the other way).
+(Eg. publishing 'workfile_v001' and 'texture_file_v002')
+
+
+
+Too many versions
+
+## Too many versions published at same time
+
+It is currently expected to publish only batch with single version.
+
+Found {found} versions.
+
+### How to repair?
+
+Please remove files with different version and split publishing into multiple steps.
+
+
+
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_workfiles.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_workfiles.xml
new file mode 100644
index 0000000000..8187eb0bc8
--- /dev/null
+++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_workfiles.xml
@@ -0,0 +1,23 @@
+
+
+
+No secondary workfile
+
+## No secondary workfile found
+
+Current process expects that primary workfile (for example with a extension '{extension}') will contain also 'secondary' workfile.
+
+Secondary workfile for '{file_name}' wasn't found.
+
+### How to repair?
+
+Attach secondary workfile or disable this validator and Publish again.
+
+
+### __Detailed Info__ (optional)
+
+This process was implemented for a possible use case of first workfile coming from Mari, secondary workfile for textures from Substance.
+Publish should contain both if primary workfile is present.
+
+
+
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py
index 6759b87ceb..afb828474d 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py
@@ -1,5 +1,6 @@
import pyblish.api
import openpype.api
+from openpype.pipeline import PublishXmlValidationError
class ValidateEditorialResources(pyblish.api.InstancePlugin):
@@ -19,5 +20,7 @@ class ValidateEditorialResources(pyblish.api.InstancePlugin):
f"Instance: {instance}, Families: "
f"{[instance.data['family']] + instance.data['families']}")
check_file = instance.data["editorialSourcePath"]
- msg = f"Missing \"{check_file}\"."
- assert check_file, msg
+ msg = "Missing source video file."
+
+ if not check_file:
+ raise PublishXmlValidationError(self, msg)
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py
index 943cb73b98..005157af62 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py
@@ -1,8 +1,10 @@
import re
import pyblish.api
+
import openpype.api
from openpype import lib
+from openpype.pipeline import PublishXmlValidationError
class ValidateFrameRange(pyblish.api.InstancePlugin):
@@ -48,9 +50,15 @@ class ValidateFrameRange(pyblish.api.InstancePlugin):
files = [files]
frames = len(files)
- err_msg = "Frame duration from DB:'{}' ". format(int(duration)) +\
- " doesn't match number of files:'{}'".format(frames) +\
- " Please change frame range for Asset or limit no. of files"
- assert frames == duration, err_msg
+ msg = "Frame duration from DB:'{}' ". format(int(duration)) +\
+ " doesn't match number of files:'{}'".format(frames) +\
+ " Please change frame range for Asset or limit no. of files"
- self.log.debug("Valid ranges {} - {}".format(int(duration), frames))
+ formatting_data = {"duration": duration,
+ "found": frames}
+ if frames != duration:
+ raise PublishXmlValidationError(self, msg,
+ formatting_data=formatting_data)
+
+ self.log.debug("Valid ranges expected '{}' - found '{}'".
+ format(int(duration), frames))
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py
index 85ec9379ce..fe655f6b74 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py
@@ -1,6 +1,7 @@
import pyblish.api
-import openpype.api
+import openpype.api
+from openpype.pipeline import PublishXmlValidationError
class ValidateShotDuplicates(pyblish.api.ContextPlugin):
"""Validating no duplicate names are in context."""
@@ -20,4 +21,8 @@ class ValidateShotDuplicates(pyblish.api.ContextPlugin):
shot_names.append(name)
msg = "There are duplicate shot names:\n{}".format(duplicate_names)
- assert not duplicate_names, msg
+
+ formatting_data = {"duplicates_str": ','.join(duplicate_names)}
+ if duplicate_names:
+ raise PublishXmlValidationError(self, msg,
+ formatting_data=formatting_data)
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py
index eec675e97f..316f58988f 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py
@@ -1,8 +1,10 @@
-import pyblish.api
-import openpype.api
-
import os
+import pyblish.api
+
+import openpype.api
+from openpype.pipeline import PublishXmlValidationError
+
class ValidateSources(pyblish.api.InstancePlugin):
"""Validates source files.
@@ -11,7 +13,6 @@ class ValidateSources(pyblish.api.InstancePlugin):
got deleted between starting of SP and now.
"""
-
order = openpype.api.ValidateContentsOrder
label = "Check source files"
@@ -22,6 +23,7 @@ class ValidateSources(pyblish.api.InstancePlugin):
def process(self, instance):
self.log.info("instance {}".format(instance.data))
+ missing_files = set()
for repre in instance.data.get("representations") or []:
files = []
if isinstance(repre["files"], str):
@@ -34,4 +36,10 @@ class ValidateSources(pyblish.api.InstancePlugin):
file_name)
if not os.path.exists(source_file):
- raise ValueError("File {} not found".format(source_file))
+ missing_files.add(source_file)
+
+ msg = "Files '{}' not found".format(','.join(missing_files))
+ formatting_data = {"files_not_found": ' - {}'.join(missing_files)}
+ if missing_files:
+ raise PublishXmlValidationError(self, msg,
+ formatting_data=formatting_data)
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py
index e3b2ae1646..825092c81b 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py
@@ -1,6 +1,8 @@
import pyblish.api
from avalon import io
+from openpype.pipeline import PublishXmlValidationError
+
class ValidateTaskExistence(pyblish.api.ContextPlugin):
"""Validating tasks on instances are filled and existing."""
@@ -53,4 +55,9 @@ class ValidateTaskExistence(pyblish.api.ContextPlugin):
"Asset: \"{}\" Task: \"{}\"".format(*missing_pair)
)
- raise AssertionError(msg.format("\n".join(pair_msgs)))
+ msg = msg.format("\n".join(pair_msgs))
+
+ formatting_data = {"task_not_found": ' - {}'.join(pair_msgs)}
+ if pair_msgs:
+ raise PublishXmlValidationError(self, msg,
+ formatting_data=formatting_data)
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py
index d592a4a059..d66fb257bb 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py
@@ -1,6 +1,8 @@
import pyblish.api
import openpype.api
+from openpype.pipeline import PublishXmlValidationError
+
class ValidateTextureBatch(pyblish.api.InstancePlugin):
"""Validates that some texture files are present."""
@@ -15,8 +17,10 @@ class ValidateTextureBatch(pyblish.api.InstancePlugin):
present = False
for instance in instance.context:
if instance.data["family"] == "textures":
- self.log.info("Some textures present.")
+ self.log.info("At least some textures present.")
return
- assert present, "No textures found in published batch!"
+ msg = "No textures found in published batch!"
+ if not present:
+ raise PublishXmlValidationError(self, msg)
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py
index 7cd540668c..0e67464f59 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py
@@ -1,5 +1,7 @@
import pyblish.api
+
import openpype.api
+from openpype.pipeline import PublishXmlValidationError
class ValidateTextureHasWorkfile(pyblish.api.InstancePlugin):
@@ -17,4 +19,6 @@ class ValidateTextureHasWorkfile(pyblish.api.InstancePlugin):
def process(self, instance):
wfile = instance.data["versionData"].get("workfile")
- assert wfile, "Textures are missing attached workfile"
+ msg = "Textures are missing attached workfile"
+ if not wfile:
+ raise PublishXmlValidationError(self, msg)
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py
index 4bafe81020..751ad917ca 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py
@@ -1,6 +1,7 @@
import pyblish.api
-import openpype.api
+import openpype.api
+from openpype.pipeline import PublishXmlValidationError
class ValidateTextureBatchNaming(pyblish.api.InstancePlugin):
"""Validates that all instances had properly formatted name."""
@@ -19,9 +20,13 @@ class ValidateTextureBatchNaming(pyblish.api.InstancePlugin):
msg = "Couldn't find asset name in '{}'\n".format(file_name) + \
"File name doesn't follow configured pattern.\n" + \
"Please rename the file."
- assert "NOT_AVAIL" not in instance.data["asset_build"], msg
- instance.data.pop("asset_build")
+ formatting_data = {"file_name": file_name}
+ if "NOT_AVAIL" in instance.data["asset_build"]:
+ raise PublishXmlValidationError(self, msg,
+ formatting_data=formatting_data)
+
+ instance.data.pop("asset_build") # not needed anymore
if instance.data["family"] == "textures":
file_name = instance.data["representations"][0]["files"][0]
@@ -47,4 +52,10 @@ class ValidateTextureBatchNaming(pyblish.api.InstancePlugin):
"Name of the texture file doesn't match expected pattern.\n" + \
"Please rename file(s) {}".format(file_name)
- assert not missing_key_values, msg
+ missing_str = ','.join(["'{}'".format(key)
+ for key in missing_key_values])
+ formatting_data = {"file_name": file_name,
+ "missing_str": missing_str}
+ if missing_key_values:
+ raise PublishXmlValidationError(self, msg, key="missing_values",
+ formatting_data=formatting_data)
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py
index 90d0e8e512..84d9def895 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py
@@ -1,5 +1,7 @@
import pyblish.api
+
import openpype.api
+from openpype.pipeline import PublishXmlValidationError
class ValidateTextureBatchVersions(pyblish.api.InstancePlugin):
@@ -25,14 +27,21 @@ class ValidateTextureBatchVersions(pyblish.api.InstancePlugin):
self.log.info("No workfile present for textures")
return
- msg = "Not matching version: texture v{:03d} - workfile {}"
- assert version_str in wfile, \
+ if version_str not in wfile:
+ msg = "Not matching version: texture v{:03d} - workfile {}"
msg.format(
instance.data["version"], wfile
)
+ raise PublishXmlValidationError(self, msg)
present_versions = set()
for instance in instance.context:
present_versions.add(instance.data["version"])
- assert len(present_versions) == 1, "Too many versions in a batch!"
+ if len(present_versions) != 1:
+ msg = "Too many versions in a batch!"
+ found = ','.join(["'{}'".format(val) for val in present_versions])
+ formatting_data = {"found": found}
+
+ raise PublishXmlValidationError(self, msg, key="too_many",
+ formatting_data=formatting_data)
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py
index 25bb5aea4a..fa492a80d8 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py
@@ -1,11 +1,13 @@
import pyblish.api
+
import openpype.api
+from openpype.pipeline import PublishXmlValidationError
class ValidateTextureBatchWorkfiles(pyblish.api.InstancePlugin):
"""Validates that textures workfile has collected resources (optional).
- Collected recourses means secondary workfiles (in most cases).
+ Collected resources means secondary workfiles (in most cases).
"""
label = "Validate Texture Workfile Has Resources"
@@ -24,6 +26,13 @@ class ValidateTextureBatchWorkfiles(pyblish.api.InstancePlugin):
self.log.warning("Only secondary workfile present!")
return
- msg = "No secondary workfiles present for workfile {}".\
- format(instance.data["name"])
- assert instance.data.get("resources"), msg
+ if not instance.data.get("resources"):
+ msg = "No secondary workfile present for workfile '{}'". \
+ format(instance.data["name"])
+ ext = self.main_workfile_extensions[0]
+ formatting_data = {"file_name": instance.data["name"],
+ "extension": ext}
+
+ raise PublishXmlValidationError(self, msg,
+ formatting_data=formatting_data
+ )
diff --git a/openpype/hosts/testhost/api/instances.json b/openpype/hosts/testhost/api/instances.json
index 84021eff91..d955012514 100644
--- a/openpype/hosts/testhost/api/instances.json
+++ b/openpype/hosts/testhost/api/instances.json
@@ -8,7 +8,7 @@
"asset": "sq01_sh0010",
"task": "Compositing",
"variant": "myVariant",
- "uuid": "a485f148-9121-46a5-8157-aa64df0fb449",
+ "instance_id": "a485f148-9121-46a5-8157-aa64df0fb449",
"creator_attributes": {
"number_key": 10,
"ha": 10
@@ -29,8 +29,8 @@
"asset": "sq01_sh0010",
"task": "Compositing",
"variant": "myVariant2",
- "uuid": "a485f148-9121-46a5-8157-aa64df0fb444",
"creator_attributes": {},
+ "instance_id": "a485f148-9121-46a5-8157-aa64df0fb444",
"publish_attributes": {
"CollectFtrackApi": {
"add_ftrack_family": true
@@ -47,8 +47,8 @@
"asset": "sq01_sh0010",
"task": "Compositing",
"variant": "Main",
- "uuid": "3607bc95-75f6-4648-a58d-e699f413d09f",
"creator_attributes": {},
+ "instance_id": "3607bc95-75f6-4648-a58d-e699f413d09f",
"publish_attributes": {
"CollectFtrackApi": {
"add_ftrack_family": true
@@ -65,7 +65,7 @@
"asset": "sq01_sh0020",
"task": "Compositing",
"variant": "Main2",
- "uuid": "4ccf56f6-9982-4837-967c-a49695dbe8eb",
+ "instance_id": "4ccf56f6-9982-4837-967c-a49695dbe8eb",
"creator_attributes": {},
"publish_attributes": {
"CollectFtrackApi": {
@@ -83,7 +83,7 @@
"asset": "sq01_sh0020",
"task": "Compositing",
"variant": "Main2",
- "uuid": "4ccf56f6-9982-4837-967c-a49695dbe8ec",
+ "instance_id": "4ccf56f6-9982-4837-967c-a49695dbe8ec",
"creator_attributes": {},
"publish_attributes": {
"CollectFtrackApi": {
@@ -101,7 +101,7 @@
"asset": "Alpaca_01",
"task": "modeling",
"variant": "Main",
- "uuid": "7c9ddfc7-9f9c-4c1c-b233-38c966735fb6",
+ "instance_id": "7c9ddfc7-9f9c-4c1c-b233-38c966735fb6",
"creator_attributes": {},
"publish_attributes": {}
}
diff --git a/openpype/hosts/testhost/api/pipeline.py b/openpype/hosts/testhost/api/pipeline.py
index 49f1d3f33d..1f5d680705 100644
--- a/openpype/hosts/testhost/api/pipeline.py
+++ b/openpype/hosts/testhost/api/pipeline.py
@@ -114,7 +114,7 @@ def update_instances(update_list):
instances = HostContext.get_instances()
for instance_data in instances:
- instance_id = instance_data["uuid"]
+ instance_id = instance_data["instance_id"]
if instance_id in updated_instances:
new_instance_data = updated_instances[instance_id]
old_keys = set(instance_data.keys())
@@ -132,10 +132,10 @@ def remove_instances(instances):
current_instances = HostContext.get_instances()
for instance in instances:
- instance_id = instance.data["uuid"]
+ instance_id = instance.data["instance_id"]
found_idx = None
for idx, _instance in enumerate(current_instances):
- if instance_id == _instance["uuid"]:
+ if instance_id == _instance["instance_id"]:
found_idx = idx
break
diff --git a/openpype/hosts/testhost/plugins/publish/collect_context.py b/openpype/hosts/testhost/plugins/publish/collect_context.py
index bbb8477cdf..0ab98fb84b 100644
--- a/openpype/hosts/testhost/plugins/publish/collect_context.py
+++ b/openpype/hosts/testhost/plugins/publish/collect_context.py
@@ -19,7 +19,7 @@ class CollectContextDataTestHost(
hosts = ["testhost"]
@classmethod
- def get_instance_attr_defs(cls):
+ def get_attribute_defs(cls):
return [
attribute_definitions.BoolDef(
"test_bool",
diff --git a/openpype/hosts/testhost/plugins/publish/collect_instance_1.py b/openpype/hosts/testhost/plugins/publish/collect_instance_1.py
index 979ab83f11..3c035eccb6 100644
--- a/openpype/hosts/testhost/plugins/publish/collect_instance_1.py
+++ b/openpype/hosts/testhost/plugins/publish/collect_instance_1.py
@@ -20,7 +20,7 @@ class CollectInstanceOneTestHost(
hosts = ["testhost"]
@classmethod
- def get_instance_attr_defs(cls):
+ def get_attribute_defs(cls):
return [
attribute_definitions.NumberDef(
"version",
diff --git a/openpype/hosts/traypublisher/api/__init__.py b/openpype/hosts/traypublisher/api/__init__.py
new file mode 100644
index 0000000000..c461c0c526
--- /dev/null
+++ b/openpype/hosts/traypublisher/api/__init__.py
@@ -0,0 +1,20 @@
+from .pipeline import (
+ install,
+ ls,
+
+ set_project_name,
+ get_context_title,
+ get_context_data,
+ update_context_data,
+)
+
+
+__all__ = (
+ "install",
+ "ls",
+
+ "set_project_name",
+ "get_context_title",
+ "get_context_data",
+ "update_context_data",
+)
diff --git a/openpype/hosts/traypublisher/api/pipeline.py b/openpype/hosts/traypublisher/api/pipeline.py
new file mode 100644
index 0000000000..a39e5641ae
--- /dev/null
+++ b/openpype/hosts/traypublisher/api/pipeline.py
@@ -0,0 +1,180 @@
+import os
+import json
+import tempfile
+import atexit
+
+from avalon import io
+import avalon.api
+import pyblish.api
+
+from openpype.pipeline import BaseCreator
+
+ROOT_DIR = os.path.dirname(os.path.dirname(
+ os.path.abspath(__file__)
+))
+PUBLISH_PATH = os.path.join(ROOT_DIR, "plugins", "publish")
+CREATE_PATH = os.path.join(ROOT_DIR, "plugins", "create")
+
+
+class HostContext:
+ _context_json_path = None
+
+ @staticmethod
+ def _on_exit():
+ if (
+ HostContext._context_json_path
+ and os.path.exists(HostContext._context_json_path)
+ ):
+ os.remove(HostContext._context_json_path)
+
+ @classmethod
+ def get_context_json_path(cls):
+ if cls._context_json_path is None:
+ output_file = tempfile.NamedTemporaryFile(
+ mode="w", prefix="traypub_", suffix=".json"
+ )
+ output_file.close()
+ cls._context_json_path = output_file.name
+ atexit.register(HostContext._on_exit)
+ print(cls._context_json_path)
+ return cls._context_json_path
+
+ @classmethod
+ def _get_data(cls, group=None):
+ json_path = cls.get_context_json_path()
+ data = {}
+ if not os.path.exists(json_path):
+ with open(json_path, "w") as json_stream:
+ json.dump(data, json_stream)
+ else:
+ with open(json_path, "r") as json_stream:
+ content = json_stream.read()
+ if content:
+ data = json.loads(content)
+ if group is None:
+ return data
+ return data.get(group)
+
+ @classmethod
+ def _save_data(cls, group, new_data):
+ json_path = cls.get_context_json_path()
+ data = cls._get_data()
+ data[group] = new_data
+ with open(json_path, "w") as json_stream:
+ json.dump(data, json_stream)
+
+ @classmethod
+ def add_instance(cls, instance):
+ instances = cls.get_instances()
+ instances.append(instance)
+ cls.save_instances(instances)
+
+ @classmethod
+ def get_instances(cls):
+ return cls._get_data("instances") or []
+
+ @classmethod
+ def save_instances(cls, instances):
+ cls._save_data("instances", instances)
+
+ @classmethod
+ def get_context_data(cls):
+ return cls._get_data("context") or {}
+
+ @classmethod
+ def save_context_data(cls, data):
+ cls._save_data("context", data)
+
+ @classmethod
+ def get_project_name(cls):
+ return cls._get_data("project_name")
+
+ @classmethod
+ def set_project_name(cls, project_name):
+ cls._save_data("project_name", project_name)
+
+ @classmethod
+ def get_data_to_store(cls):
+ return {
+ "project_name": cls.get_project_name(),
+ "instances": cls.get_instances(),
+ "context": cls.get_context_data(),
+ }
+
+
+def list_instances():
+ return HostContext.get_instances()
+
+
+def update_instances(update_list):
+ updated_instances = {}
+ for instance, _changes in update_list:
+ updated_instances[instance.id] = instance.data_to_store()
+
+ instances = HostContext.get_instances()
+ for instance_data in instances:
+ instance_id = instance_data["instance_id"]
+ if instance_id in updated_instances:
+ new_instance_data = updated_instances[instance_id]
+ old_keys = set(instance_data.keys())
+ new_keys = set(new_instance_data.keys())
+ instance_data.update(new_instance_data)
+ for key in (old_keys - new_keys):
+ instance_data.pop(key)
+
+ HostContext.save_instances(instances)
+
+
+def remove_instances(instances):
+ if not isinstance(instances, (tuple, list)):
+ instances = [instances]
+
+ current_instances = HostContext.get_instances()
+ for instance in instances:
+ instance_id = instance.data["instance_id"]
+ found_idx = None
+ for idx, _instance in enumerate(current_instances):
+ if instance_id == _instance["instance_id"]:
+ found_idx = idx
+ break
+
+ if found_idx is not None:
+ current_instances.pop(found_idx)
+ HostContext.save_instances(current_instances)
+
+
+def get_context_data():
+ return HostContext.get_context_data()
+
+
+def update_context_data(data, changes):
+ HostContext.save_context_data(data)
+
+
+def get_context_title():
+ return HostContext.get_project_name()
+
+
+def ls():
+ """Probably will never return loaded containers."""
+ return []
+
+
+def install():
+ """This is called before a project is known.
+
+ Project is defined with 'set_project_name'.
+ """
+ os.environ["AVALON_APP"] = "traypublisher"
+
+ pyblish.api.register_host("traypublisher")
+ pyblish.api.register_plugin_path(PUBLISH_PATH)
+ avalon.api.register_plugin_path(BaseCreator, CREATE_PATH)
+
+
+def set_project_name(project_name):
+ # TODO Deregister project specific plugins and register new project plugins
+ os.environ["AVALON_PROJECT"] = project_name
+ avalon.api.Session["AVALON_PROJECT"] = project_name
+ io.install()
+ HostContext.set_project_name(project_name)
diff --git a/openpype/hosts/traypublisher/plugins/create/create_workfile.py b/openpype/hosts/traypublisher/plugins/create/create_workfile.py
new file mode 100644
index 0000000000..2db4770bbc
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/create/create_workfile.py
@@ -0,0 +1,97 @@
+from openpype.hosts.traypublisher.api import pipeline
+from openpype.pipeline import (
+ Creator,
+ CreatedInstance,
+ lib
+)
+
+
+class WorkfileCreator(Creator):
+ identifier = "workfile"
+ label = "Workfile"
+ family = "workfile"
+ description = "Publish backup of workfile"
+
+ create_allow_context_change = True
+
+ extensions = [
+ # Maya
+ ".ma", ".mb",
+ # Nuke
+ ".nk",
+ # Hiero
+ ".hrox",
+ # Houdini
+ ".hip", ".hiplc", ".hipnc",
+ # Blender
+ ".blend",
+ # Celaction
+ ".scn",
+ # TVPaint
+ ".tvpp",
+ # Fusion
+ ".comp",
+ # Harmony
+ ".zip",
+ # Premiere
+ ".prproj",
+ # Resolve
+ ".drp",
+ # Photoshop
+ ".psd", ".psb",
+ # Aftereffects
+ ".aep"
+ ]
+
+ def get_icon(self):
+ return "fa.file"
+
+ def collect_instances(self):
+ for instance_data in pipeline.list_instances():
+ creator_id = instance_data.get("creator_identifier")
+ if creator_id == self.identifier:
+ instance = CreatedInstance.from_existing(
+ instance_data, self
+ )
+ self._add_instance_to_context(instance)
+
+ def update_instances(self, update_list):
+ pipeline.update_instances(update_list)
+
+ def remove_instances(self, instances):
+ pipeline.remove_instances(instances)
+ for instance in instances:
+ self._remove_instance_from_context(instance)
+
+ def create(self, subset_name, data, pre_create_data):
+ # Pass precreate data to creator attributes
+ data["creator_attributes"] = pre_create_data
+ # Create new instance
+ new_instance = CreatedInstance(self.family, subset_name, data, self)
+ # Host implementation of storing metadata about instance
+ pipeline.HostContext.add_instance(new_instance.data_to_store())
+ # Add instance to current context
+ self._add_instance_to_context(new_instance)
+
+ def get_default_variants(self):
+ return [
+ "Main"
+ ]
+
+ def get_instance_attr_defs(self):
+ output = [
+ lib.FileDef(
+ "filepath",
+ folders=False,
+ extensions=self.extensions,
+ label="Filepath"
+ )
+ ]
+ return output
+
+ def get_pre_create_attr_defs(self):
+        # Use same attributes as for instance attributes
+ return self.get_instance_attr_defs()
+
+ def get_detail_description(self):
+ return """# Publish workfile backup"""
diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_source.py b/openpype/hosts/traypublisher/plugins/publish/collect_source.py
new file mode 100644
index 0000000000..6ff22be13a
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/publish/collect_source.py
@@ -0,0 +1,24 @@
+import pyblish.api
+
+
+class CollectSource(pyblish.api.ContextPlugin):
+ """Collecting instances from traypublisher host."""
+
+ label = "Collect source"
+ order = pyblish.api.CollectorOrder - 0.49
+ hosts = ["traypublisher"]
+
+ def process(self, context):
+ # get json paths from os and load them
+ source_name = "traypublisher"
+ for instance in context:
+ source = instance.data.get("source")
+ if not source:
+ instance.data["source"] = source_name
+ self.log.info((
+ "Source of instance \"{}\" is changed to \"{}\""
+ ).format(instance.data["name"], source_name))
+ else:
+ self.log.info((
+ "Source of instance \"{}\" was already set to \"{}\""
+ ).format(instance.data["name"], source))
diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_workfile.py b/openpype/hosts/traypublisher/plugins/publish/collect_workfile.py
new file mode 100644
index 0000000000..d48bace047
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/publish/collect_workfile.py
@@ -0,0 +1,31 @@
+import os
+import pyblish.api
+
+
+class CollectWorkfile(pyblish.api.InstancePlugin):
+ """Collect representation of workfile instances."""
+
+ label = "Collect Workfile"
+ order = pyblish.api.CollectorOrder - 0.49
+ families = ["workfile"]
+ hosts = ["traypublisher"]
+
+ def process(self, instance):
+ if "representations" not in instance.data:
+ instance.data["representations"] = []
+ repres = instance.data["representations"]
+
+ creator_attributes = instance.data["creator_attributes"]
+ filepath = creator_attributes["filepath"]
+ instance.data["sourceFilepath"] = filepath
+
+ staging_dir = os.path.dirname(filepath)
+ filename = os.path.basename(filepath)
+ ext = os.path.splitext(filename)[-1]
+
+ repres.append({
+ "ext": ext,
+ "name": ext,
+ "stagingDir": staging_dir,
+ "files": filename
+ })
diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_workfile.py b/openpype/hosts/traypublisher/plugins/publish/validate_workfile.py
new file mode 100644
index 0000000000..88339d2aac
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/publish/validate_workfile.py
@@ -0,0 +1,24 @@
+import os
+import pyblish.api
+from openpype.pipeline import PublishValidationError
+
+
+class ValidateWorkfilePath(pyblish.api.InstancePlugin):
+    """Validate existence of the workfile instance's source file."""
+
+ label = "Collect Workfile"
+ order = pyblish.api.ValidatorOrder - 0.49
+ families = ["workfile"]
+ hosts = ["traypublisher"]
+
+ def process(self, instance):
+ filepath = instance.data["sourceFilepath"]
+ if not filepath:
+ raise PublishValidationError((
+ "Filepath of 'workfile' instance \"{}\" is not set"
+ ).format(instance.data["name"]))
+
+ if not os.path.exists(filepath):
+ raise PublishValidationError((
+ "Filepath of 'workfile' instance \"{}\" does not exist: {}"
+ ).format(instance.data["name"], filepath))
diff --git a/openpype/hosts/tvpaint/api/communication_server.py b/openpype/hosts/tvpaint/api/communication_server.py
index e9c5f4c73e..65cb9aa2f3 100644
--- a/openpype/hosts/tvpaint/api/communication_server.py
+++ b/openpype/hosts/tvpaint/api/communication_server.py
@@ -21,7 +21,7 @@ from aiohttp_json_rpc.protocol import (
)
from aiohttp_json_rpc.exceptions import RpcError
-from avalon import api
+from openpype.lib import emit_event
from openpype.hosts.tvpaint.tvpaint_plugin import get_plugin_files_path
log = logging.getLogger(__name__)
@@ -754,7 +754,7 @@ class BaseCommunicator:
self._on_client_connect()
- api.emit("application.launched")
+ emit_event("application.launched")
def _on_client_connect(self):
self._initial_textfile_write()
@@ -938,5 +938,5 @@ class QtCommunicator(BaseCommunicator):
def _exit(self, *args, **kwargs):
super()._exit(*args, **kwargs)
- api.emit("application.exit")
+ emit_event("application.exit")
self.qt_app.exit(self.exit_code)
diff --git a/openpype/hosts/tvpaint/api/pipeline.py b/openpype/hosts/tvpaint/api/pipeline.py
index 74eb41892c..46c9d3a1dd 100644
--- a/openpype/hosts/tvpaint/api/pipeline.py
+++ b/openpype/hosts/tvpaint/api/pipeline.py
@@ -14,6 +14,12 @@ from avalon.pipeline import AVALON_CONTAINER_ID
from openpype.hosts import tvpaint
from openpype.api import get_current_project_settings
+from openpype.lib import register_event_callback
+from openpype.pipeline import (
+ LegacyCreator,
+ register_loader_plugin_path,
+ deregister_loader_plugin_path,
+)
from .lib import (
execute_george,
@@ -75,8 +81,8 @@ def install():
pyblish.api.register_host("tvpaint")
pyblish.api.register_plugin_path(PUBLISH_PATH)
- avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH)
- avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH)
+ register_loader_plugin_path(LOAD_PATH)
+ avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH)
registered_callbacks = (
pyblish.api.registered_callbacks().get("instanceToggled") or []
@@ -84,8 +90,8 @@ def install():
if on_instance_toggle not in registered_callbacks:
pyblish.api.register_callback("instanceToggled", on_instance_toggle)
- avalon.api.on("application.launched", initial_launch)
- avalon.api.on("application.exit", application_exit)
+ register_event_callback("application.launched", initial_launch)
+ register_event_callback("application.exit", application_exit)
def uninstall():
@@ -97,8 +103,8 @@ def uninstall():
log.info("OpenPype - Uninstalling TVPaint integration")
pyblish.api.deregister_host("tvpaint")
pyblish.api.deregister_plugin_path(PUBLISH_PATH)
- avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH)
- avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH)
+ deregister_loader_plugin_path(LOAD_PATH)
+ avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH)
def containerise(
diff --git a/openpype/hosts/tvpaint/api/plugin.py b/openpype/hosts/tvpaint/api/plugin.py
index af80c9eae2..15ad8905e0 100644
--- a/openpype/hosts/tvpaint/api/plugin.py
+++ b/openpype/hosts/tvpaint/api/plugin.py
@@ -1,16 +1,17 @@
import re
import uuid
-import avalon.api
-
-from openpype.api import PypeCreatorMixin
+from openpype.pipeline import (
+ LegacyCreator,
+ LoaderPlugin,
+)
from openpype.hosts.tvpaint.api import (
pipeline,
lib
)
-class Creator(PypeCreatorMixin, avalon.api.Creator):
+class Creator(LegacyCreator):
def __init__(self, *args, **kwargs):
super(Creator, self).__init__(*args, **kwargs)
# Add unified identifier created with `uuid` module
@@ -74,7 +75,7 @@ class Creator(PypeCreatorMixin, avalon.api.Creator):
self.write_instances(data)
-class Loader(avalon.api.Loader):
+class Loader(LoaderPlugin):
hosts = ["tvpaint"]
@staticmethod
diff --git a/openpype/hosts/tvpaint/plugins/create/create_render_layer.py b/openpype/hosts/tvpaint/plugins/create/create_render_layer.py
index 40a7d15990..c1af9632b1 100644
--- a/openpype/hosts/tvpaint/plugins/create/create_render_layer.py
+++ b/openpype/hosts/tvpaint/plugins/create/create_render_layer.py
@@ -1,5 +1,4 @@
-from avalon.api import CreatorError
-
+from openpype.pipeline import CreatorError
from openpype.lib import prepare_template_data
from openpype.hosts.tvpaint.api import (
plugin,
diff --git a/openpype/hosts/tvpaint/plugins/create/create_render_pass.py b/openpype/hosts/tvpaint/plugins/create/create_render_pass.py
index af962052fc..a7f717ccec 100644
--- a/openpype/hosts/tvpaint/plugins/create/create_render_pass.py
+++ b/openpype/hosts/tvpaint/plugins/create/create_render_pass.py
@@ -1,4 +1,4 @@
-from avalon.api import CreatorError
+from openpype.pipeline import CreatorError
from openpype.lib import prepare_template_data
from openpype.hosts.tvpaint.api import (
plugin,
diff --git a/openpype/hosts/tvpaint/plugins/load/load_image.py b/openpype/hosts/tvpaint/plugins/load/load_image.py
index 7dba1e3619..f861d0119e 100644
--- a/openpype/hosts/tvpaint/plugins/load/load_image.py
+++ b/openpype/hosts/tvpaint/plugins/load/load_image.py
@@ -1,4 +1,4 @@
-from avalon.vendor import qargparse
+import qargparse
from openpype.hosts.tvpaint.api import lib, plugin
diff --git a/openpype/hosts/tvpaint/plugins/load/load_reference_image.py b/openpype/hosts/tvpaint/plugins/load/load_reference_image.py
index 0a85e5dc76..5e4e3965d2 100644
--- a/openpype/hosts/tvpaint/plugins/load/load_reference_image.py
+++ b/openpype/hosts/tvpaint/plugins/load/load_reference_image.py
@@ -1,6 +1,6 @@
import collections
+import qargparse
from avalon.pipeline import get_representation_context
-from avalon.vendor import qargparse
from openpype.hosts.tvpaint.api import lib, pipeline, plugin
diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_asset_name.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_asset_name.xml
new file mode 100644
index 0000000000..33a9ca4247
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_asset_name.xml
@@ -0,0 +1,22 @@
+
+
+
+Subset context
+## Invalid subset context
+
+Context of the given subset doesn't match your current scene.
+
+### How to repair?
+
+You can fix this with the "Repair" button on the right. This will use '{expected_asset}' asset name and overwrite '{found_asset}' asset name in scene metadata.
+
+After that restart publishing with Reload button.
+
+
+### How could this happen?
+
+The subset was created in different scene with different context
+or the scene file was copy pasted from different context.
+
+
+
diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_duplicated_layer_names.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_duplicated_layer_names.xml
new file mode 100644
index 0000000000..5d798544c0
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_duplicated_layer_names.xml
@@ -0,0 +1,22 @@
+
+
+
+Layer names
+## Duplicated layer names
+
+Can't determine which layers should be published because there are duplicated layer names in the scene.
+
+### Duplicated layer names
+
+{layer_names}
+
+*Check layer names for all subsets in list on left side.*
+
+### How to repair?
+
+Hide/rename/remove layers that should not be published.
+
+If all of them should be published then you have duplicated subset names in the scene. In that case you have to recreate them and use a different variant name.
+
+
+
diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_layers_visibility.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_layers_visibility.xml
new file mode 100644
index 0000000000..e7be735888
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_layers_visibility.xml
@@ -0,0 +1,20 @@
+
+
+
+Layers visibility
+## All layers are not visible
+
+Layers visibility was changed during publishing, which caused all layers for subset "{instance_name}" to be hidden.
+
+### Layer names for **{instance_name}**
+
+{layer_names}
+
+*Check layer names for all subsets in the list on the left side.*
+
+### How to repair?
+
+Reset publishing and do not change visibility of layers after hitting publish button.
+
+
+
diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_marks.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_marks.xml
new file mode 100644
index 0000000000..f0e01ebaa7
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_marks.xml
@@ -0,0 +1,21 @@
+
+
+
+Frame range
+## Invalid render frame range
+
+Scene frame range which will be rendered is defined by MarkIn and MarkOut. Expected frame range is {expected_frame_range} and current frame range is {current_frame_range}.
+
+It is also required that MarkIn and MarkOut are enabled in the scene. Their color is highlighted on the timeline when they are enabled.
+
+- MarkIn is {mark_in_enable_state}
+- MarkOut is {mark_out_enable_state}
+
+### How to repair?
+
+You can fix this with the "Repair" button on the right. That will change MarkOut to {expected_mark_out}.
+
+Or you can manually modify MarkIn and MarkOut in the scene timeline.
+
+
+
diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_missing_layer_names.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_missing_layer_names.xml
new file mode 100644
index 0000000000..e96e7c5044
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_missing_layer_names.xml
@@ -0,0 +1,18 @@
+
+
+
+Missing layers
+## Missing layers for render pass
+
+Render pass subset "{instance_name}" has stored layer names that belong to its rendering scope but the layers were not found in the scene.
+
+### Missing layer names
+
+{layer_names}
+
+### How to repair?
+
+Find layers that belong to subset {instance_name} and rename them back to the expected layer names, or remove the subset and create a new one with the right layers.
+
+
+
diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_render_pass_group.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_render_pass_group.xml
new file mode 100644
index 0000000000..df7bdf36e5
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_render_pass_group.xml
@@ -0,0 +1,14 @@
+
+
+
+Render pass group
+## Invalid group of Render Pass layers
+
+Layers of Render Pass {instance_name} belong to Render Group which is defined by TVPaint color group {expected_group}. But the layers are not in the group.
+
+### How to repair?
+
+Change the color group to {expected_group} on layers {layer_names}.
+
+
+
diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_scene_settings.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_scene_settings.xml
new file mode 100644
index 0000000000..f741c71456
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_scene_settings.xml
@@ -0,0 +1,26 @@
+
+
+
+Scene settings
+## Invalid scene settings
+
+Scene settings do not match to expected values.
+
+**FPS**
+- Expected value: {expected_fps}
+- Current value: {current_fps}
+
+**Resolution**
+- Expected value: {expected_width}x{expected_height}
+- Current value: {current_width}x{current_height}
+
+**Pixel ratio**
+- Expected value: {expected_pixel_ratio}
+- Current value: {current_pixel_ratio}
+
+### How to repair?
+
+FPS and Pixel ratio can be modified in the scene settings. Wrong resolution can be fixed by changing the resolution of the scene, but due to TVPaint limitations it is possible that you will need to create a new scene.
+
+
+
diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_start_frame.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_start_frame.xml
new file mode 100644
index 0000000000..9052abf66c
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_start_frame.xml
@@ -0,0 +1,14 @@
+
+
+
+First frame
+## MarkIn is not set to 0
+
+MarkIn in your scene must start from frame index 0 but MarkIn is set to {current_start_frame}.
+
+### How to repair?
+
+You can modify MarkIn manually or hit the "Repair" button on the right which will change MarkIn to 0 (does not change MarkOut).
+
+
+
diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_metadata.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_metadata.xml
new file mode 100644
index 0000000000..7397f6ef0b
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_metadata.xml
@@ -0,0 +1,19 @@
+
+
+
+Missing metadata
+## Your scene is missing context metadata
+
+Your scene does not contain metadata about {missing_metadata}.
+
+### How to repair?
+
+Resave the scene using Workfiles tool or hit the "Repair" button on the right.
+
+
+### How could this happen?
+
+You're using scene file that was not created using Workfiles tool.
+
+
+
diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_project_name.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_project_name.xml
new file mode 100644
index 0000000000..c4ffafc8b5
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_project_name.xml
@@ -0,0 +1,24 @@
+
+
+
+Project name
+## Your scene is from different project
+
+It is not possible to publish into project "{workfile_project_name}" when TVPaint was opened with project "{env_project_name}" in context.
+
+### How to repair?
+
+If the workfile belongs to project "{env_project_name}" then use Workfiles tool to resave it.
+
+Otherwise close TVPaint and launch it again from project you want to publish in.
+
+
+### How could this happen?
+
+You've opened a workfile from a different project. You've opened TVPaint on a task from "{env_project_name}" and then opened TVPaint again on a task from "{workfile_project_name}" without closing TVPaint first. Because only one instance of TVPaint can run at a time, the project context did not change.
+
+### Why it is important?
+Because the project may affect how TVPaint works or change publishing behavior, it is dangerous to allow changing the project context. For example, publishing would not run as expected.
+
+
+
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py b/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py
index 0fdeba0a21..70816f9f18 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py
@@ -1,4 +1,5 @@
import pyblish.api
+from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.tvpaint.api import pipeline
@@ -27,7 +28,7 @@ class FixAssetNames(pyblish.api.Action):
pipeline._write_instances(new_instance_items)
-class ValidateMissingLayers(pyblish.api.ContextPlugin):
+class ValidateAssetNames(pyblish.api.ContextPlugin):
"""Validate assset name present on instance.
Asset name on instance should be the same as context's.
@@ -48,8 +49,18 @@ class ValidateMissingLayers(pyblish.api.ContextPlugin):
instance_label = (
instance.data.get("label") or instance.data["name"]
)
- raise AssertionError((
- "Different asset name on instance then context's."
- " Instance \"{}\" has asset name: \"{}\""
- " Context asset name is: \"{}\""
- ).format(instance_label, asset_name, context_asset_name))
+
+ raise PublishXmlValidationError(
+ self,
+ (
+ "Different asset name on instance then context's."
+ " Instance \"{}\" has asset name: \"{}\""
+ " Context asset name is: \"{}\""
+ ).format(
+ instance_label, asset_name, context_asset_name
+ ),
+ formatting_data={
+ "expected_asset": context_asset_name,
+ "found_asset": asset_name
+ }
+ )
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py b/openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py
index efccf19ef9..9f61bdbcd0 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py
@@ -1,4 +1,5 @@
import pyblish.api
+from openpype.pipeline import PublishXmlValidationError
class ValidateLayersGroup(pyblish.api.InstancePlugin):
@@ -30,14 +31,20 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin):
"\"{}\"".format(layer_name)
for layer_name in duplicated_layer_names
])
-
- # Raise an error
- raise AssertionError(
+ detail_lines = [
+ "- {}".format(layer_name)
+ for layer_name in set(duplicated_layer_names)
+ ]
+ raise PublishXmlValidationError(
+ self,
(
"Layers have duplicated names for instance {}."
# Description what's wrong
" There are layers with same name and one of them is marked"
" for publishing so it is not possible to know which should"
" be published. Please look for layers with names: {}"
- ).format(instance.data["label"], layers_msg)
+ ).format(instance.data["label"], layers_msg),
+ formatting_data={
+ "layer_names": " ".join(detail_lines)
+ }
)
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py b/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py
index 74ef34169e..7ea0587b8f 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py
@@ -1,6 +1,8 @@
import pyblish.api
+from openpype.pipeline import PublishXmlValidationError
+# TODO @iLLiCiTiT add repair action to disable instances?
class ValidateLayersVisiblity(pyblish.api.InstancePlugin):
"""Validate existence of renderPass layers."""
@@ -9,8 +11,26 @@ class ValidateLayersVisiblity(pyblish.api.InstancePlugin):
families = ["review", "renderPass", "renderLayer"]
def process(self, instance):
+ layer_names = set()
for layer in instance.data["layers"]:
+ layer_names.add(layer["name"])
if layer["visible"]:
return
- raise AssertionError("All layers of instance are not visible.")
+ instance_label = (
+ instance.data.get("label") or instance.data["name"]
+ )
+
+ raise PublishXmlValidationError(
+ self,
+ "All layers of instance \"{}\" are not visible.".format(
+ instance_label
+ ),
+ formatting_data={
+ "instance_name": instance_label,
+ "layer_names": " ".join([
+ "- {}".format(layer_name)
+ for layer_name in layer_names
+ ])
+ }
+ )
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py
index f45247ceac..d1f299e006 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py
@@ -1,6 +1,7 @@
import json
import pyblish.api
+from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.tvpaint.api import lib
@@ -73,9 +74,34 @@ class ValidateMarks(pyblish.api.ContextPlugin):
"expected": expected_data[k]
}
- if invalid:
- raise AssertionError(
- "Marks does not match database:\n{}".format(
- json.dumps(invalid, sort_keys=True, indent=4)
- )
- )
+ # Validation ends
+ if not invalid:
+ return
+
+ current_frame_range = (
+ (current_data["markOut"] - current_data["markIn"]) + 1
+ )
+ expected_frame_range = (
+ (expected_data["markOut"] - expected_data["markIn"]) + 1
+ )
+ mark_in_enable_state = "disabled"
+ if current_data["markInState"]:
+ mark_in_enable_state = "enabled"
+
+ mark_out_enable_state = "disabled"
+ if current_data["markOutState"]:
+ mark_out_enable_state = "enabled"
+
+ raise PublishXmlValidationError(
+ self,
+ "Marks does not match database:\n{}".format(
+ json.dumps(invalid, sort_keys=True, indent=4)
+ ),
+ formatting_data={
+ "current_frame_range": str(current_frame_range),
+ "expected_frame_range": str(expected_frame_range),
+ "mark_in_enable_state": mark_in_enable_state,
+ "mark_out_enable_state": mark_out_enable_state,
+ "expected_mark_out": expected_data["markOut"]
+ }
+ )
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_missing_layer_names.py b/openpype/hosts/tvpaint/plugins/publish/validate_missing_layer_names.py
index db9d354fcd..294ce6cf4f 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_missing_layer_names.py
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_missing_layer_names.py
@@ -1,4 +1,5 @@
import pyblish.api
+from openpype.pipeline import PublishXmlValidationError
class ValidateMissingLayers(pyblish.api.InstancePlugin):
@@ -30,13 +31,25 @@ class ValidateMissingLayers(pyblish.api.InstancePlugin):
"\"{}\"".format(layer_name)
for layer_name in missing_layer_names
])
+ instance_label = (
+ instance.data.get("label") or instance.data["name"]
+ )
+ description_layer_names = " ".join([
+ "- {}".format(layer_name)
+ for layer_name in missing_layer_names
+ ])
# Raise an error
- raise AssertionError(
+ raise PublishXmlValidationError(
+ self,
(
"Layers were not found by name for instance \"{}\"."
# Description what's wrong
" Layer names marked for publishing are not available"
" in layers list. Missing layer names: {}"
- ).format(instance.data["label"], layers_msg)
+ ).format(instance.data["label"], layers_msg),
+ formatting_data={
+ "instance_name": instance_label,
+ "layer_names": description_layer_names
+ }
)
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_project_settings.py b/openpype/hosts/tvpaint/plugins/publish/validate_project_settings.py
deleted file mode 100644
index 84c03a9857..0000000000
--- a/openpype/hosts/tvpaint/plugins/publish/validate_project_settings.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import json
-
-import pyblish.api
-
-
-class ValidateProjectSettings(pyblish.api.ContextPlugin):
- """Validate project settings against database.
- """
-
- label = "Validate Project Settings"
- order = pyblish.api.ValidatorOrder
- optional = True
-
- def process(self, context):
- scene_data = {
- "fps": context.data.get("sceneFps"),
- "resolutionWidth": context.data.get("sceneWidth"),
- "resolutionHeight": context.data.get("sceneHeight"),
- "pixelAspect": context.data.get("scenePixelAspect")
- }
- invalid = {}
- for k in scene_data.keys():
- expected_value = context.data["assetEntity"]["data"][k]
- if scene_data[k] != expected_value:
- invalid[k] = {
- "current": scene_data[k], "expected": expected_value
- }
-
- if invalid:
- raise AssertionError(
- "Project settings does not match database:\n{}".format(
- json.dumps(invalid, sort_keys=True, indent=4)
- )
- )
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py b/openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py
index 5047b8d729..0fbfca6c56 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py
@@ -1,5 +1,6 @@
import collections
import pyblish.api
+from openpype.pipeline import PublishXmlValidationError
class ValidateLayersGroup(pyblish.api.InstancePlugin):
@@ -26,11 +27,13 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin):
layer_names = instance.data["layer_names"]
# Check if all layers from render pass are in right group
invalid_layers_by_group_id = collections.defaultdict(list)
+ invalid_layer_names = set()
for layer_name in layer_names:
layer = layers_by_name.get(layer_name)
_group_id = layer["group_id"]
if _group_id != group_id:
invalid_layers_by_group_id[_group_id].append(layer)
+ invalid_layer_names.add(layer_name)
# Everything is OK and skip exception
if not invalid_layers_by_group_id:
@@ -61,16 +64,27 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin):
)
# Raise an error
- raise AssertionError((
- # Short message
- "Layers in wrong group."
- # Description what's wrong
- " Layers from render pass \"{}\" must be in group {} (id: {})."
- # Detailed message
- " Layers in wrong group: {}"
- ).format(
- instance.data["label"],
- correct_group["name"],
- correct_group["group_id"],
- " | ".join(per_group_msgs)
- ))
+ raise PublishXmlValidationError(
+ self,
+ (
+ # Short message
+ "Layers in wrong group."
+ # Description what's wrong
+ " Layers from render pass \"{}\" must be in group {} (id: {})."
+ # Detailed message
+ " Layers in wrong group: {}"
+ ).format(
+ instance.data["label"],
+ correct_group["name"],
+ correct_group["group_id"],
+ " | ".join(per_group_msgs)
+ ),
+ formatting_data={
+ "instance_name": (
+ instance.data.get("label") or instance.data["name"]
+ ),
+ "expected_group": correct_group["name"],
+ "layer_names": ", ".join(invalid_layer_names)
+
+ }
+ )
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py b/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py
new file mode 100644
index 0000000000..d235215ac9
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py
@@ -0,0 +1,49 @@
+import json
+
+import pyblish.api
+from openpype.pipeline import PublishXmlValidationError
+
+
+# TODO @iLliCiTiT add fix action for fps
+class ValidateProjectSettings(pyblish.api.ContextPlugin):
+ """Validate scene settings against database."""
+
+ label = "Validate Scene Settings"
+ order = pyblish.api.ValidatorOrder
+ optional = True
+
+ def process(self, context):
+ expected_data = context.data["assetEntity"]["data"]
+ scene_data = {
+ "fps": context.data.get("sceneFps"),
+ "resolutionWidth": context.data.get("sceneWidth"),
+ "resolutionHeight": context.data.get("sceneHeight"),
+ "pixelAspect": context.data.get("scenePixelAspect")
+ }
+ invalid = {}
+ for k in scene_data.keys():
+ expected_value = expected_data[k]
+ if scene_data[k] != expected_value:
+ invalid[k] = {
+ "current": scene_data[k], "expected": expected_value
+ }
+
+ if not invalid:
+ return
+
+ raise PublishXmlValidationError(
+ self,
+ "Scene settings does not match database:\n{}".format(
+ json.dumps(invalid, sort_keys=True, indent=4)
+ ),
+ formatting_data={
+ "expected_fps": expected_data["fps"],
+ "current_fps": scene_data["fps"],
+ "expected_width": expected_data["resolutionWidth"],
+ "expected_height": expected_data["resolutionHeight"],
+ "current_width": scene_data["resolutionWidth"],
+            "current_height": scene_data["resolutionHeight"],
+ "expected_pixel_ratio": expected_data["pixelAspect"],
+ "current_pixel_ratio": scene_data["pixelAspect"]
+ }
+ )
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py b/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py
index e2f8386757..ddc738c6ed 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py
@@ -1,4 +1,5 @@
import pyblish.api
+from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.tvpaint.api import lib
@@ -24,4 +25,13 @@ class ValidateStartFrame(pyblish.api.ContextPlugin):
def process(self, context):
start_frame = lib.execute_george("tv_startframe")
- assert int(start_frame) == 0, "Start frame has to be frame 0."
+        if int(start_frame) == 0:
+ return
+
+ raise PublishXmlValidationError(
+ self,
+ "Start frame has to be frame 0.",
+ formatting_data={
+ "current_start_frame": start_frame
+ }
+ )
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py
index 48fbeedb59..eac345f395 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py
@@ -1,4 +1,5 @@
import pyblish.api
+from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.tvpaint.api import save_file
@@ -42,8 +43,12 @@ class ValidateWorkfileMetadata(pyblish.api.ContextPlugin):
missing_keys.append(key)
if missing_keys:
- raise AssertionError(
+ raise PublishXmlValidationError(
+ self,
"Current workfile is missing metadata about {}.".format(
", ".join(missing_keys)
- )
+ ),
+ formatting_data={
+ "missing_metadata": ", ".join(missing_keys)
+ }
)
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py
index cc664d8030..0f25f2f7be 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py
@@ -1,5 +1,6 @@
import os
import pyblish.api
+from openpype.pipeline import PublishXmlValidationError
class ValidateWorkfileProjectName(pyblish.api.ContextPlugin):
@@ -31,15 +32,23 @@ class ValidateWorkfileProjectName(pyblish.api.ContextPlugin):
return
# Raise an error
- raise AssertionError((
- # Short message
- "Workfile from different Project ({})."
- # Description what's wrong
- " It is not possible to publish when TVPaint was launched in"
- "context of different project. Current context project is \"{}\"."
- " Launch TVPaint in context of project \"{}\" and then publish."
- ).format(
- workfile_project_name,
- env_project_name,
- workfile_project_name,
- ))
+ raise PublishXmlValidationError(
+ self,
+ (
+ # Short message
+ "Workfile from different Project ({})."
+ # Description what's wrong
+ " It is not possible to publish when TVPaint was launched in"
+ "context of different project. Current context project is"
+ " \"{}\". Launch TVPaint in context of project \"{}\""
+ " and then publish."
+ ).format(
+ workfile_project_name,
+ env_project_name,
+ workfile_project_name,
+ ),
+ formatting_data={
+ "workfile_project_name": workfile_project_name,
+                "env_project_name": env_project_name
+ }
+ )
diff --git a/openpype/hosts/unreal/__init__.py b/openpype/hosts/unreal/__init__.py
index 1280442916..533f315df3 100644
--- a/openpype/hosts/unreal/__init__.py
+++ b/openpype/hosts/unreal/__init__.py
@@ -1,13 +1,15 @@
import os
+import openpype.hosts
def add_implementation_envs(env, _app):
"""Modify environments to contain all required for implementation."""
- # Set AVALON_UNREAL_PLUGIN required for Unreal implementation
+ # Set OPENPYPE_UNREAL_PLUGIN required for Unreal implementation
unreal_plugin_path = os.path.join(
- os.environ["OPENPYPE_REPOS_ROOT"], "repos", "avalon-unreal-integration"
+ os.path.dirname(os.path.abspath(openpype.hosts.__file__)),
+ "unreal", "integration"
)
- env["AVALON_UNREAL_PLUGIN"] = unreal_plugin_path
+ env["OPENPYPE_UNREAL_PLUGIN"] = unreal_plugin_path
# Set default environments if are not set via settings
defaults = {
diff --git a/openpype/hosts/unreal/api/__init__.py b/openpype/hosts/unreal/api/__init__.py
index 38469e0ddb..ede71aa218 100644
--- a/openpype/hosts/unreal/api/__init__.py
+++ b/openpype/hosts/unreal/api/__init__.py
@@ -1,45 +1,40 @@
-import os
-import logging
+# -*- coding: utf-8 -*-
+"""Unreal Editor OpenPype host API."""
-from avalon import api as avalon
-from pyblish import api as pyblish
-import openpype.hosts.unreal
+from .plugin import (
+ Loader,
+ Creator
+)
+from .pipeline import (
+ install,
+ uninstall,
+ ls,
+ publish,
+ containerise,
+ show_creator,
+ show_loader,
+ show_publisher,
+ show_manager,
+ show_experimental_tools,
+ show_tools_dialog,
+ show_tools_popup,
+ instantiate,
+)
-logger = logging.getLogger("openpype.hosts.unreal")
-
-HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.unreal.__file__))
-PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
-PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
-LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
-CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
-INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
-
-
-def install():
- """Install Unreal configuration for Avalon."""
- print("-=" * 40)
- logo = '''.
-.
- ____________
- / \\ __ \\
- \\ \\ \\/_\\ \\
- \\ \\ _____/ ______
- \\ \\ \\___// \\ \\
- \\ \\____\\ \\ \\_____\\
- \\/_____/ \\/______/ PYPE Club .
-.
-'''
- print(logo)
- print("installing OpenPype for Unreal ...")
- print("-=" * 40)
- logger.info("installing OpenPype for Unreal")
- pyblish.register_plugin_path(str(PUBLISH_PATH))
- avalon.register_plugin_path(avalon.Loader, str(LOAD_PATH))
- avalon.register_plugin_path(avalon.Creator, str(CREATE_PATH))
-
-
-def uninstall():
- """Uninstall Unreal configuration for Avalon."""
- pyblish.deregister_plugin_path(str(PUBLISH_PATH))
- avalon.deregister_plugin_path(avalon.Loader, str(LOAD_PATH))
- avalon.deregister_plugin_path(avalon.Creator, str(CREATE_PATH))
+__all__ = [
+ "install",
+ "uninstall",
+ "Creator",
+ "Loader",
+ "ls",
+ "publish",
+ "containerise",
+ "show_creator",
+ "show_loader",
+ "show_publisher",
+ "show_manager",
+ "show_experimental_tools",
+ "show_tools_dialog",
+ "show_tools_popup",
+ "instantiate"
+]
diff --git a/openpype/hosts/unreal/api/helpers.py b/openpype/hosts/unreal/api/helpers.py
new file mode 100644
index 0000000000..0b6f07f52f
--- /dev/null
+++ b/openpype/hosts/unreal/api/helpers.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+import unreal # noqa
+
+
+class OpenPypeUnrealException(Exception):
+ pass
+
+
+@unreal.uclass()
+class OpenPypeHelpers(unreal.OpenPypeLib):
+ """Class wrapping some useful functions for OpenPype.
+
+ This class is extending native BP class in OpenPype Integration Plugin.
+
+ """
+
+ @unreal.ufunction(params=[str, unreal.LinearColor, bool])
+ def set_folder_color(self, path: str, color: unreal.LinearColor) -> None:
+ """Set color on folder in Content Browser.
+
+ This method sets color on folder in Content Browser. Unfortunately
+ there is no way to refresh Content Browser so new color isn't applied
+ immediately. They are saved to config file and appears correctly
+ only after Editor is restarted.
+
+ Args:
+ path (str): Path to folder
+ color (:class:`unreal.LinearColor`): Color of the folder
+
+ Example:
+
+ OpenPypeHelpers().set_folder_color(
+ "/Game/Path", unreal.LinearColor(a=1.0, r=1.0, g=0.5, b=0)
+ )
+
+ Note:
+ This will take effect only after Editor is restarted. I couldn't
+ find a way to refresh it. Also this saves the color definition
+ into the project config, binding this path with color. So if you
+ delete this path and later re-create, it will set this color
+ again.
+
+ """
+ self.c_set_folder_color(path, color, False)
diff --git a/openpype/hosts/unreal/api/pipeline.py b/openpype/hosts/unreal/api/pipeline.py
new file mode 100644
index 0000000000..9ec11b942d
--- /dev/null
+++ b/openpype/hosts/unreal/api/pipeline.py
@@ -0,0 +1,418 @@
+# -*- coding: utf-8 -*-
+import os
+import logging
+from typing import List
+
+import pyblish.api
+from avalon.pipeline import AVALON_CONTAINER_ID
+from avalon import api
+
+from openpype.pipeline import (
+ LegacyCreator,
+ register_loader_plugin_path,
+ deregister_loader_plugin_path,
+)
+from openpype.tools.utils import host_tools
+import openpype.hosts.unreal
+
+import unreal # noqa
+
+
+logger = logging.getLogger("openpype.hosts.unreal")
+OPENPYPE_CONTAINERS = "OpenPypeContainers"
+
+HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.unreal.__file__))
+PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
+PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
+LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
+CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
+INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
+
+
+def install():
+ """Install Unreal configuration for OpenPype."""
+ print("-=" * 40)
+ logo = '''.
+.
+ ____________
+ / \\ __ \\
+ \\ \\ \\/_\\ \\
+ \\ \\ _____/ ______
+ \\ \\ \\___// \\ \\
+ \\ \\____\\ \\ \\_____\\
+ \\/_____/ \\/______/ PYPE Club .
+.
+'''
+ print(logo)
+ print("installing OpenPype for Unreal ...")
+ print("-=" * 40)
+ logger.info("installing OpenPype for Unreal")
+ pyblish.api.register_plugin_path(str(PUBLISH_PATH))
+ register_loader_plugin_path(str(LOAD_PATH))
+ api.register_plugin_path(LegacyCreator, str(CREATE_PATH))
+ _register_callbacks()
+ _register_events()
+
+
+def uninstall():
+ """Uninstall Unreal configuration for Avalon."""
+ pyblish.api.deregister_plugin_path(str(PUBLISH_PATH))
+ deregister_loader_plugin_path(str(LOAD_PATH))
+ api.deregister_plugin_path(LegacyCreator, str(CREATE_PATH))
+
+
+def _register_callbacks():
+ """
+ TODO: Implement callbacks if supported by UE4
+ """
+ pass
+
+
+def _register_events():
+ """
+ TODO: Implement callbacks if supported by UE4
+ """
+ pass
+
+
+class Creator(LegacyCreator):
+ hosts = ["unreal"]
+ asset_types = []
+
+ def process(self):
+ nodes = list()
+
+ with unreal.ScopedEditorTransaction("OpenPype Creating Instance"):
+ if (self.options or {}).get("useSelection"):
+ self.log.info("setting ...")
+ print("settings ...")
+ nodes = unreal.EditorUtilityLibrary.get_selected_assets()
+
+ asset_paths = [a.get_path_name() for a in nodes]
+ self.name = move_assets_to_path(
+ "/Game", self.name, asset_paths
+ )
+
+ instance = create_publish_instance("/Game", self.name)
+ imprint(instance, self.data)
+
+ return instance
+
+
+def ls():
+ """List all containers.
+
+    List all containers found in the *Content Browser* of Unreal and return
+    metadata from them, adding `objectName` to the returned data.
+
+ """
+ ar = unreal.AssetRegistryHelpers.get_asset_registry()
+ openpype_containers = ar.get_assets_by_class("AssetContainer", True)
+
+ # get_asset_by_class returns AssetData. To get all metadata we need to
+ # load asset. get_tag_values() work only on metadata registered in
+ # Asset Registry Project settings (and there is no way to set it with
+ # python short of editing ini configuration file).
+ for asset_data in openpype_containers:
+ asset = asset_data.get_asset()
+ data = unreal.EditorAssetLibrary.get_metadata_tag_values(asset)
+ data["objectName"] = asset_data.asset_name
+ data = cast_map_to_str_dict(data)
+
+ yield data
+
+
+def parse_container(container):
+ """To get data from container, AssetContainer must be loaded.
+
+ Args:
+ container(str): path to container
+
+ Returns:
+ dict: metadata stored on container
+ """
+ asset = unreal.EditorAssetLibrary.load_asset(container)
+ data = unreal.EditorAssetLibrary.get_metadata_tag_values(asset)
+ data["objectName"] = asset.get_name()
+ data = cast_map_to_str_dict(data)
+
+ return data
+
+
+def publish():
+ """Shorthand to publish from within host."""
+ import pyblish.util
+
+ return pyblish.util.publish()
+
+
+def containerise(name, namespace, nodes, context, loader=None, suffix="_CON"):
+ """Bundles *nodes* (assets) into a *container* and add metadata to it.
+
+ Unreal doesn't support *groups* of assets that you can add metadata to.
+    But it does support folders that help to organize assets. Unfortunately
+ those folders are just that - you cannot add any additional information
+ to them. OpenPype Integration Plugin is providing way out - Implementing
+ `AssetContainer` Blueprint class. This class when added to folder can
+ handle metadata on it using standard
+ :func:`unreal.EditorAssetLibrary.set_metadata_tag()` and
+ :func:`unreal.EditorAssetLibrary.get_metadata_tag_values()`. It also
+    stores and monitors all changes in assets in the path where it resides.
+ those assets is available as `assets` property.
+
+ This is list of strings starting with asset type and ending with its path:
+ `Material /Game/OpenPype/Test/TestMaterial.TestMaterial`
+
+ """
+ # 1 - create directory for container
+ root = "/Game"
+ container_name = "{}{}".format(name, suffix)
+ new_name = move_assets_to_path(root, container_name, nodes)
+
+ # 2 - create Asset Container there
+ path = "{}/{}".format(root, new_name)
+ create_container(container=container_name, path=path)
+
+ namespace = path
+
+ data = {
+ "schema": "openpype:container-2.0",
+ "id": AVALON_CONTAINER_ID,
+ "name": new_name,
+ "namespace": namespace,
+ "loader": str(loader),
+ "representation": context["representation"]["_id"],
+ }
+ # 3 - imprint data
+ imprint("{}/{}".format(path, container_name), data)
+ return path
+
+
+def instantiate(root, name, data, assets=None, suffix="_INS"):
+ """Bundles *nodes* into *container*.
+
+ Marking it with metadata as publishable instance. If assets are provided,
+ they are moved to new path where `OpenPypePublishInstance` class asset is
+ created and imprinted with metadata.
+
+ This can then be collected for publishing by Pyblish for example.
+
+ Args:
+ root (str): root path where to create instance container
+ name (str): name of the container
+ data (dict): data to imprint on container
+ assets (list of str): list of asset paths to include in publish
+ instance
+ suffix (str): suffix string to append to instance name
+
+ """
+ container_name = "{}{}".format(name, suffix)
+
+ # if we specify assets, create new folder and move them there. If not,
+ # just create empty folder
+ if assets:
+ new_name = move_assets_to_path(root, container_name, assets)
+ else:
+ new_name = create_folder(root, name)
+
+ path = "{}/{}".format(root, new_name)
+ create_publish_instance(instance=container_name, path=path)
+
+ imprint("{}/{}".format(path, container_name), data)
+
+
+def imprint(node, data):
+ loaded_asset = unreal.EditorAssetLibrary.load_asset(node)
+ for key, value in data.items():
+ # Support values evaluated at imprint
+ if callable(value):
+ value = value()
+ # Unreal doesn't support NoneType in metadata values
+ if value is None:
+ value = ""
+ unreal.EditorAssetLibrary.set_metadata_tag(
+ loaded_asset, key, str(value)
+ )
+
+ with unreal.ScopedEditorTransaction("OpenPype containerising"):
+ unreal.EditorAssetLibrary.save_asset(node)
+
+
+def show_tools_popup():
+ """Show popup with tools.
+
+    Popup will disappear on click or when losing focus.
+ """
+ from openpype.hosts.unreal.api import tools_ui
+
+ tools_ui.show_tools_popup()
+
+
+def show_tools_dialog():
+ """Show dialog with tools.
+
+ Dialog will stay visible.
+ """
+ from openpype.hosts.unreal.api import tools_ui
+
+ tools_ui.show_tools_dialog()
+
+
+def show_creator():
+ host_tools.show_creator()
+
+
+def show_loader():
+ host_tools.show_loader(use_context=True)
+
+
+def show_publisher():
+ host_tools.show_publish()
+
+
+def show_manager():
+ host_tools.show_scene_inventory()
+
+
+def show_experimental_tools():
+ host_tools.show_experimental_tools_dialog()
+
+
+def create_folder(root: str, name: str) -> str:
+ """Create new folder.
+
+ If folder exists, append number at the end and try again, incrementing
+ if needed.
+
+ Args:
+ root (str): path root
+ name (str): folder name
+
+ Returns:
+ str: folder name
+
+ Example:
+        >>> create_folder("/Game", "Foo")
+        Foo
+        >>> create_folder("/Game", "Foo")
+        Foo1
+
+ """
+ eal = unreal.EditorAssetLibrary
+ index = 1
+ while True:
+ if eal.does_directory_exist("{}/{}".format(root, name)):
+ name = "{}{}".format(name, index)
+ index += 1
+ else:
+ eal.make_directory("{}/{}".format(root, name))
+ break
+
+ return name
+
+
+def move_assets_to_path(root: str, name: str, assets: List[str]) -> str:
+ """Moving (renaming) list of asset paths to new destination.
+
+ Args:
+ root (str): root of the path (eg. `/Game`)
+ name (str): name of destination directory (eg. `Foo` )
+ assets (list of str): list of asset paths
+
+ Returns:
+ str: folder name
+
+ Example:
+ This will get paths of all assets under `/Game/Test` and move them
+ to `/Game/NewTest`. If `/Game/NewTest` already exists, then resulting
+ path will be `/Game/NewTest1`
+
+ >>> assets = unreal.EditorAssetLibrary.list_assets("/Game/Test")
+ >>> move_assets_to_path("/Game", "NewTest", assets)
+ NewTest
+
+ """
+ eal = unreal.EditorAssetLibrary
+ name = create_folder(root, name)
+
+ unreal.log(assets)
+ for asset in assets:
+ loaded = eal.load_asset(asset)
+ eal.rename_asset(
+ asset, "{}/{}/{}".format(root, name, loaded.get_name())
+ )
+
+ return name
+
+
+def create_container(container: str, path: str) -> unreal.Object:
+ """Helper function to create Asset Container class on given path.
+
+ This Asset Class helps to mark given path as Container
+ and enable asset version control on it.
+
+ Args:
+ container (str): Asset Container name
+ path (str): Path where to create Asset Container. This path should
+ point into container folder
+
+ Returns:
+ :class:`unreal.Object`: instance of created asset
+
+ Example:
+
+ create_container(
+            "modelingFooCharacter_CON",
+            "/Game/modelingFooCharacter_CON"
+ )
+
+ """
+ factory = unreal.AssetContainerFactory()
+ tools = unreal.AssetToolsHelpers().get_asset_tools()
+
+ asset = tools.create_asset(container, path, None, factory)
+ return asset
+
+
+def create_publish_instance(instance: str, path: str) -> unreal.Object:
+ """Helper function to create OpenPype Publish Instance on given path.
+
+    This behaves similarly to :func:`create_container`.
+
+ Args:
+ path (str): Path where to create Publish Instance.
+ This path should point into container folder
+ instance (str): Publish Instance name
+
+ Returns:
+ :class:`unreal.Object`: instance of created asset
+
+ Example:
+
+ create_publish_instance(
+            "modelingFooCharacter_INST",
+            "/Game/modelingFooCharacter_INST"
+ )
+
+ """
+ factory = unreal.OpenPypePublishInstanceFactory()
+ tools = unreal.AssetToolsHelpers().get_asset_tools()
+ asset = tools.create_asset(instance, path, None, factory)
+ return asset
+
+
+def cast_map_to_str_dict(umap) -> dict:
+ """Cast Unreal Map to dict.
+
+ Helper function to cast Unreal Map object to plain old python
+ dict. This will also cast values and keys to str. Useful for
+ metadata dicts.
+
+ Args:
+ umap: Unreal Map object
+
+ Returns:
+ dict
+
+ """
+ return {str(key): str(value) for (key, value) in umap.items()}
diff --git a/openpype/hosts/unreal/api/plugin.py b/openpype/hosts/unreal/api/plugin.py
index 5a6b236730..b24bab831d 100644
--- a/openpype/hosts/unreal/api/plugin.py
+++ b/openpype/hosts/unreal/api/plugin.py
@@ -1,12 +1,17 @@
-from avalon import api
-import openpype.api
+# -*- coding: utf-8 -*-
+from abc import ABC
+
+from openpype.pipeline import (
+ LegacyCreator,
+ LoaderPlugin,
+)
-class Creator(openpype.api.Creator):
+class Creator(LegacyCreator):
"""This serves as skeleton for future OpenPype specific functionality"""
defaults = ['Main']
-class Loader(api.Loader):
+class Loader(LoaderPlugin, ABC):
"""This serves as skeleton for future OpenPype specific functionality"""
pass
diff --git a/openpype/hosts/unreal/hooks/pre_workfile_preparation.py b/openpype/hosts/unreal/hooks/pre_workfile_preparation.py
index 880dba5cfb..f07e96551c 100644
--- a/openpype/hosts/unreal/hooks/pre_workfile_preparation.py
+++ b/openpype/hosts/unreal/hooks/pre_workfile_preparation.py
@@ -10,7 +10,7 @@ from openpype.lib import (
get_workdir_data,
get_workfile_template_key
)
-from openpype.hosts.unreal.api import lib as unreal_lib
+import openpype.hosts.unreal.lib as unreal_lib
class UnrealPrelaunchHook(PreLaunchHook):
@@ -136,9 +136,9 @@ class UnrealPrelaunchHook(PreLaunchHook):
f"{self.signature} creating unreal "
f"project [ {unreal_project_name} ]"
))
- # Set "AVALON_UNREAL_PLUGIN" to current process environment for
+ # Set "OPENPYPE_UNREAL_PLUGIN" to current process environment for
# execution of `create_unreal_project`
- env_key = "AVALON_UNREAL_PLUGIN"
+ env_key = "OPENPYPE_UNREAL_PLUGIN"
if self.launch_context.env.get(env_key):
os.environ[env_key] = self.launch_context.env[env_key]
diff --git a/openpype/hosts/unreal/integration/.gitignore b/openpype/hosts/unreal/integration/.gitignore
new file mode 100644
index 0000000000..b32a6f55e5
--- /dev/null
+++ b/openpype/hosts/unreal/integration/.gitignore
@@ -0,0 +1,35 @@
+# Prerequisites
+*.d
+
+# Compiled Object files
+*.slo
+*.lo
+*.o
+*.obj
+
+# Precompiled Headers
+*.gch
+*.pch
+
+# Compiled Dynamic libraries
+*.so
+*.dylib
+*.dll
+
+# Fortran module files
+*.mod
+*.smod
+
+# Compiled Static libraries
+*.lai
+*.la
+*.a
+*.lib
+
+# Executables
+*.exe
+*.out
+*.app
+
+/Binaries
+/Intermediate
diff --git a/openpype/hosts/unreal/integration/Content/Python/init_unreal.py b/openpype/hosts/unreal/integration/Content/Python/init_unreal.py
new file mode 100644
index 0000000000..2ecd301c25
--- /dev/null
+++ b/openpype/hosts/unreal/integration/Content/Python/init_unreal.py
@@ -0,0 +1,34 @@
+import unreal
+
+openpype_detected = True
+try:
+ from avalon import api
+except ImportError as exc:
+ api = None
+ openpype_detected = False
+ unreal.log_error("Avalon: cannot load Avalon [ {} ]".format(exc))
+
+try:
+ from openpype.hosts.unreal import api as openpype_host
+except ImportError as exc:
+ openpype_host = None
+ openpype_detected = False
+ unreal.log_error("OpenPype: cannot load OpenPype [ {} ]".format(exc))
+
+if openpype_detected:
+ api.install(openpype_host)
+
+
+@unreal.uclass()
+class OpenPypeIntegration(unreal.OpenPypePythonBridge):
+ @unreal.ufunction(override=True)
+ def RunInPython_Popup(self):
+ unreal.log_warning("OpenPype: showing tools popup")
+ if openpype_detected:
+ openpype_host.show_tools_popup()
+
+ @unreal.ufunction(override=True)
+ def RunInPython_Dialog(self):
+ unreal.log_warning("OpenPype: showing tools dialog")
+ if openpype_detected:
+ openpype_host.show_tools_dialog()
diff --git a/openpype/hosts/unreal/integration/OpenPype.uplugin b/openpype/hosts/unreal/integration/OpenPype.uplugin
new file mode 100644
index 0000000000..4c7a74403c
--- /dev/null
+++ b/openpype/hosts/unreal/integration/OpenPype.uplugin
@@ -0,0 +1,24 @@
+{
+ "FileVersion": 3,
+ "Version": 1,
+ "VersionName": "1.0",
+ "FriendlyName": "OpenPype",
+ "Description": "OpenPype Integration",
+ "Category": "OpenPype.Integration",
+ "CreatedBy": "Ondrej Samohel",
+ "CreatedByURL": "https://openpype.io",
+ "DocsURL": "https://openpype.io/docs/artist_hosts_unreal",
+ "MarketplaceURL": "",
+ "SupportURL": "https://pype.club/",
+ "CanContainContent": true,
+ "IsBetaVersion": true,
+ "IsExperimentalVersion": false,
+ "Installed": false,
+ "Modules": [
+ {
+ "Name": "OpenPype",
+ "Type": "Editor",
+ "LoadingPhase": "Default"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/openpype/hosts/unreal/integration/README.md b/openpype/hosts/unreal/integration/README.md
new file mode 100644
index 0000000000..a32d89aab8
--- /dev/null
+++ b/openpype/hosts/unreal/integration/README.md
@@ -0,0 +1,11 @@
+# OpenPype Unreal Integration plugin
+
+This is a plugin for Unreal Editor, creating a menu to run [OpenPype](https://github.com/pypeclub/OpenPype) tools.
+
+## How does this work
+
+Plugin is creating basic menu items in **Window/OpenPype** section of Unreal Editor main menu and a button
+on the main toolbar with an associated menu. Clicking those menu items calls callbacks that are
+declared in C++ but need to be implemented during Unreal Editor
+startup in `Plugins/OpenPype/Content/Python/init_unreal.py` - this should be executed by Unreal Editor
+automatically.
diff --git a/openpype/hosts/unreal/integration/Resources/openpype128.png b/openpype/hosts/unreal/integration/Resources/openpype128.png
new file mode 100644
index 0000000000..abe8a807ef
Binary files /dev/null and b/openpype/hosts/unreal/integration/Resources/openpype128.png differ
diff --git a/openpype/hosts/unreal/integration/Resources/openpype40.png b/openpype/hosts/unreal/integration/Resources/openpype40.png
new file mode 100644
index 0000000000..f983e7a1f2
Binary files /dev/null and b/openpype/hosts/unreal/integration/Resources/openpype40.png differ
diff --git a/openpype/hosts/unreal/integration/Resources/openpype512.png b/openpype/hosts/unreal/integration/Resources/openpype512.png
new file mode 100644
index 0000000000..97c4d4326b
Binary files /dev/null and b/openpype/hosts/unreal/integration/Resources/openpype512.png differ
diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/OpenPype.Build.cs b/openpype/hosts/unreal/integration/Source/OpenPype/OpenPype.Build.cs
new file mode 100644
index 0000000000..c30835b63d
--- /dev/null
+++ b/openpype/hosts/unreal/integration/Source/OpenPype/OpenPype.Build.cs
@@ -0,0 +1,57 @@
+// Copyright 1998-2019 Epic Games, Inc. All Rights Reserved.
+
+using UnrealBuildTool;
+
+public class OpenPype : ModuleRules
+{
+ public OpenPype(ReadOnlyTargetRules Target) : base(Target)
+ {
+ PCHUsage = ModuleRules.PCHUsageMode.UseExplicitOrSharedPCHs;
+
+ PublicIncludePaths.AddRange(
+ new string[] {
+ // ... add public include paths required here ...
+ }
+ );
+
+
+ PrivateIncludePaths.AddRange(
+ new string[] {
+ // ... add other private include paths required here ...
+ }
+ );
+
+
+ PublicDependencyModuleNames.AddRange(
+ new string[]
+ {
+ "Core",
+ // ... add other public dependencies that you statically link with here ...
+ }
+ );
+
+
+ PrivateDependencyModuleNames.AddRange(
+ new string[]
+ {
+ "Projects",
+ "InputCore",
+ "UnrealEd",
+ "LevelEditor",
+ "CoreUObject",
+ "Engine",
+ "Slate",
+ "SlateCore",
+ // ... add private dependencies that you statically link with here ...
+ }
+ );
+
+
+ DynamicallyLoadedModuleNames.AddRange(
+ new string[]
+ {
+ // ... add any modules that your module loads dynamically here ...
+ }
+ );
+ }
+}
diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/AssetContainer.cpp b/openpype/hosts/unreal/integration/Source/OpenPype/Private/AssetContainer.cpp
new file mode 100644
index 0000000000..c766f87a8e
--- /dev/null
+++ b/openpype/hosts/unreal/integration/Source/OpenPype/Private/AssetContainer.cpp
@@ -0,0 +1,115 @@
+// Fill out your copyright notice in the Description page of Project Settings.
+
+#include "AssetContainer.h"
+#include "AssetRegistryModule.h"
+#include "Misc/PackageName.h"
+#include "Engine.h"
+#include "Containers/UnrealString.h"
+
+UAssetContainer::UAssetContainer(const FObjectInitializer& ObjectInitializer)
+: UAssetUserData(ObjectInitializer)
+{
+ FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked("AssetRegistry");
+ FString path = UAssetContainer::GetPathName();
+ UE_LOG(LogTemp, Warning, TEXT("UAssetContainer %s"), *path);
+ FARFilter Filter;
+ Filter.PackagePaths.Add(FName(*path));
+
+ AssetRegistryModule.Get().OnAssetAdded().AddUObject(this, &UAssetContainer::OnAssetAdded);
+ AssetRegistryModule.Get().OnAssetRemoved().AddUObject(this, &UAssetContainer::OnAssetRemoved);
+ AssetRegistryModule.Get().OnAssetRenamed().AddUObject(this, &UAssetContainer::OnAssetRenamed);
+}
+
+void UAssetContainer::OnAssetAdded(const FAssetData& AssetData)
+{
+ TArray split;
+
+ // get directory of current container
+ FString selfFullPath = UAssetContainer::GetPathName();
+ FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath);
+
+ // get asset path and class
+ FString assetPath = AssetData.GetFullName();
+ FString assetFName = AssetData.AssetClass.ToString();
+
+ // split path
+ assetPath.ParseIntoArray(split, TEXT(" "), true);
+
+ FString assetDir = FPackageName::GetLongPackagePath(*split[1]);
+
+ // take interest only in paths starting with path of current container
+ if (assetDir.StartsWith(*selfDir))
+ {
+ // exclude self
+ if (assetFName != "AssetContainer")
+ {
+ assets.Add(assetPath);
+ assetsData.Add(AssetData);
+ UE_LOG(LogTemp, Log, TEXT("%s: asset added to %s"), *selfFullPath, *selfDir);
+ }
+ }
+}
+
+void UAssetContainer::OnAssetRemoved(const FAssetData& AssetData)
+{
+ TArray split;
+
+ // get directory of current container
+ FString selfFullPath = UAssetContainer::GetPathName();
+ FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath);
+
+ // get asset path and class
+ FString assetPath = AssetData.GetFullName();
+ FString assetFName = AssetData.AssetClass.ToString();
+
+ // split path
+ assetPath.ParseIntoArray(split, TEXT(" "), true);
+
+ FString assetDir = FPackageName::GetLongPackagePath(*split[1]);
+
+ // take interest only in paths starting with path of current container
+ FString path = UAssetContainer::GetPathName();
+ FString lpp = FPackageName::GetLongPackagePath(*path);
+
+ if (assetDir.StartsWith(*selfDir))
+ {
+ // exclude self
+ if (assetFName != "AssetContainer")
+ {
+ // UE_LOG(LogTemp, Warning, TEXT("%s: asset removed"), *lpp);
+ assets.Remove(assetPath);
+ assetsData.Remove(AssetData);
+ }
+ }
+}
+
+void UAssetContainer::OnAssetRenamed(const FAssetData& AssetData, const FString& str)
+{
+ TArray split;
+
+ // get directory of current container
+ FString selfFullPath = UAssetContainer::GetPathName();
+ FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath);
+
+ // get asset path and class
+ FString assetPath = AssetData.GetFullName();
+ FString assetFName = AssetData.AssetClass.ToString();
+
+ // split path
+ assetPath.ParseIntoArray(split, TEXT(" "), true);
+
+ FString assetDir = FPackageName::GetLongPackagePath(*split[1]);
+ if (assetDir.StartsWith(*selfDir))
+ {
+ // exclude self
+ if (assetFName != "AssetContainer")
+ {
+
+ assets.Remove(str);
+ assets.Add(assetPath);
+ assetsData.Remove(AssetData);
+ // UE_LOG(LogTemp, Warning, TEXT("%s: asset renamed %s"), *lpp, *str);
+ }
+ }
+}
+
diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/AssetContainerFactory.cpp b/openpype/hosts/unreal/integration/Source/OpenPype/Private/AssetContainerFactory.cpp
new file mode 100644
index 0000000000..b943150bdd
--- /dev/null
+++ b/openpype/hosts/unreal/integration/Source/OpenPype/Private/AssetContainerFactory.cpp
@@ -0,0 +1,20 @@
+#include "AssetContainerFactory.h"
+#include "AssetContainer.h"
+
+UAssetContainerFactory::UAssetContainerFactory(const FObjectInitializer& ObjectInitializer)
+ : UFactory(ObjectInitializer)
+{
+ SupportedClass = UAssetContainer::StaticClass();
+ bCreateNew = false;
+ bEditorImport = true;
+}
+
+UObject* UAssetContainerFactory::FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn)
+{
+ UAssetContainer* AssetContainer = NewObject(InParent, Class, Name, Flags);
+ return AssetContainer;
+}
+
+bool UAssetContainerFactory::ShouldShowInNewMenu() const {
+ return false;
+}
diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPype.cpp b/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPype.cpp
new file mode 100644
index 0000000000..15c46b3862
--- /dev/null
+++ b/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPype.cpp
@@ -0,0 +1,103 @@
+#include "OpenPype.h"
+#include "LevelEditor.h"
+#include "OpenPypePythonBridge.h"
+#include "OpenPypeStyle.h"
+
+
+static const FName OpenPypeTabName("OpenPype");
+
+#define LOCTEXT_NAMESPACE "FOpenPypeModule"
+
+// This function is triggered when the plugin is starting up
+void FOpenPypeModule::StartupModule()
+{
+
+ FOpenPypeStyle::Initialize();
+ FOpenPypeStyle::SetIcon("Logo", "openpype40");
+
+ // Create the Extender that will add content to the menu
+ FLevelEditorModule& LevelEditorModule = FModuleManager::LoadModuleChecked("LevelEditor");
+
+ TSharedPtr MenuExtender = MakeShareable(new FExtender());
+ TSharedPtr ToolbarExtender = MakeShareable(new FExtender());
+
+ MenuExtender->AddMenuExtension(
+ "LevelEditor",
+ EExtensionHook::After,
+ NULL,
+ FMenuExtensionDelegate::CreateRaw(this, &FOpenPypeModule::AddMenuEntry)
+ );
+ ToolbarExtender->AddToolBarExtension(
+ "Settings",
+ EExtensionHook::After,
+ NULL,
+ FToolBarExtensionDelegate::CreateRaw(this, &FOpenPypeModule::AddToobarEntry));
+
+
+ LevelEditorModule.GetMenuExtensibilityManager()->AddExtender(MenuExtender);
+ LevelEditorModule.GetToolBarExtensibilityManager()->AddExtender(ToolbarExtender);
+
+}
+
+void FOpenPypeModule::ShutdownModule()
+{
+ FOpenPypeStyle::Shutdown();
+}
+
+
+void FOpenPypeModule::AddMenuEntry(FMenuBuilder& MenuBuilder)
+{
+ // Create Section
+ MenuBuilder.BeginSection("OpenPype", TAttribute(FText::FromString("OpenPype")));
+ {
+ // Create a Submenu inside of the Section
+ MenuBuilder.AddMenuEntry(
+ FText::FromString("Tools..."),
+ FText::FromString("Pipeline tools"),
+ FSlateIcon(FOpenPypeStyle::GetStyleSetName(), "OpenPype.Logo"),
+ FUIAction(FExecuteAction::CreateRaw(this, &FOpenPypeModule::MenuPopup))
+ );
+
+ MenuBuilder.AddMenuEntry(
+ FText::FromString("Tools dialog..."),
+ FText::FromString("Pipeline tools dialog"),
+ FSlateIcon(FOpenPypeStyle::GetStyleSetName(), "OpenPype.Logo"),
+ FUIAction(FExecuteAction::CreateRaw(this, &FOpenPypeModule::MenuDialog))
+ );
+
+ }
+ MenuBuilder.EndSection();
+}
+
+void FOpenPypeModule::AddToobarEntry(FToolBarBuilder& ToolbarBuilder)
+{
+ ToolbarBuilder.BeginSection(TEXT("OpenPype"));
+ {
+ ToolbarBuilder.AddToolBarButton(
+ FUIAction(
+ FExecuteAction::CreateRaw(this, &FOpenPypeModule::MenuPopup),
+ NULL,
+ FIsActionChecked()
+
+ ),
+ NAME_None,
+ LOCTEXT("OpenPype_label", "OpenPype"),
+ LOCTEXT("OpenPype_tooltip", "OpenPype Tools"),
+ FSlateIcon(FOpenPypeStyle::GetStyleSetName(), "OpenPype.Logo")
+ );
+ }
+ ToolbarBuilder.EndSection();
+}
+
+
+void FOpenPypeModule::MenuPopup() {
+ UOpenPypePythonBridge* bridge = UOpenPypePythonBridge::Get();
+ bridge->RunInPython_Popup();
+}
+
+void FOpenPypeModule::MenuDialog() {
+ UOpenPypePythonBridge* bridge = UOpenPypePythonBridge::Get();
+ bridge->RunInPython_Dialog();
+}
+
+IMPLEMENT_MODULE(FOpenPypeModule, OpenPype)
diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypeLib.cpp b/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypeLib.cpp
new file mode 100644
index 0000000000..5facab7b8b
--- /dev/null
+++ b/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypeLib.cpp
@@ -0,0 +1,48 @@
+#include "OpenPypeLib.h"
+#include "Misc/Paths.h"
+#include "Misc/ConfigCacheIni.h"
+#include "UObject/UnrealType.h"
+
+/**
+ * Sets color on folder icon on given path
+ * @param InPath - path to folder
+ * @param InFolderColor - color of the folder
+ * @warning This color will appear only after Editor restart. Is there a better way?
+ */
+
+void UOpenPypeLib::CSetFolderColor(FString FolderPath, FLinearColor FolderColor, bool bForceAdd)
+{
+ auto SaveColorInternal = [](FString InPath, FLinearColor InFolderColor)
+ {
+ // Saves the color of the folder to the config
+ if (FPaths::FileExists(GEditorPerProjectIni))
+ {
+ GConfig->SetString(TEXT("PathColor"), *InPath, *InFolderColor.ToString(), GEditorPerProjectIni);
+ }
+
+ };
+
+ SaveColorInternal(FolderPath, FolderColor);
+
+}
+/**
+ * Returns all properties on the given object
+ * @param cls - class
+ * @return TArray of properties
+ */
+TArray UOpenPypeLib::GetAllProperties(UClass* cls)
+{
+ TArray Ret;
+ if (cls != nullptr)
+ {
+ for (TFieldIterator It(cls); It; ++It)
+ {
+ FProperty* Property = *It;
+ if (Property->HasAnyPropertyFlags(EPropertyFlags::CPF_Edit))
+ {
+ Ret.Add(Property->GetName());
+ }
+ }
+ }
+ return Ret;
+}
diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePublishInstance.cpp b/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePublishInstance.cpp
new file mode 100644
index 0000000000..4f1e846c0b
--- /dev/null
+++ b/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePublishInstance.cpp
@@ -0,0 +1,108 @@
+#pragma once
+
+#include "OpenPypePublishInstance.h"
+#include "AssetRegistryModule.h"
+
+
+UOpenPypePublishInstance::UOpenPypePublishInstance(const FObjectInitializer& ObjectInitializer)
+ : UObject(ObjectInitializer)
+{
+ FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked("AssetRegistry");
+ FString path = UOpenPypePublishInstance::GetPathName();
+ FARFilter Filter;
+ Filter.PackagePaths.Add(FName(*path));
+
+ AssetRegistryModule.Get().OnAssetAdded().AddUObject(this, &UOpenPypePublishInstance::OnAssetAdded);
+ AssetRegistryModule.Get().OnAssetRemoved().AddUObject(this, &UOpenPypePublishInstance::OnAssetRemoved);
+ AssetRegistryModule.Get().OnAssetRenamed().AddUObject(this, &UOpenPypePublishInstance::OnAssetRenamed);
+}
+
+void UOpenPypePublishInstance::OnAssetAdded(const FAssetData& AssetData)
+{
+ TArray split;
+
+ // get directory of current container
+ FString selfFullPath = UOpenPypePublishInstance::GetPathName();
+ FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath);
+
+ // get asset path and class
+ FString assetPath = AssetData.GetFullName();
+ FString assetFName = AssetData.AssetClass.ToString();
+
+ // split path
+ assetPath.ParseIntoArray(split, TEXT(" "), true);
+
+ FString assetDir = FPackageName::GetLongPackagePath(*split[1]);
+
+ // take interest only in paths starting with path of current container
+ if (assetDir.StartsWith(*selfDir))
+ {
+ // exclude self
+ if (assetFName != "OpenPypePublishInstance")
+ {
+ assets.Add(assetPath);
+ UE_LOG(LogTemp, Log, TEXT("%s: asset added to %s"), *selfFullPath, *selfDir);
+ }
+ }
+}
+
+void UOpenPypePublishInstance::OnAssetRemoved(const FAssetData& AssetData)
+{
+ TArray split;
+
+ // get directory of current container
+ FString selfFullPath = UOpenPypePublishInstance::GetPathName();
+ FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath);
+
+ // get asset path and class
+ FString assetPath = AssetData.GetFullName();
+ FString assetFName = AssetData.AssetClass.ToString();
+
+ // split path
+ assetPath.ParseIntoArray(split, TEXT(" "), true);
+
+ FString assetDir = FPackageName::GetLongPackagePath(*split[1]);
+
+ // take interest only in paths starting with path of current container
+ FString path = UOpenPypePublishInstance::GetPathName();
+ FString lpp = FPackageName::GetLongPackagePath(*path);
+
+ if (assetDir.StartsWith(*selfDir))
+ {
+ // exclude self
+ if (assetFName != "OpenPypePublishInstance")
+ {
+ // UE_LOG(LogTemp, Warning, TEXT("%s: asset removed"), *lpp);
+ assets.Remove(assetPath);
+ }
+ }
+}
+
+void UOpenPypePublishInstance::OnAssetRenamed(const FAssetData& AssetData, const FString& str)
+{
+ TArray split;
+
+ // get directory of current container
+ FString selfFullPath = UOpenPypePublishInstance::GetPathName();
+ FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath);
+
+ // get asset path and class
+ FString assetPath = AssetData.GetFullName();
+ FString assetFName = AssetData.AssetClass.ToString();
+
+ // split path
+ assetPath.ParseIntoArray(split, TEXT(" "), true);
+
+ FString assetDir = FPackageName::GetLongPackagePath(*split[1]);
+ if (assetDir.StartsWith(*selfDir))
+ {
+ // exclude self
+ if (assetFName != "AssetContainer")
+ {
+
+ assets.Remove(str);
+ assets.Add(assetPath);
+ // UE_LOG(LogTemp, Warning, TEXT("%s: asset renamed %s"), *lpp, *str);
+ }
+ }
+}
diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp b/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp
new file mode 100644
index 0000000000..e61964c689
--- /dev/null
+++ b/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp
@@ -0,0 +1,20 @@
+#include "OpenPypePublishInstanceFactory.h"
+#include "OpenPypePublishInstance.h"
+
+UOpenPypePublishInstanceFactory::UOpenPypePublishInstanceFactory(const FObjectInitializer& ObjectInitializer)
+ : UFactory(ObjectInitializer)
+{
+ SupportedClass = UOpenPypePublishInstance::StaticClass();
+ bCreateNew = false;
+ bEditorImport = true;
+}
+
+UObject* UOpenPypePublishInstanceFactory::FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn)
+{
+ UOpenPypePublishInstance* OpenPypePublishInstance = NewObject<UOpenPypePublishInstance>(InParent, Class, Name, Flags);
+ return OpenPypePublishInstance;
+}
+
+bool UOpenPypePublishInstanceFactory::ShouldShowInNewMenu() const {
+ return false;
+}
diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePythonBridge.cpp b/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePythonBridge.cpp
new file mode 100644
index 0000000000..8113231503
--- /dev/null
+++ b/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePythonBridge.cpp
@@ -0,0 +1,13 @@
+#include "OpenPypePythonBridge.h"
+
+UOpenPypePythonBridge* UOpenPypePythonBridge::Get()
+{
+ TArray<UClass*> OpenPypePythonBridgeClasses;
+ GetDerivedClasses(UOpenPypePythonBridge::StaticClass(), OpenPypePythonBridgeClasses);
+ int32 NumClasses = OpenPypePythonBridgeClasses.Num();
+ if (NumClasses > 0)
+ {
+ return Cast<UOpenPypePythonBridge>(OpenPypePythonBridgeClasses[NumClasses - 1]->GetDefaultObject());
+ }
+ return nullptr;
+};
\ No newline at end of file
diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypeStyle.cpp b/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypeStyle.cpp
new file mode 100644
index 0000000000..a51c2d6aa5
--- /dev/null
+++ b/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypeStyle.cpp
@@ -0,0 +1,70 @@
+#include "OpenPypeStyle.h"
+#include "Framework/Application/SlateApplication.h"
+#include "Styling/SlateStyle.h"
+#include "Styling/SlateStyleRegistry.h"
+
+
+TUniquePtr< FSlateStyleSet > FOpenPypeStyle::OpenPypeStyleInstance = nullptr;
+
+void FOpenPypeStyle::Initialize()
+{
+ if (!OpenPypeStyleInstance.IsValid())
+ {
+ OpenPypeStyleInstance = Create();
+ FSlateStyleRegistry::RegisterSlateStyle(*OpenPypeStyleInstance);
+ }
+}
+
+void FOpenPypeStyle::Shutdown()
+{
+ if (OpenPypeStyleInstance.IsValid())
+ {
+ FSlateStyleRegistry::UnRegisterSlateStyle(*OpenPypeStyleInstance);
+ OpenPypeStyleInstance.Reset();
+ }
+}
+
+FName FOpenPypeStyle::GetStyleSetName()
+{
+ static FName StyleSetName(TEXT("OpenPypeStyle"));
+ return StyleSetName;
+}
+
+FName FOpenPypeStyle::GetContextName()
+{
+ static FName ContextName(TEXT("OpenPype"));
+ return ContextName;
+}
+
+#define IMAGE_BRUSH(RelativePath, ...) FSlateImageBrush( Style->RootToContentDir( RelativePath, TEXT(".png") ), __VA_ARGS__ )
+
+const FVector2D Icon40x40(40.0f, 40.0f);
+
+TUniquePtr< FSlateStyleSet > FOpenPypeStyle::Create()
+{
+ TUniquePtr< FSlateStyleSet > Style = MakeUnique<FSlateStyleSet>(GetStyleSetName());
+ Style->SetContentRoot(FPaths::ProjectPluginsDir() / TEXT("OpenPype/Resources"));
+
+ return Style;
+}
+
+void FOpenPypeStyle::SetIcon(const FString& StyleName, const FString& ResourcePath)
+{
+ FSlateStyleSet* Style = OpenPypeStyleInstance.Get();
+
+ FString Name(GetContextName().ToString());
+ Name = Name + "." + StyleName;
+ Style->Set(*Name, new FSlateImageBrush(Style->RootToContentDir(ResourcePath, TEXT(".png")), Icon40x40));
+
+
+ FSlateApplication::Get().GetRenderer()->ReloadTextureResources();
+}
+
+#undef IMAGE_BRUSH
+
+const ISlateStyle& FOpenPypeStyle::Get()
+{
+ check(OpenPypeStyleInstance);
+ return *OpenPypeStyleInstance;
+
+}
diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/AssetContainer.h b/openpype/hosts/unreal/integration/Source/OpenPype/Public/AssetContainer.h
new file mode 100644
index 0000000000..3c2a360c78
--- /dev/null
+++ b/openpype/hosts/unreal/integration/Source/OpenPype/Public/AssetContainer.h
@@ -0,0 +1,39 @@
+// Fill out your copyright notice in the Description page of Project Settings.
+
+#pragma once
+
+#include "CoreMinimal.h"
+#include "UObject/NoExportTypes.h"
+#include "Engine/AssetUserData.h"
+#include "AssetData.h"
+#include "AssetContainer.generated.h"
+
+/**
+ *
+ */
+UCLASS(Blueprintable)
+class OPENPYPE_API UAssetContainer : public UAssetUserData
+{
+ GENERATED_BODY()
+
+public:
+
+ UAssetContainer(const FObjectInitializer& ObjectInitializer);
+ // ~UAssetContainer();
+
+ UPROPERTY(EditAnywhere, BlueprintReadOnly)
+ TArray<FString> assets;
+
+ // There seems to be no reflection option to expose array of FAssetData
+ /*
+ UPROPERTY(Transient, BlueprintReadOnly, Category = "Python", meta=(DisplayName="Assets Data"))
+ TArray<FAssetData> assetsData;
+ */
+private:
+ TArray<FAssetData> assetsData;
+ void OnAssetAdded(const FAssetData& AssetData);
+ void OnAssetRemoved(const FAssetData& AssetData);
+ void OnAssetRenamed(const FAssetData& AssetData, const FString& str);
+};
+
+
diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/AssetContainerFactory.h b/openpype/hosts/unreal/integration/Source/OpenPype/Public/AssetContainerFactory.h
new file mode 100644
index 0000000000..331ce6bb50
--- /dev/null
+++ b/openpype/hosts/unreal/integration/Source/OpenPype/Public/AssetContainerFactory.h
@@ -0,0 +1,21 @@
+// Fill out your copyright notice in the Description page of Project Settings.
+
+#pragma once
+
+#include "CoreMinimal.h"
+#include "Factories/Factory.h"
+#include "AssetContainerFactory.generated.h"
+
+/**
+ *
+ */
+UCLASS()
+class OPENPYPE_API UAssetContainerFactory : public UFactory
+{
+ GENERATED_BODY()
+
+public:
+ UAssetContainerFactory(const FObjectInitializer& ObjectInitializer);
+ virtual UObject* FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) override;
+ virtual bool ShouldShowInNewMenu() const override;
+};
\ No newline at end of file
diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPype.h b/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPype.h
new file mode 100644
index 0000000000..db3f299354
--- /dev/null
+++ b/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPype.h
@@ -0,0 +1,21 @@
+// Copyright 1998-2019 Epic Games, Inc. All Rights Reserved.
+
+#pragma once
+
+#include "Engine.h"
+
+
+class FOpenPypeModule : public IModuleInterface
+{
+public:
+ virtual void StartupModule() override;
+ virtual void ShutdownModule() override;
+
+private:
+
+ void AddMenuEntry(FMenuBuilder& MenuBuilder);
+ void AddToobarEntry(FToolBarBuilder& ToolbarBuilder);
+ void MenuPopup();
+ void MenuDialog();
+
+};
diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypeLib.h b/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypeLib.h
new file mode 100644
index 0000000000..59e9c8bd76
--- /dev/null
+++ b/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypeLib.h
@@ -0,0 +1,19 @@
+#pragma once
+
+#include "Engine.h"
+#include "OpenPypeLib.generated.h"
+
+
+UCLASS(Blueprintable)
+class OPENPYPE_API UOpenPypeLib : public UObject
+{
+
+ GENERATED_BODY()
+
+public:
+ UFUNCTION(BlueprintCallable, Category = Python)
+ static void CSetFolderColor(FString FolderPath, FLinearColor FolderColor, bool bForceAdd);
+
+ UFUNCTION(BlueprintCallable, Category = Python)
+ static TArray<FString> GetAllProperties(UClass* cls);
+};
\ No newline at end of file
diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePublishInstance.h b/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePublishInstance.h
new file mode 100644
index 0000000000..0a27a078d7
--- /dev/null
+++ b/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePublishInstance.h
@@ -0,0 +1,21 @@
+#pragma once
+
+#include "Engine.h"
+#include "OpenPypePublishInstance.generated.h"
+
+
+UCLASS(Blueprintable)
+class OPENPYPE_API UOpenPypePublishInstance : public UObject
+{
+ GENERATED_BODY()
+
+public:
+ UOpenPypePublishInstance(const FObjectInitializer& ObjectInitializer);
+
+ UPROPERTY(EditAnywhere, BlueprintReadOnly)
+ TArray<FString> assets;
+private:
+ void OnAssetAdded(const FAssetData& AssetData);
+ void OnAssetRemoved(const FAssetData& AssetData);
+ void OnAssetRenamed(const FAssetData& AssetData, const FString& str);
+};
\ No newline at end of file
diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h b/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h
new file mode 100644
index 0000000000..a2b3abe13e
--- /dev/null
+++ b/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h
@@ -0,0 +1,19 @@
+#pragma once
+
+#include "CoreMinimal.h"
+#include "Factories/Factory.h"
+#include "OpenPypePublishInstanceFactory.generated.h"
+
+/**
+ *
+ */
+UCLASS()
+class OPENPYPE_API UOpenPypePublishInstanceFactory : public UFactory
+{
+ GENERATED_BODY()
+
+public:
+ UOpenPypePublishInstanceFactory(const FObjectInitializer& ObjectInitializer);
+ virtual UObject* FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) override;
+ virtual bool ShouldShowInNewMenu() const override;
+};
\ No newline at end of file
diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePythonBridge.h b/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePythonBridge.h
new file mode 100644
index 0000000000..692aab2e5e
--- /dev/null
+++ b/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePythonBridge.h
@@ -0,0 +1,20 @@
+#pragma once
+#include "Engine.h"
+#include "OpenPypePythonBridge.generated.h"
+
+UCLASS(Blueprintable)
+class UOpenPypePythonBridge : public UObject
+{
+ GENERATED_BODY()
+
+public:
+ UFUNCTION(BlueprintCallable, Category = Python)
+ static UOpenPypePythonBridge* Get();
+
+ UFUNCTION(BlueprintImplementableEvent, Category = Python)
+ void RunInPython_Popup() const;
+
+ UFUNCTION(BlueprintImplementableEvent, Category = Python)
+ void RunInPython_Dialog() const;
+
+};
diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypeStyle.h b/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypeStyle.h
new file mode 100644
index 0000000000..fbc8bcdd5b
--- /dev/null
+++ b/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypeStyle.h
@@ -0,0 +1,22 @@
+#pragma once
+#include "CoreMinimal.h"
+
+class FSlateStyleSet;
+class ISlateStyle;
+
+
+class FOpenPypeStyle
+{
+public:
+ static void Initialize();
+ static void Shutdown();
+ static const ISlateStyle& Get();
+ static FName GetStyleSetName();
+ static FName GetContextName();
+
+ static void SetIcon(const FString& StyleName, const FString& ResourcePath);
+
+private:
+ static TUniquePtr< FSlateStyleSet > Create();
+ static TUniquePtr< FSlateStyleSet > OpenPypeStyleInstance;
+};
\ No newline at end of file
diff --git a/openpype/hosts/unreal/api/lib.py b/openpype/hosts/unreal/lib.py
similarity index 92%
rename from openpype/hosts/unreal/api/lib.py
rename to openpype/hosts/unreal/lib.py
index 61dac46fac..d4a776e892 100644
--- a/openpype/hosts/unreal/api/lib.py
+++ b/openpype/hosts/unreal/lib.py
@@ -169,11 +169,11 @@ def create_unreal_project(project_name: str,
env: dict = None) -> None:
"""This will create `.uproject` file at specified location.
- As there is no way I know to create project via command line, this is
- easiest option. Unreal project file is basically JSON file. If we find
- `AVALON_UNREAL_PLUGIN` environment variable we assume this is location
- of Avalon Integration Plugin and we copy its content to project folder
- and enable this plugin.
+ As there is no way I know to create a project via command line, this is
+ easiest option. Unreal project file is basically a JSON file. If we find
+ the `OPENPYPE_UNREAL_PLUGIN` environment variable we assume this is the
+ location of the Integration Plugin and we copy its content to the project
+ folder and enable this plugin.
Args:
project_name (str): Name of the project.
@@ -230,18 +230,18 @@ def create_unreal_project(project_name: str,
ue_id = "{" + loaded_modules.get("BuildId") + "}"
plugins_path = None
- if os.path.isdir(env.get("AVALON_UNREAL_PLUGIN", "")):
+ if os.path.isdir(env.get("OPENPYPE_UNREAL_PLUGIN", "")):
# copy plugin to correct path under project
plugins_path = pr_dir / "Plugins"
- avalon_plugin_path = plugins_path / "Avalon"
- if not avalon_plugin_path.is_dir():
- avalon_plugin_path.mkdir(parents=True, exist_ok=True)
+ openpype_plugin_path = plugins_path / "OpenPype"
+ if not openpype_plugin_path.is_dir():
+ openpype_plugin_path.mkdir(parents=True, exist_ok=True)
dir_util._path_created = {}
- dir_util.copy_tree(os.environ.get("AVALON_UNREAL_PLUGIN"),
- avalon_plugin_path.as_posix())
+ dir_util.copy_tree(os.environ.get("OPENPYPE_UNREAL_PLUGIN"),
+ openpype_plugin_path.as_posix())
- if not (avalon_plugin_path / "Binaries").is_dir() \
- or not (avalon_plugin_path / "Intermediate").is_dir():
+ if not (openpype_plugin_path / "Binaries").is_dir() \
+ or not (openpype_plugin_path / "Intermediate").is_dir():
dev_mode = True
# data for project file
@@ -254,14 +254,14 @@ def create_unreal_project(project_name: str,
{"Name": "PythonScriptPlugin", "Enabled": True},
{"Name": "EditorScriptingUtilities", "Enabled": True},
{"Name": "SequencerScripting", "Enabled": True},
- {"Name": "Avalon", "Enabled": True}
+ {"Name": "OpenPype", "Enabled": True}
]
}
if dev_mode or preset["dev_mode"]:
- # this will add project module and necessary source file to make it
- # C++ project and to (hopefully) make Unreal Editor to compile all
- # sources at start
+ # this will add the project module and necessary source file to
+ # make it a C++ project and to (hopefully) make Unreal Editor to
+ # compile all sources at start
data["Modules"] = [{
"Name": project_name,
@@ -304,7 +304,7 @@ def _prepare_cpp_project(project_file: Path, engine_path: Path) -> None:
"""Prepare CPP Unreal Project.
This function will add source files needed for project to be
- rebuild along with the avalon integration plugin.
+ rebuild along with the OpenPype integration plugin.
There seems not to be automated way to do it from command line.
But there might be way to create at least those target and build files
diff --git a/openpype/hosts/unreal/plugins/create/create_camera.py b/openpype/hosts/unreal/plugins/create/create_camera.py
index eda2b52be3..c2905fb6dd 100644
--- a/openpype/hosts/unreal/plugins/create/create_camera.py
+++ b/openpype/hosts/unreal/plugins/create/create_camera.py
@@ -16,7 +16,7 @@ class CreateCamera(Creator):
family = "camera"
icon = "cubes"
- root = "/Game/Avalon/Instances"
+ root = "/Game/OpenPype/Instances"
suffix = "_INS"
def __init__(self, *args, **kwargs):
diff --git a/openpype/hosts/unreal/plugins/create/create_layout.py b/openpype/hosts/unreal/plugins/create/create_layout.py
index 239b72787b..00e83cf433 100644
--- a/openpype/hosts/unreal/plugins/create/create_layout.py
+++ b/openpype/hosts/unreal/plugins/create/create_layout.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
from unreal import EditorLevelLibrary as ell
from openpype.hosts.unreal.api.plugin import Creator
from avalon.unreal import (
@@ -6,7 +7,7 @@ from avalon.unreal import (
class CreateLayout(Creator):
- """Layout output for character rigs"""
+ """Layout output for character rigs."""
name = "layoutMain"
label = "Layout"
diff --git a/openpype/hosts/unreal/plugins/create/create_look.py b/openpype/hosts/unreal/plugins/create/create_look.py
index 7d3913b883..59c40d3e74 100644
--- a/openpype/hosts/unreal/plugins/create/create_look.py
+++ b/openpype/hosts/unreal/plugins/create/create_look.py
@@ -1,10 +1,12 @@
-import unreal
+# -*- coding: utf-8 -*-
+"""Create look in Unreal."""
+import unreal # noqa
from openpype.hosts.unreal.api.plugin import Creator
-from avalon.unreal import pipeline
+from openpype.hosts.unreal.api import pipeline
class CreateLook(Creator):
- """Shader connections defining shape look"""
+ """Shader connections defining shape look."""
name = "unrealLook"
label = "Unreal - Look"
@@ -49,14 +51,14 @@ class CreateLook(Creator):
for material in materials:
name = material.get_editor_property('material_slot_name')
object_path = f"{full_path}/{name}.{name}"
- object = unreal.EditorAssetLibrary.duplicate_loaded_asset(
+ unreal_object = unreal.EditorAssetLibrary.duplicate_loaded_asset(
cube.get_asset(), object_path
)
# Remove the default material of the cube object
- object.get_editor_property('static_materials').pop()
+ unreal_object.get_editor_property('static_materials').pop()
- object.add_material(
+ unreal_object.add_material(
material.get_editor_property('material_interface'))
self.data["members"].append(object_path)
diff --git a/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py b/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py
index 4cc67e0f1f..700eac7366 100644
--- a/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py
+++ b/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py
@@ -1,12 +1,14 @@
-import unreal
+# -*- coding: utf-8 -*-
+"""Create Static Meshes as FBX geometry."""
+import unreal # noqa
from openpype.hosts.unreal.api.plugin import Creator
-from avalon.unreal import (
+from openpype.hosts.unreal.api.pipeline import (
instantiate,
)
class CreateStaticMeshFBX(Creator):
- """Static FBX geometry"""
+ """Static FBX geometry."""
name = "unrealStaticMeshMain"
label = "Unreal - Static Mesh"
diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py b/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py
index e2023e8b47..3508fe5ed7 100644
--- a/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py
+++ b/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py
@@ -1,12 +1,16 @@
+# -*- coding: utf-8 -*-
+"""Loader for published alembics."""
import os
-from avalon import api, pipeline
-from avalon.unreal import lib
-from avalon.unreal import pipeline as unreal_pipeline
-import unreal
+from avalon import pipeline
+from openpype.pipeline import get_representation_path
+from openpype.hosts.unreal.api import plugin
+from openpype.hosts.unreal.api import pipeline as unreal_pipeline
+
+import unreal # noqa
-class PointCacheAlembicLoader(api.Loader):
+class PointCacheAlembicLoader(plugin.Loader):
"""Load Point Cache from Alembic"""
families = ["model", "pointcache"]
@@ -56,8 +60,7 @@ class PointCacheAlembicLoader(api.Loader):
return task
def load(self, context, name, namespace, data):
- """
- Load and containerise representation into Content Browser.
+ """Load and containerise representation into Content Browser.
This is two step process. First, import FBX to temporary path and
then call `containerise()` on it - this moves all content to new
@@ -76,10 +79,10 @@ class PointCacheAlembicLoader(api.Loader):
Returns:
list(str): list of container content
- """
- # Create directory for asset and avalon container
- root = "/Game/Avalon/Assets"
+ """
+ # Create directory for asset and OpenPype container
+ root = "/Game/OpenPype/Assets"
asset = context.get('asset').get('name')
suffix = "_CON"
if asset:
@@ -109,7 +112,7 @@ class PointCacheAlembicLoader(api.Loader):
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501
# Create Asset Container
- lib.create_avalon_container(
+ unreal_pipeline.create_container(
container=container_name, path=asset_dir)
data = {
@@ -138,7 +141,7 @@ class PointCacheAlembicLoader(api.Loader):
def update(self, container, representation):
name = container["asset_name"]
- source_path = api.get_representation_path(representation)
+ source_path = get_representation_path(representation)
destination_path = container["namespace"]
task = self.get_task(source_path, destination_path, name, True)
diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py
index b652af0b89..180942de51 100644
--- a/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py
+++ b/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py
@@ -1,12 +1,15 @@
+# -*- coding: utf-8 -*-
+"""Load Skeletal Mesh alembics."""
import os
-from avalon import api, pipeline
-from avalon.unreal import lib
-from avalon.unreal import pipeline as unreal_pipeline
-import unreal
+from avalon import pipeline
+from openpype.pipeline import get_representation_path
+from openpype.hosts.unreal.api import plugin
+from openpype.hosts.unreal.api import pipeline as unreal_pipeline
+import unreal # noqa
-class SkeletalMeshAlembicLoader(api.Loader):
+class SkeletalMeshAlembicLoader(plugin.Loader):
"""Load Unreal SkeletalMesh from Alembic"""
families = ["pointcache"]
@@ -16,8 +19,7 @@ class SkeletalMeshAlembicLoader(api.Loader):
color = "orange"
def load(self, context, name, namespace, data):
- """
- Load and containerise representation into Content Browser.
+ """Load and containerise representation into Content Browser.
This is two step process. First, import FBX to temporary path and
then call `containerise()` on it - this moves all content to new
@@ -38,8 +40,8 @@ class SkeletalMeshAlembicLoader(api.Loader):
list(str): list of container content
"""
- # Create directory for asset and avalon container
- root = "/Game/Avalon/Assets"
+ # Create directory for asset and openpype container
+ root = "/Game/OpenPype/Assets"
asset = context.get('asset').get('name')
suffix = "_CON"
if asset:
@@ -74,7 +76,7 @@ class SkeletalMeshAlembicLoader(api.Loader):
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501
# Create Asset Container
- lib.create_avalon_container(
+ unreal_pipeline.create_container(
container=container_name, path=asset_dir)
data = {
@@ -103,7 +105,7 @@ class SkeletalMeshAlembicLoader(api.Loader):
def update(self, container, representation):
name = container["asset_name"]
- source_path = api.get_representation_path(representation)
+ source_path = get_representation_path(representation)
destination_path = container["namespace"]
task = unreal.AssetImportTask()
diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py
index ccec31b832..4e00af1d97 100644
--- a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py
+++ b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py
@@ -1,12 +1,15 @@
+# -*- coding: utf-8 -*-
+"""Loader for Static Mesh alembics."""
import os
-from avalon import api, pipeline
-from avalon.unreal import lib
-from avalon.unreal import pipeline as unreal_pipeline
-import unreal
+from avalon import pipeline
+from openpype.pipeline import get_representation_path
+from openpype.hosts.unreal.api import plugin
+from openpype.hosts.unreal.api import pipeline as unreal_pipeline
+import unreal # noqa
-class StaticMeshAlembicLoader(api.Loader):
+class StaticMeshAlembicLoader(plugin.Loader):
"""Load Unreal StaticMesh from Alembic"""
families = ["model"]
@@ -49,8 +52,7 @@ class StaticMeshAlembicLoader(api.Loader):
return task
def load(self, context, name, namespace, data):
- """
- Load and containerise representation into Content Browser.
+ """Load and containerise representation into Content Browser.
This is two step process. First, import FBX to temporary path and
then call `containerise()` on it - this moves all content to new
@@ -69,10 +71,10 @@ class StaticMeshAlembicLoader(api.Loader):
Returns:
list(str): list of container content
- """
- # Create directory for asset and avalon container
- root = "/Game/Avalon/Assets"
+ """
+ # Create directory for asset and OpenPype container
+ root = "/Game/OpenPype/Assets"
asset = context.get('asset').get('name')
suffix = "_CON"
if asset:
@@ -93,7 +95,7 @@ class StaticMeshAlembicLoader(api.Loader):
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501
# Create Asset Container
- lib.create_avalon_container(
+ unreal_pipeline.create_container(
container=container_name, path=asset_dir)
data = {
@@ -122,7 +124,7 @@ class StaticMeshAlembicLoader(api.Loader):
def update(self, container, representation):
name = container["asset_name"]
- source_path = api.get_representation_path(representation)
+ source_path = get_representation_path(representation)
destination_path = container["namespace"]
task = self.get_task(source_path, destination_path, name, True)
diff --git a/openpype/hosts/unreal/plugins/load/load_animation.py b/openpype/hosts/unreal/plugins/load/load_animation.py
index 20baa30847..8ef81f7851 100644
--- a/openpype/hosts/unreal/plugins/load/load_animation.py
+++ b/openpype/hosts/unreal/plugins/load/load_animation.py
@@ -1,14 +1,17 @@
+# -*- coding: utf-8 -*-
+"""Load FBX with animations."""
import os
import json
-from avalon import api, pipeline
-from avalon.unreal import lib
-from avalon.unreal import pipeline as unreal_pipeline
-import unreal
+from avalon import pipeline
+from openpype.pipeline import get_representation_path
+from openpype.hosts.unreal.api import plugin
+from openpype.hosts.unreal.api import pipeline as unreal_pipeline
+import unreal # noqa
-class AnimationFBXLoader(api.Loader):
- """Load Unreal SkeletalMesh from FBX"""
+class AnimationFBXLoader(plugin.Loader):
+ """Load Unreal SkeletalMesh from FBX."""
families = ["animation"]
label = "Import FBX Animation"
@@ -37,10 +40,10 @@ class AnimationFBXLoader(api.Loader):
Returns:
list(str): list of container content
- """
- # Create directory for asset and avalon container
- root = "/Game/Avalon/Assets"
+ """
+ # Create directory for asset and OpenPype container
+ root = "/Game/OpenPype/Assets"
asset = context.get('asset').get('name')
suffix = "_CON"
if asset:
@@ -62,9 +65,9 @@ class AnimationFBXLoader(api.Loader):
task = unreal.AssetImportTask()
task.options = unreal.FbxImportUI()
- libpath = self.fname.replace("fbx", "json")
+ lib_path = self.fname.replace("fbx", "json")
- with open(libpath, "r") as fp:
+ with open(lib_path, "r") as fp:
data = json.load(fp)
instance_name = data.get("instance_name")
@@ -127,7 +130,7 @@ class AnimationFBXLoader(api.Loader):
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
# Create Asset Container
- lib.create_avalon_container(
+ unreal_pipeline.create_container(
container=container_name, path=asset_dir)
data = {
@@ -171,7 +174,7 @@ class AnimationFBXLoader(api.Loader):
def update(self, container, representation):
name = container["asset_name"]
- source_path = api.get_representation_path(representation)
+ source_path = get_representation_path(representation)
destination_path = container["namespace"]
task = unreal.AssetImportTask()
diff --git a/openpype/hosts/unreal/plugins/load/load_camera.py b/openpype/hosts/unreal/plugins/load/load_camera.py
index b2b25eec73..0de9470ef9 100644
--- a/openpype/hosts/unreal/plugins/load/load_camera.py
+++ b/openpype/hosts/unreal/plugins/load/load_camera.py
@@ -1,12 +1,14 @@
+# -*- coding: utf-8 -*-
+"""Load camera from FBX."""
import os
-from avalon import api, io, pipeline
-from avalon.unreal import lib
-from avalon.unreal import pipeline as unreal_pipeline
-import unreal
+from avalon import io, pipeline
+from openpype.hosts.unreal.api import plugin
+from openpype.hosts.unreal.api import pipeline as unreal_pipeline
+import unreal # noqa
-class CameraLoader(api.Loader):
+class CameraLoader(plugin.Loader):
"""Load Unreal StaticMesh from FBX"""
families = ["camera"]
@@ -38,8 +40,8 @@ class CameraLoader(api.Loader):
list(str): list of container content
"""
- # Create directory for asset and avalon container
- root = "/Game/Avalon/Assets"
+ # Create directory for asset and OpenPype container
+ root = "/Game/OpenPype/Assets"
asset = context.get('asset').get('name')
suffix = "_CON"
if asset:
@@ -109,7 +111,8 @@ class CameraLoader(api.Loader):
)
# Create Asset Container
- lib.create_avalon_container(container=container_name, path=asset_dir)
+ unreal_pipeline.create_container(
+ container=container_name, path=asset_dir)
data = {
"schema": "openpype:container-2.0",
diff --git a/openpype/hosts/unreal/plugins/load/load_layout.py b/openpype/hosts/unreal/plugins/load/load_layout.py
index 19d0b74e3e..19ee179d20 100644
--- a/openpype/hosts/unreal/plugins/load/load_layout.py
+++ b/openpype/hosts/unreal/plugins/load/load_layout.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+"""Loader for layouts."""
import os
import json
from pathlib import Path
@@ -9,12 +11,18 @@ from unreal import AssetToolsHelpers
from unreal import FBXImportType
from unreal import MathLibrary as umath
-from avalon import api, pipeline
-from avalon.unreal import lib
-from avalon.unreal import pipeline as unreal_pipeline
+from avalon.pipeline import AVALON_CONTAINER_ID
+from openpype.pipeline import (
+ discover_loader_plugins,
+ loaders_from_representation,
+ load_container,
+ get_representation_path,
+)
+from openpype.hosts.unreal.api import plugin
+from openpype.hosts.unreal.api import pipeline as unreal_pipeline
-class LayoutLoader(api.Loader):
+class LayoutLoader(plugin.Loader):
"""Load Layout from a JSON file"""
families = ["layout"]
@@ -23,6 +31,7 @@ class LayoutLoader(api.Loader):
label = "Load Layout"
icon = "code-fork"
color = "orange"
+ ASSET_ROOT = "/Game/OpenPype/Assets"
def _get_asset_containers(self, path):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
@@ -40,7 +49,8 @@ class LayoutLoader(api.Loader):
return asset_containers
- def _get_fbx_loader(self, loaders, family):
+ @staticmethod
+ def _get_fbx_loader(loaders, family):
name = ""
if family == 'rig':
name = "SkeletalMeshFBXLoader"
@@ -58,7 +68,8 @@ class LayoutLoader(api.Loader):
return None
- def _get_abc_loader(self, loaders, family):
+ @staticmethod
+ def _get_abc_loader(loaders, family):
name = ""
if family == 'rig':
name = "SkeletalMeshAlembicLoader"
@@ -74,14 +85,15 @@ class LayoutLoader(api.Loader):
return None
- def _process_family(self, assets, classname, transform, inst_name=None):
+ @staticmethod
+ def _process_family(assets, class_name, transform, inst_name=None):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
actors = []
for asset in assets:
obj = ar.get_asset_by_object_path(asset).get_asset()
- if obj.get_class().get_name() == classname:
+ if obj.get_class().get_name() == class_name:
actor = EditorLevelLibrary.spawn_actor_from_object(
obj,
transform.get('translation')
@@ -111,8 +123,9 @@ class LayoutLoader(api.Loader):
return actors
+ @staticmethod
def _import_animation(
- self, asset_dir, path, instance_name, skeleton, actors_dict,
+ asset_dir, path, instance_name, skeleton, actors_dict,
animation_file):
anim_file = Path(animation_file)
anim_file_name = anim_file.with_suffix('')
@@ -192,18 +205,18 @@ class LayoutLoader(api.Loader):
actor.skeletal_mesh_component.animation_data.set_editor_property(
'anim_to_play', animation)
- def _process(self, libpath, asset_dir, loaded=None):
+ def _process(self, lib_path, asset_dir, loaded=None):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
- with open(libpath, "r") as fp:
+ with open(lib_path, "r") as fp:
data = json.load(fp)
- all_loaders = api.discover(api.Loader)
+ all_loaders = discover_loader_plugins()
if not loaded:
loaded = []
- path = Path(libpath)
+ path = Path(lib_path)
skeleton_dict = {}
actors_dict = {}
@@ -228,7 +241,7 @@ class LayoutLoader(api.Loader):
loaded.append(reference)
family = element.get('family')
- loaders = api.loaders_from_representation(
+ loaders = loaders_from_representation(
all_loaders, reference)
loader = None
@@ -245,7 +258,7 @@ class LayoutLoader(api.Loader):
"asset_dir": asset_dir
}
- assets = api.load(
+ assets = load_container(
loader,
reference,
namespace=instance_name,
@@ -292,17 +305,18 @@ class LayoutLoader(api.Loader):
asset_dir, path, instance_name, skeleton,
actors_dict, animation_file)
- def _remove_family(self, assets, components, classname, propname):
+ @staticmethod
+ def _remove_family(assets, components, class_name, prop_name):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
objects = []
for a in assets:
obj = ar.get_asset_by_object_path(a)
- if obj.get_asset().get_class().get_name() == classname:
+ if obj.get_asset().get_class().get_name() == class_name:
objects.append(obj)
for obj in objects:
for comp in components:
- if comp.get_editor_property(propname) == obj.get_asset():
+ if comp.get_editor_property(prop_name) == obj.get_asset():
comp.get_owner().destroy_actor()
def _remove_actors(self, path):
@@ -334,8 +348,7 @@ class LayoutLoader(api.Loader):
assets, skel_meshes_comp, 'SkeletalMesh', 'skeletal_mesh')
def load(self, context, name, namespace, options):
- """
- Load and containerise representation into Content Browser.
+ """Load and containerise representation into Content Browser.
This is two step process. First, import FBX to temporary path and
then call `containerise()` on it - this moves all content to new
@@ -349,14 +362,14 @@ class LayoutLoader(api.Loader):
This is not passed here, so namespace is set
by `containerise()` because only then we know
real path.
- data (dict): Those would be data to be imprinted. This is not used
- now, data are imprinted by `containerise()`.
+ options (dict): Those would be data to be imprinted. This is not
+ used now, data are imprinted by `containerise()`.
Returns:
list(str): list of container content
"""
# Create directory for asset and avalon container
- root = "/Game/Avalon/Assets"
+ root = self.ASSET_ROOT
asset = context.get('asset').get('name')
suffix = "_CON"
if asset:
@@ -375,12 +388,12 @@ class LayoutLoader(api.Loader):
self._process(self.fname, asset_dir)
# Create Asset Container
- lib.create_avalon_container(
+ unreal_pipeline.create_container(
container=container_name, path=asset_dir)
data = {
"schema": "openpype:container-2.0",
- "id": pipeline.AVALON_CONTAINER_ID,
+ "id": AVALON_CONTAINER_ID,
"asset": asset,
"namespace": asset_dir,
"container_name": container_name,
@@ -404,9 +417,9 @@ class LayoutLoader(api.Loader):
def update(self, container, representation):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
- source_path = api.get_representation_path(representation)
+ source_path = get_representation_path(representation)
destination_path = container["namespace"]
- libpath = Path(api.get_representation_path(representation))
+ lib_path = Path(get_representation_path(representation))
self._remove_actors(destination_path)
@@ -502,7 +515,7 @@ class LayoutLoader(api.Loader):
if animation_file and skeleton:
self._import_animation(
- destination_path, libpath,
+ destination_path, lib_path,
instance_name, skeleton,
actors_dict, animation_file)
diff --git a/openpype/hosts/unreal/plugins/load/load_rig.py b/openpype/hosts/unreal/plugins/load/load_rig.py
index c7d095aa21..3d5616364c 100644
--- a/openpype/hosts/unreal/plugins/load/load_rig.py
+++ b/openpype/hosts/unreal/plugins/load/load_rig.py
@@ -1,13 +1,16 @@
+# -*- coding: utf-8 -*-
+"""Load Skeletal Meshes from FBX."""
import os
-from avalon import api, pipeline
-from avalon.unreal import lib
-from avalon.unreal import pipeline as unreal_pipeline
-import unreal
+from avalon import pipeline
+from openpype.pipeline import get_representation_path
+from openpype.hosts.unreal.api import plugin
+from openpype.hosts.unreal.api import pipeline as unreal_pipeline
+import unreal # noqa
-class SkeletalMeshFBXLoader(api.Loader):
- """Load Unreal SkeletalMesh from FBX"""
+class SkeletalMeshFBXLoader(plugin.Loader):
+ """Load Unreal SkeletalMesh from FBX."""
families = ["rig"]
label = "Import FBX Skeletal Mesh"
@@ -16,8 +19,7 @@ class SkeletalMeshFBXLoader(api.Loader):
color = "orange"
def load(self, context, name, namespace, options):
- """
- Load and containerise representation into Content Browser.
+ """Load and containerise representation into Content Browser.
This is two step process. First, import FBX to temporary path and
then call `containerise()` on it - this moves all content to new
@@ -31,15 +33,15 @@ class SkeletalMeshFBXLoader(api.Loader):
This is not passed here, so namespace is set
by `containerise()` because only then we know
real path.
- data (dict): Those would be data to be imprinted. This is not used
- now, data are imprinted by `containerise()`.
+ options (dict): Those would be data to be imprinted. This is not
+ used now, data are imprinted by `containerise()`.
Returns:
list(str): list of container content
- """
- # Create directory for asset and avalon container
- root = "/Game/Avalon/Assets"
+ """
+ # Create directory for asset and OpenPype container
+ root = "/Game/OpenPype/Assets"
if options and options.get("asset_dir"):
root = options["asset_dir"]
asset = context.get('asset').get('name')
@@ -94,7 +96,7 @@ class SkeletalMeshFBXLoader(api.Loader):
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501
# Create Asset Container
- lib.create_avalon_container(
+ unreal_pipeline.create_container(
container=container_name, path=asset_dir)
data = {
@@ -123,7 +125,7 @@ class SkeletalMeshFBXLoader(api.Loader):
def update(self, container, representation):
name = container["asset_name"]
- source_path = api.get_representation_path(representation)
+ source_path = get_representation_path(representation)
destination_path = container["namespace"]
task = unreal.AssetImportTask()
diff --git a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py b/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py
index 510c4331ad..587fc83a77 100644
--- a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py
+++ b/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py
@@ -1,13 +1,16 @@
+# -*- coding: utf-8 -*-
+"""Load Static meshes from FBX."""
import os
-from avalon import api, pipeline
-from avalon.unreal import lib
-from avalon.unreal import pipeline as unreal_pipeline
-import unreal
+from avalon import pipeline
+from openpype.pipeline import get_representation_path
+from openpype.hosts.unreal.api import plugin
+from openpype.hosts.unreal.api import pipeline as unreal_pipeline
+import unreal # noqa
-class StaticMeshFBXLoader(api.Loader):
- """Load Unreal StaticMesh from FBX"""
+class StaticMeshFBXLoader(plugin.Loader):
+ """Load Unreal StaticMesh from FBX."""
families = ["model", "unrealStaticMesh"]
label = "Import FBX Static Mesh"
@@ -15,7 +18,8 @@ class StaticMeshFBXLoader(api.Loader):
icon = "cube"
color = "orange"
- def get_task(self, filename, asset_dir, asset_name, replace):
+ @staticmethod
+ def get_task(filename, asset_dir, asset_name, replace):
task = unreal.AssetImportTask()
options = unreal.FbxImportUI()
import_data = unreal.FbxStaticMeshImportData()
@@ -41,8 +45,7 @@ class StaticMeshFBXLoader(api.Loader):
return task
def load(self, context, name, namespace, options):
- """
- Load and containerise representation into Content Browser.
+ """Load and containerise representation into Content Browser.
This is two step process. First, import FBX to temporary path and
then call `containerise()` on it - this moves all content to new
@@ -56,15 +59,15 @@ class StaticMeshFBXLoader(api.Loader):
This is not passed here, so namespace is set
by `containerise()` because only then we know
real path.
- data (dict): Those would be data to be imprinted. This is not used
- now, data are imprinted by `containerise()`.
+ options (dict): Those would be data to be imprinted. This is not
+ used now, data are imprinted by `containerise()`.
Returns:
list(str): list of container content
"""
- # Create directory for asset and avalon container
- root = "/Game/Avalon/Assets"
+ # Create directory for asset and OpenPype container
+ root = "/Game/OpenPype/Assets"
if options and options.get("asset_dir"):
root = options["asset_dir"]
asset = context.get('asset').get('name')
@@ -87,7 +90,7 @@ class StaticMeshFBXLoader(api.Loader):
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501
# Create Asset Container
- lib.create_avalon_container(
+ unreal_pipeline.create_container(
container=container_name, path=asset_dir)
data = {
@@ -116,7 +119,7 @@ class StaticMeshFBXLoader(api.Loader):
def update(self, container, representation):
name = container["asset_name"]
- source_path = api.get_representation_path(representation)
+ source_path = get_representation_path(representation)
destination_path = container["namespace"]
task = self.get_task(source_path, destination_path, name, True)
diff --git a/openpype/hosts/unreal/plugins/publish/collect_current_file.py b/openpype/hosts/unreal/plugins/publish/collect_current_file.py
index 4e828933bb..acd4c5c8d2 100644
--- a/openpype/hosts/unreal/plugins/publish/collect_current_file.py
+++ b/openpype/hosts/unreal/plugins/publish/collect_current_file.py
@@ -1,17 +1,18 @@
-import unreal
-
+# -*- coding: utf-8 -*-
+"""Collect current project path."""
+import unreal # noqa
import pyblish.api
class CollectUnrealCurrentFile(pyblish.api.ContextPlugin):
- """Inject the current working file into context"""
+ """Inject the current working file into context."""
order = pyblish.api.CollectorOrder - 0.5
label = "Unreal Current File"
hosts = ['unreal']
def process(self, context):
- """Inject the current working file"""
+ """Inject the current working file."""
current_file = unreal.Paths.get_project_file_path()
context.data['currentFile'] = current_file
diff --git a/openpype/hosts/unreal/plugins/publish/collect_instances.py b/openpype/hosts/unreal/plugins/publish/collect_instances.py
index 62676f9938..94e732d728 100644
--- a/openpype/hosts/unreal/plugins/publish/collect_instances.py
+++ b/openpype/hosts/unreal/plugins/publish/collect_instances.py
@@ -1,12 +1,14 @@
+# -*- coding: utf-8 -*-
+"""Collect publishable instances in Unreal."""
import ast
-import unreal
+import unreal # noqa
import pyblish.api
class CollectInstances(pyblish.api.ContextPlugin):
- """Gather instances by AvalonPublishInstance class
+ """Gather instances by OpenPypePublishInstance class
- This collector finds all paths containing `AvalonPublishInstance` class
+ This collector finds all paths containing `OpenPypePublishInstance` class
asset
Identifier:
@@ -22,7 +24,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
instance_containers = ar.get_assets_by_class(
- "AvalonPublishInstance", True)
+ "OpenPypePublishInstance", True)
for container_data in instance_containers:
asset = container_data.get_asset()
diff --git a/openpype/hosts/unreal/plugins/publish/extract_camera.py b/openpype/hosts/unreal/plugins/publish/extract_camera.py
index 10862fc0ef..ce53824563 100644
--- a/openpype/hosts/unreal/plugins/publish/extract_camera.py
+++ b/openpype/hosts/unreal/plugins/publish/extract_camera.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+"""Extract camera from Unreal."""
import os
import unreal
@@ -17,7 +19,7 @@ class ExtractCamera(openpype.api.Extractor):
def process(self, instance):
# Define extract output file path
- stagingdir = self.staging_dir(instance)
+ staging_dir = self.staging_dir(instance)
fbx_filename = "{}.fbx".format(instance.name)
# Perform extraction
@@ -38,7 +40,7 @@ class ExtractCamera(openpype.api.Extractor):
sequence,
sequence.get_bindings(),
unreal.FbxExportOption(),
- os.path.join(stagingdir, fbx_filename)
+ os.path.join(staging_dir, fbx_filename)
)
break
@@ -49,6 +51,6 @@ class ExtractCamera(openpype.api.Extractor):
'name': 'fbx',
'ext': 'fbx',
'files': fbx_filename,
- "stagingDir": stagingdir,
+ "stagingDir": staging_dir,
}
instance.data["representations"].append(fbx_representation)
diff --git a/openpype/hosts/unreal/plugins/publish/extract_layout.py b/openpype/hosts/unreal/plugins/publish/extract_layout.py
index a47187cf47..2d09b0e7bd 100644
--- a/openpype/hosts/unreal/plugins/publish/extract_layout.py
+++ b/openpype/hosts/unreal/plugins/publish/extract_layout.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
import os
import json
import math
@@ -20,7 +21,7 @@ class ExtractLayout(openpype.api.Extractor):
def process(self, instance):
# Define extract output file path
- stagingdir = self.staging_dir(instance)
+ staging_dir = self.staging_dir(instance)
# Perform extraction
self.log.info("Performing extraction..")
@@ -96,7 +97,7 @@ class ExtractLayout(openpype.api.Extractor):
json_data.append(json_element)
json_filename = "{}.json".format(instance.name)
- json_path = os.path.join(stagingdir, json_filename)
+ json_path = os.path.join(staging_dir, json_filename)
with open(json_path, "w+") as file:
json.dump(json_data, fp=file, indent=2)
@@ -108,6 +109,6 @@ class ExtractLayout(openpype.api.Extractor):
'name': 'json',
'ext': 'json',
'files': json_filename,
- "stagingDir": stagingdir,
+ "stagingDir": staging_dir,
}
instance.data["representations"].append(json_representation)
diff --git a/openpype/hosts/unreal/plugins/publish/extract_look.py b/openpype/hosts/unreal/plugins/publish/extract_look.py
index 0f1539a7d5..ea39949417 100644
--- a/openpype/hosts/unreal/plugins/publish/extract_look.py
+++ b/openpype/hosts/unreal/plugins/publish/extract_look.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
import json
import os
@@ -17,7 +18,7 @@ class ExtractLook(openpype.api.Extractor):
def process(self, instance):
# Define extract output file path
- stagingdir = self.staging_dir(instance)
+ staging_dir = self.staging_dir(instance)
resources_dir = instance.data["resourcesDir"]
ar = unreal.AssetRegistryHelpers.get_asset_registry()
@@ -57,7 +58,7 @@ class ExtractLook(openpype.api.Extractor):
tga_export_task.set_editor_property('automated', True)
tga_export_task.set_editor_property('object', texture)
tga_export_task.set_editor_property(
- 'filename', f"{stagingdir}/{tga_filename}")
+ 'filename', f"{staging_dir}/{tga_filename}")
tga_export_task.set_editor_property('prompt', False)
tga_export_task.set_editor_property('selected', False)
@@ -66,7 +67,7 @@ class ExtractLook(openpype.api.Extractor):
json_element['tga_filename'] = tga_filename
transfers.append((
- f"{stagingdir}/{tga_filename}",
+ f"{staging_dir}/{tga_filename}",
f"{resources_dir}/{tga_filename}"))
fbx_filename = f"{instance.name}_{name}.fbx"
@@ -84,7 +85,7 @@ class ExtractLook(openpype.api.Extractor):
task.set_editor_property('automated', True)
task.set_editor_property('object', object)
task.set_editor_property(
- 'filename', f"{stagingdir}/{fbx_filename}")
+ 'filename', f"{staging_dir}/{fbx_filename}")
task.set_editor_property('prompt', False)
task.set_editor_property('selected', False)
@@ -93,13 +94,13 @@ class ExtractLook(openpype.api.Extractor):
json_element['fbx_filename'] = fbx_filename
transfers.append((
- f"{stagingdir}/{fbx_filename}",
+ f"{staging_dir}/{fbx_filename}",
f"{resources_dir}/{fbx_filename}"))
json_data.append(json_element)
json_filename = f"{instance.name}.json"
- json_path = os.path.join(stagingdir, json_filename)
+ json_path = os.path.join(staging_dir, json_filename)
with open(json_path, "w+") as file:
json.dump(json_data, fp=file, indent=2)
@@ -113,7 +114,7 @@ class ExtractLook(openpype.api.Extractor):
'name': 'json',
'ext': 'json',
'files': json_filename,
- "stagingDir": stagingdir,
+ "stagingDir": staging_dir,
}
instance.data["representations"].append(json_representation)
diff --git a/openpype/hosts/webpublisher/api/__init__.py b/openpype/hosts/webpublisher/api/__init__.py
index e40d46d662..dbeb628073 100644
--- a/openpype/hosts/webpublisher/api/__init__.py
+++ b/openpype/hosts/webpublisher/api/__init__.py
@@ -12,30 +12,19 @@ HOST_DIR = os.path.dirname(os.path.abspath(
openpype.hosts.webpublisher.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
-LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
-CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
-
-
-def application_launch():
- pass
def install():
print("Installing Pype config...")
pyblish.register_plugin_path(PUBLISH_PATH)
- avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
- avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
log.info(PUBLISH_PATH)
io.install()
- avalon.on("application.launched", application_launch)
def uninstall():
pyblish.deregister_plugin_path(PUBLISH_PATH)
- avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
- avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
# to have required methods for interface
diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py
index abad14106f..65cef14703 100644
--- a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py
+++ b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py
@@ -10,14 +10,22 @@ Provides:
import os
import clique
import tempfile
+import math
+
from avalon import io
import pyblish.api
-from openpype.lib import prepare_template_data
+from openpype.lib import (
+ prepare_template_data,
+ get_asset,
+ get_ffprobe_streams,
+ convert_ffprobe_fps_value,
+)
from openpype.lib.plugin_tools import (
parse_json,
get_subset_name_with_asset_doc
)
+
class CollectPublishedFiles(pyblish.api.ContextPlugin):
"""
This collector will try to find json files in provided
@@ -49,10 +57,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
self.log.info("task_sub:: {}".format(task_subfolders))
asset_name = context.data["asset"]
- asset_doc = io.find_one({
- "type": "asset",
- "name": asset_name
- })
+ asset_doc = get_asset()
task_name = context.data["task"]
task_type = context.data["taskType"]
project_name = context.data["project_name"]
@@ -97,11 +102,27 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
instance.data["frameEnd"] = \
instance.data["representations"][0]["frameEnd"]
else:
- instance.data["frameStart"] = 0
- instance.data["frameEnd"] = 1
+ frame_start = asset_doc["data"]["frameStart"]
+ instance.data["frameStart"] = frame_start
+ instance.data["frameEnd"] = asset_doc["data"]["frameEnd"]
instance.data["representations"] = self._get_single_repre(
task_dir, task_data["files"], tags
)
+ file_url = os.path.join(task_dir, task_data["files"][0])
+ no_of_frames = self._get_number_of_frames(file_url)
+ if no_of_frames:
+ try:
+                    frame_end = int(frame_start) + math.ceil(no_of_frames)
+                    instance.data["frameEnd"] = frame_end - 1
+ self.log.debug("frameEnd:: {}".format(
+ instance.data["frameEnd"]))
+ except ValueError:
+ self.log.warning("Unable to count frames "
+ "duration {}".format(no_of_frames))
+
+
+ instance.data["handleStart"] = asset_doc["data"]["handleStart"]
+ instance.data["handleEnd"] = asset_doc["data"]["handleEnd"]
self.log.info("instance.data:: {}".format(instance.data))
@@ -127,7 +148,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
return [repre_data]
def _process_sequence(self, files, task_dir, tags):
- """Prepare reprentations for sequence of files."""
+        """Prepare representations for a sequence of files."""
collections, remainder = clique.assemble(files)
assert len(collections) == 1, \
"Too many collections in {}".format(files)
@@ -188,6 +209,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
msg = "No family found for combination of " +\
"task_type: {}, is_sequence:{}, extension: {}".format(
task_type, is_sequence, extension)
+        found_family = found_family or "render"  # fallback family when no profile matches
assert found_family, msg
return (found_family,
@@ -243,3 +265,43 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
return version[0].get("version") or 0
else:
return 0
+
+ def _get_number_of_frames(self, file_url):
+        """Return duration in frames."""
+ try:
+ streams = get_ffprobe_streams(file_url, self.log)
+ except Exception as exc:
+ raise AssertionError((
+ "FFprobe couldn't read information about input file: \"{}\"."
+ " Error message: {}"
+ ).format(file_url, str(exc)))
+
+ first_video_stream = None
+ for stream in streams:
+ if "width" in stream and "height" in stream:
+ first_video_stream = stream
+ break
+
+ if first_video_stream:
+            nb_frames = first_video_stream.get("nb_frames")
+ if nb_frames:
+ try:
+ return int(nb_frames)
+ except ValueError:
+ self.log.warning(
+ "nb_frames {} not convertible".format(nb_frames))
+
+            duration = first_video_stream.get("duration")
+            frame_rate = convert_ffprobe_fps_value(
+                first_video_stream.get("r_frame_rate", '0/0')
+            )
+ self.log.debug("duration:: {} frame_rate:: {}".format(
+ duration, frame_rate))
+ try:
+ return float(duration) * float(frame_rate)
+ except ValueError:
+ self.log.warning(
+ "{} or {} cannot be converted".format(duration,
+ frame_rate))
+
+ self.log.warning("Cannot get number of frames")
diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py
index ebe7648ad7..b8502ae718 100644
--- a/openpype/lib/__init__.py
+++ b/openpype/lib/__init__.py
@@ -16,6 +16,19 @@ sys.path.insert(0, python_version_dir)
site.addsitedir(python_version_dir)
+from .events import (
+ emit_event,
+ register_event_callback
+)
+
+from .vendor_bin_utils import (
+ find_executable,
+ get_vendor_bin_path,
+ get_oiio_tools_path,
+ get_ffmpeg_tool_path,
+ is_oiio_supported
+)
+
from .env_tools import (
env_value_to_bool,
get_paths_from_environ,
@@ -29,30 +42,35 @@ from .execute import (
get_linux_launcher_args,
execute,
run_subprocess,
+ run_detached_process,
run_openpype_process,
clean_envs_for_openpype_process,
path_to_subprocess_arg,
CREATE_NO_WINDOW
)
from .log import PypeLogger, timeit
+
+from .path_templates import (
+ merge_dict,
+ TemplateMissingKey,
+ TemplateUnsolved,
+ StringTemplate,
+ TemplatesDict,
+ FormatObject,
+)
+
from .mongo import (
get_default_components,
validate_mongo_connection,
OpenPypeMongoConnection
)
from .anatomy import (
- merge_dict,
Anatomy
)
-from .config import get_datetime_data
-
-from .vendor_bin_utils import (
- get_vendor_bin_path,
- get_oiio_tools_path,
- get_ffmpeg_tool_path,
- ffprobe_streams,
- is_oiio_supported
+from .config import (
+ get_datetime_data,
+ get_formatted_current_time
)
from .python_module_tools import (
@@ -71,7 +89,12 @@ from .profiles_filtering import (
from .transcoding import (
get_transcode_temp_directory,
should_convert_for_ffmpeg,
- convert_for_ffmpeg
+ convert_for_ffmpeg,
+ get_ffprobe_data,
+ get_ffprobe_streams,
+ get_ffmpeg_codec_args,
+ get_ffmpeg_format_args,
+ convert_ffprobe_fps_value,
)
from .avalon_context import (
CURRENT_DOC_SCHEMAS,
@@ -130,7 +153,7 @@ from .applications import (
PostLaunchHook,
EnvironmentPrepData,
- prepare_host_environments,
+ prepare_app_environments,
prepare_context_environments,
get_app_environments_for_context,
apply_project_environments_value
@@ -149,6 +172,7 @@ from .plugin_tools import (
)
from .path_tools import (
+ create_hard_link,
version_up,
get_version_from_path,
get_last_version_from_path,
@@ -183,11 +207,16 @@ from .openpype_version import (
terminal = Terminal
__all__ = [
+ "emit_event",
+ "register_event_callback",
+
+ "find_executable",
"get_openpype_execute_args",
"get_pype_execute_args",
"get_linux_launcher_args",
"execute",
"run_subprocess",
+ "run_detached_process",
"run_openpype_process",
"clean_envs_for_openpype_process",
"path_to_subprocess_arg",
@@ -200,7 +229,6 @@ __all__ = [
"get_vendor_bin_path",
"get_oiio_tools_path",
"get_ffmpeg_tool_path",
- "ffprobe_streams",
"is_oiio_supported",
"import_filepath",
@@ -212,6 +240,11 @@ __all__ = [
"get_transcode_temp_directory",
"should_convert_for_ffmpeg",
"convert_for_ffmpeg",
+ "get_ffprobe_data",
+ "get_ffprobe_streams",
+ "get_ffmpeg_codec_args",
+ "get_ffmpeg_format_args",
+ "convert_ffprobe_fps_value",
"CURRENT_DOC_SCHEMAS",
"PROJECT_NAME_ALLOWED_SYMBOLS",
@@ -261,7 +294,7 @@ __all__ = [
"PreLaunchHook",
"PostLaunchHook",
"EnvironmentPrepData",
- "prepare_host_environments",
+ "prepare_app_environments",
"prepare_context_environments",
"get_app_environments_for_context",
"apply_project_environments_value",
@@ -279,16 +312,24 @@ __all__ = [
"get_unique_layer_name",
"get_background_layers",
+ "create_hard_link",
"version_up",
"get_version_from_path",
"get_last_version_from_path",
+ "merge_dict",
+ "TemplateMissingKey",
+ "TemplateUnsolved",
+ "StringTemplate",
+ "TemplatesDict",
+ "FormatObject",
+
"terminal",
- "merge_dict",
"Anatomy",
"get_datetime_data",
+ "get_formatted_current_time",
"PypeLogger",
"get_default_components",
diff --git a/openpype/lib/abstract_collect_render.py b/openpype/lib/abstract_collect_render.py
index 3839aad45d..7c768e280c 100644
--- a/openpype/lib/abstract_collect_render.py
+++ b/openpype/lib/abstract_collect_render.py
@@ -26,7 +26,7 @@ class RenderInstance(object):
# metadata
version = attr.ib() # instance version
- time = attr.ib() # time of instance creation (avalon.api.time())
+ time = attr.ib() # time of instance creation (get_formatted_current_time)
source = attr.ib() # path to source scene file
label = attr.ib() # label to show in GUI
subset = attr.ib() # subset name
diff --git a/openpype/lib/anatomy.py b/openpype/lib/anatomy.py
index fa81a18ff7..3fbc05ee88 100644
--- a/openpype/lib/anatomy.py
+++ b/openpype/lib/anatomy.py
@@ -9,6 +9,12 @@ from openpype.settings.lib import (
get_default_anatomy_settings,
get_anatomy_settings
)
+from .path_templates import (
+ TemplateUnsolved,
+ TemplateResult,
+ TemplatesDict,
+ FormatObject,
+)
from .log import PypeLogger
log = PypeLogger().get_logger(__name__)
@@ -19,32 +25,6 @@ except NameError:
StringType = str
-def merge_dict(main_dict, enhance_dict):
- """Merges dictionaries by keys.
-
- Function call itself if value on key is again dictionary.
-
- Args:
- main_dict (dict): First dict to merge second one into.
- enhance_dict (dict): Second dict to be merged.
-
- Returns:
- dict: Merged result.
-
- .. note:: does not overrides whole value on first found key
- but only values differences from enhance_dict
-
- """
- for key, value in enhance_dict.items():
- if key not in main_dict:
- main_dict[key] = value
- elif isinstance(value, dict) and isinstance(main_dict[key], dict):
- main_dict[key] = merge_dict(main_dict[key], value)
- else:
- main_dict[key] = value
- return main_dict
-
-
class ProjectNotSet(Exception):
"""Exception raised when is created Anatomy without project name."""
@@ -59,7 +39,7 @@ class RootCombinationError(Exception):
# TODO better error message
msg = (
"Combination of root with and"
- " without root name in Templates. {}"
+ " without root name in AnatomyTemplates. {}"
).format(joined_roots)
super(RootCombinationError, self).__init__(msg)
@@ -68,7 +48,7 @@ class RootCombinationError(Exception):
class Anatomy:
"""Anatomy module helps to keep project settings.
- Wraps key project specifications, Templates and Roots.
+ Wraps key project specifications, AnatomyTemplates and Roots.
Args:
project_name (str): Project name to look on overrides.
@@ -93,7 +73,7 @@ class Anatomy:
get_anatomy_settings(project_name, site_name)
)
self._site_name = site_name
- self._templates_obj = Templates(self)
+ self._templates_obj = AnatomyTemplates(self)
self._roots_obj = Roots(self)
# Anatomy used as dictionary
@@ -158,12 +138,12 @@ class Anatomy:
@property
def templates(self):
- """Wrap property `templates` of Anatomy's Templates instance."""
+ """Wrap property `templates` of Anatomy's AnatomyTemplates instance."""
return self._templates_obj.templates
@property
def templates_obj(self):
- """Return `Templates` object of current Anatomy instance."""
+ """Return `AnatomyTemplates` object of current Anatomy instance."""
return self._templates_obj
def format(self, *args, **kwargs):
@@ -375,203 +355,45 @@ class Anatomy:
return rootless_path.format(**data)
-class TemplateMissingKey(Exception):
- """Exception for cases when key does not exist in Anatomy."""
-
- msg = "Anatomy key does not exist: `anatomy{0}`."
-
- def __init__(self, parents):
- parent_join = "".join(["[\"{0}\"]".format(key) for key in parents])
- super(TemplateMissingKey, self).__init__(
- self.msg.format(parent_join)
- )
-
-
-class TemplateUnsolved(Exception):
+class AnatomyTemplateUnsolved(TemplateUnsolved):
"""Exception for unsolved template when strict is set to True."""
msg = "Anatomy template \"{0}\" is unsolved.{1}{2}"
- invalid_types_msg = " Keys with invalid DataType: `{0}`."
- missing_keys_msg = " Missing keys: \"{0}\"."
- def __init__(self, template, missing_keys, invalid_types):
- invalid_type_items = []
- for _key, _type in invalid_types.items():
- invalid_type_items.append(
- "\"{0}\" {1}".format(_key, str(_type))
- )
- invalid_types_msg = ""
- if invalid_type_items:
- invalid_types_msg = self.invalid_types_msg.format(
- ", ".join(invalid_type_items)
- )
+class AnatomyTemplateResult(TemplateResult):
+ rootless = None
- missing_keys_msg = ""
- if missing_keys:
- missing_keys_msg = self.missing_keys_msg.format(
- ", ".join(missing_keys)
- )
- super(TemplateUnsolved, self).__init__(
- self.msg.format(template, missing_keys_msg, invalid_types_msg)
+ def __new__(cls, result, rootless_path):
+ new_obj = super(AnatomyTemplateResult, cls).__new__(
+ cls,
+ str(result),
+ result.template,
+ result.solved,
+ result.used_values,
+ result.missing_keys,
+ result.invalid_types
)
-
-
-class TemplateResult(str):
- """Result (formatted template) of anatomy with most of information in.
-
- Args:
- used_values (dict): Dictionary of template filling data with
- only used keys.
- solved (bool): For check if all required keys were filled.
- template (str): Original template.
- missing_keys (list): Missing keys that were not in the data. Include
- missing optional keys.
- invalid_types (dict): When key was found in data, but value had not
- allowed DataType. Allowed data types are `numbers`,
- `str`(`basestring`) and `dict`. Dictionary may cause invalid type
- when value of key in data is dictionary but template expect string
- of number.
- """
-
- def __new__(
- cls, filled_template, template, solved, rootless_path,
- used_values, missing_keys, invalid_types
- ):
- new_obj = super(TemplateResult, cls).__new__(cls, filled_template)
- new_obj.used_values = used_values
- new_obj.solved = solved
- new_obj.template = template
new_obj.rootless = rootless_path
- new_obj.missing_keys = list(set(missing_keys))
- _invalid_types = {}
- for invalid_type in invalid_types:
- for key, val in invalid_type.items():
- if key in _invalid_types:
- continue
- _invalid_types[key] = val
- new_obj.invalid_types = _invalid_types
return new_obj
-
-class TemplatesDict(dict):
- """Holds and wrap TemplateResults for easy bug report."""
-
- def __init__(self, in_data, key=None, parent=None, strict=None):
- super(TemplatesDict, self).__init__()
- for _key, _value in in_data.items():
- if isinstance(_value, dict):
- _value = self.__class__(_value, _key, self)
- self[_key] = _value
-
- self.key = key
- self.parent = parent
- self.strict = strict
- if self.parent is None and strict is None:
- self.strict = True
-
- def __getitem__(self, key):
- # Raise error about missing key in anatomy.yaml
- if key not in self.keys():
- hier = self.hierarchy()
- hier.append(key)
- raise TemplateMissingKey(hier)
-
- value = super(TemplatesDict, self).__getitem__(key)
- if isinstance(value, self.__class__):
- return value
-
- # Raise exception when expected solved templates and it is not.
- if (
- self.raise_on_unsolved
- and (hasattr(value, "solved") and not value.solved)
- ):
- raise TemplateUnsolved(
- value.template, value.missing_keys, value.invalid_types
+ def validate(self):
+ if not self.solved:
+ raise AnatomyTemplateUnsolved(
+ self.template,
+ self.missing_keys,
+ self.invalid_types
)
- return value
-
- @property
- def raise_on_unsolved(self):
- """To affect this change `strict` attribute."""
- if self.strict is not None:
- return self.strict
- return self.parent.raise_on_unsolved
-
- def hierarchy(self):
- """Return dictionary keys one by one to root parent."""
- if self.parent is None:
- return []
-
- hier_keys = []
- par_hier = self.parent.hierarchy()
- if par_hier:
- hier_keys.extend(par_hier)
- hier_keys.append(self.key)
-
- return hier_keys
-
- @property
- def missing_keys(self):
- """Return missing keys of all children templates."""
- missing_keys = []
- for value in self.values():
- missing_keys.extend(value.missing_keys)
- return list(set(missing_keys))
-
- @property
- def invalid_types(self):
- """Return invalid types of all children templates."""
- invalid_types = {}
- for value in self.values():
- for invalid_type in value.invalid_types:
- _invalid_types = {}
- for key, val in invalid_type.items():
- if key in invalid_types:
- continue
- _invalid_types[key] = val
- invalid_types = merge_dict(invalid_types, _invalid_types)
- return invalid_types
-
- @property
- def used_values(self):
- """Return used values for all children templates."""
- used_values = {}
- for value in self.values():
- used_values = merge_dict(used_values, value.used_values)
- return used_values
-
- def get_solved(self):
- """Get only solved key from templates."""
- result = {}
- for key, value in self.items():
- if isinstance(value, self.__class__):
- value = value.get_solved()
- if not value:
- continue
- result[key] = value
-
- elif (
- not hasattr(value, "solved") or
- value.solved
- ):
- result[key] = value
- return self.__class__(result, key=self.key, parent=self.parent)
-class Templates:
- key_pattern = re.compile(r"(\{.*?[^{0]*\})")
- key_padding_pattern = re.compile(r"([^:]+)\S+[><]\S+")
- sub_dict_pattern = re.compile(r"([^\[\]]+)")
- optional_pattern = re.compile(r"(<.*?[^{0]*>)[^0-9]*?")
-
+class AnatomyTemplates(TemplatesDict):
inner_key_pattern = re.compile(r"(\{@.*?[^{}0]*\})")
inner_key_name_pattern = re.compile(r"\{@(.*?[^{}0]*)\}")
def __init__(self, anatomy):
+ super(AnatomyTemplates, self).__init__()
self.anatomy = anatomy
self.loaded_project = None
- self._templates = None
def __getitem__(self, key):
return self.templates[key]
@@ -580,7 +402,9 @@ class Templates:
return self.templates.get(key, default)
def reset(self):
+ self._raw_templates = None
self._templates = None
+ self._objected_templates = None
@property
def project_name(self):
@@ -592,17 +416,66 @@ class Templates:
@property
def templates(self):
+ self._validate_discovery()
+ return self._templates
+
+ @property
+ def objected_templates(self):
+ self._validate_discovery()
+ return self._objected_templates
+
+ def _validate_discovery(self):
if self.project_name != self.loaded_project:
- self._templates = None
+ self.reset()
if self._templates is None:
- self._templates = self._discover()
+ self._discover()
self.loaded_project = self.project_name
- return self._templates
+
+ def _format_value(self, value, data):
+ if isinstance(value, RootItem):
+ return self._solve_dict(value, data)
+
+ result = super(AnatomyTemplates, self)._format_value(value, data)
+ if isinstance(result, TemplateResult):
+ rootless_path = self._rootless_path(result, data)
+ result = AnatomyTemplateResult(result, rootless_path)
+ return result
+
+ def set_templates(self, templates):
+ if not templates:
+ self.reset()
+ return
+
+ self._raw_templates = copy.deepcopy(templates)
+ templates = copy.deepcopy(templates)
+ v_queue = collections.deque()
+ v_queue.append(templates)
+ while v_queue:
+ item = v_queue.popleft()
+ if not isinstance(item, dict):
+ continue
+
+ for key in tuple(item.keys()):
+ value = item[key]
+ if isinstance(value, dict):
+ v_queue.append(value)
+
+ elif (
+ isinstance(value, StringType)
+ and "{task}" in value
+ ):
+ item[key] = value.replace("{task}", "{task[name]}")
+
+ solved_templates = self.solve_template_inner_links(templates)
+ self._templates = solved_templates
+ self._objected_templates = self.create_ojected_templates(
+ solved_templates
+ )
def default_templates(self):
"""Return default templates data with solved inner keys."""
- return Templates.solve_template_inner_links(
+ return self.solve_template_inner_links(
self.anatomy["templates"]
)
@@ -613,7 +486,7 @@ class Templates:
TODO: create templates if not exist.
Returns:
- TemplatesDict: Contain templates data for current project of
+ TemplatesResultDict: Contain templates data for current project of
default templates.
"""
@@ -624,7 +497,7 @@ class Templates:
" Trying to use default."
).format(self.project_name))
- return Templates.solve_template_inner_links(self.anatomy["templates"])
+ self.set_templates(self.anatomy["templates"])
@classmethod
def replace_inner_keys(cls, matches, value, key_values, key):
@@ -791,149 +664,6 @@ class Templates:
return keys_by_subkey
- def _filter_optional(self, template, data):
- """Filter invalid optional keys.
-
- Invalid keys may be missing keys of with invalid value DataType.
-
- Args:
- template (str): Anatomy template which will be formatted.
- data (dict): Containing keys to be filled into template.
-
- Result:
- tuple: Contain origin template without missing optional keys and
- without optional keys identificator ("<" and ">"), information
- about missing optional keys and invalid types of optional keys.
-
- """
-
- # Remove optional missing keys
- missing_keys = []
- invalid_types = []
- for optional_group in self.optional_pattern.findall(template):
- _missing_keys = []
- _invalid_types = []
- for optional_key in self.key_pattern.findall(optional_group):
- key = str(optional_key[1:-1])
- key_padding = list(
- self.key_padding_pattern.findall(key)
- )
- if key_padding:
- key = key_padding[0]
-
- validation_result = self._validate_data_key(
- key, data
- )
- missing_key = validation_result["missing_key"]
- invalid_type = validation_result["invalid_type"]
-
- valid = True
- if missing_key is not None:
- _missing_keys.append(missing_key)
- valid = False
-
- if invalid_type is not None:
- _invalid_types.append(invalid_type)
- valid = False
-
- if valid:
- try:
- optional_key.format(**data)
- except KeyError:
- _missing_keys.append(key)
- valid = False
-
- valid = len(_invalid_types) == 0 and len(_missing_keys) == 0
- missing_keys.extend(_missing_keys)
- invalid_types.extend(_invalid_types)
- replacement = ""
- if valid:
- replacement = optional_group[1:-1]
-
- template = template.replace(optional_group, replacement)
- return (template, missing_keys, invalid_types)
-
- def _validate_data_key(self, key, data):
- """Check and prepare missing keys and invalid types of template."""
- result = {
- "missing_key": None,
- "invalid_type": None
- }
-
- # check if key expects subdictionary keys (e.g. project[name])
- key_subdict = list(self.sub_dict_pattern.findall(key))
- used_keys = []
- if len(key_subdict) <= 1:
- if key not in data:
- result["missing_key"] = key
- return result
-
- used_keys.append(key)
- value = data[key]
-
- else:
- value = data
- missing_key = False
- invalid_type = False
- for sub_key in key_subdict:
- if (
- value is None
- or (hasattr(value, "items") and sub_key not in value)
- ):
- missing_key = True
- used_keys.append(sub_key)
- break
-
- elif not hasattr(value, "items"):
- invalid_type = True
- break
-
- used_keys.append(sub_key)
- value = value.get(sub_key)
-
- if missing_key or invalid_type:
- if len(used_keys) == 0:
- invalid_key = key_subdict[0]
- else:
- invalid_key = used_keys[0]
- for idx, sub_key in enumerate(used_keys):
- if idx == 0:
- continue
- invalid_key += "[{0}]".format(sub_key)
-
- if missing_key:
- result["missing_key"] = invalid_key
-
- elif invalid_type:
- result["invalid_type"] = {invalid_key: type(value)}
-
- return result
-
- if isinstance(value, (numbers.Number, Roots, RootItem)):
- return result
-
- for inh_class in type(value).mro():
- if inh_class == StringType:
- return result
-
- result["missing_key"] = key
- result["invalid_type"] = {key: type(value)}
- return result
-
- def _merge_used_values(self, current_used, keys, value):
- key = keys[0]
- _keys = keys[1:]
- if len(_keys) == 0:
- current_used[key] = value
- else:
- next_dict = {}
- if key in current_used:
- next_dict = current_used[key]
- current_used[key] = self._merge_used_values(
- next_dict, _keys, value
- )
- return current_used
-
def _dict_to_subkeys_list(self, subdict, pre_keys=None):
if pre_keys is None:
pre_keys = []
@@ -956,9 +686,11 @@ class Templates:
return {key_list[0]: value}
return {key_list[0]: self._keys_to_dicts(key_list[1:], value)}
- def _rootless_path(
- self, template, used_values, final_data, missing_keys, invalid_types
- ):
+ def _rootless_path(self, result, final_data):
+ used_values = result.used_values
+ missing_keys = result.missing_keys
+ template = result.template
+ invalid_types = result.invalid_types
if (
"root" not in used_values
or "root" in missing_keys
@@ -974,210 +706,49 @@ class Templates:
if not root_keys:
return
- roots_dict = {}
+ output = str(result)
for used_root_keys in root_keys:
if not used_root_keys:
continue
+ used_value = used_values
root_key = None
for key in used_root_keys:
+ used_value = used_value[key]
if root_key is None:
root_key = key
else:
root_key += "[{}]".format(key)
root_key = "{" + root_key + "}"
-
- roots_dict = merge_dict(
- roots_dict,
- self._keys_to_dicts(used_root_keys, root_key)
- )
-
- final_data["root"] = roots_dict["root"]
- return template.format(**final_data)
-
- def _format(self, orig_template, data):
- """ Figure out with whole formatting.
-
- Separate advanced keys (*Like '{project[name]}') from string which must
- be formatted separatelly in case of missing or incomplete keys in data.
-
- Args:
- template (str): Anatomy template which will be formatted.
- data (dict): Containing keys to be filled into template.
-
- Returns:
- TemplateResult: Filled or partially filled template containing all
- data needed or missing for filling template.
- """
- task_data = data.get("task")
- if (
- isinstance(task_data, StringType)
- and "{task[name]}" in orig_template
- ):
- # Change task to dictionary if template expect dictionary
- data["task"] = {"name": task_data}
-
- template, missing_optional, invalid_optional = (
- self._filter_optional(orig_template, data)
- )
- # Remove optional missing keys
- used_values = {}
- invalid_required = []
- missing_required = []
- replace_keys = []
-
- for group in self.key_pattern.findall(template):
- orig_key = group[1:-1]
- key = str(orig_key)
- key_padding = list(self.key_padding_pattern.findall(key))
- if key_padding:
- key = key_padding[0]
-
- validation_result = self._validate_data_key(key, data)
- missing_key = validation_result["missing_key"]
- invalid_type = validation_result["invalid_type"]
-
- if invalid_type is not None:
- invalid_required.append(invalid_type)
- replace_keys.append(key)
- continue
-
- if missing_key is not None:
- missing_required.append(missing_key)
- replace_keys.append(key)
- continue
-
- try:
- value = group.format(**data)
- key_subdict = list(self.sub_dict_pattern.findall(key))
- if len(key_subdict) <= 1:
- used_values[key] = value
-
- else:
- used_values = self._merge_used_values(
- used_values, key_subdict, value
- )
-
- except (TypeError, KeyError):
- missing_required.append(key)
- replace_keys.append(key)
-
- final_data = copy.deepcopy(data)
- for key in replace_keys:
- key_subdict = list(self.sub_dict_pattern.findall(key))
- if len(key_subdict) <= 1:
- final_data[key] = "{" + key + "}"
- continue
-
- replace_key_dst = "---".join(key_subdict)
- replace_key_dst_curly = "{" + replace_key_dst + "}"
- replace_key_src_curly = "{" + key + "}"
- template = template.replace(
- replace_key_src_curly, replace_key_dst_curly
- )
- final_data[replace_key_dst] = replace_key_src_curly
-
- solved = len(missing_required) == 0 and len(invalid_required) == 0
-
- missing_keys = missing_required + missing_optional
- invalid_types = invalid_required + invalid_optional
-
- filled_template = template.format(**final_data)
- # WARNING `_rootless_path` change values in `final_data` please keep
- # in midn when changing order
- rootless_path = self._rootless_path(
- template, used_values, final_data, missing_keys, invalid_types
- )
- if rootless_path is None:
- rootless_path = filled_template
-
- result = TemplateResult(
- filled_template, orig_template, solved, rootless_path,
- used_values, missing_keys, invalid_types
- )
- return result
-
- def solve_dict(self, templates, data):
- """ Solves templates with entered data.
-
- Args:
- templates (dict): All Anatomy templates which will be formatted.
- data (dict): Containing keys to be filled into template.
-
- Returns:
- dict: With `TemplateResult` in values containing filled or
- partially filled templates.
- """
- output = collections.defaultdict(dict)
- for key, orig_value in templates.items():
- if isinstance(orig_value, StringType):
- # Replace {task} by '{task[name]}' for backward compatibility
- if '{task}' in orig_value:
- orig_value = orig_value.replace('{task}', '{task[name]}')
-
- output[key] = self._format(orig_value, data)
- continue
-
- # Check if orig_value has items attribute (any dict inheritance)
- if not hasattr(orig_value, "items"):
- # TODO we should handle this case
- output[key] = orig_value
- continue
-
- for s_key, s_value in self.solve_dict(orig_value, data).items():
- output[key][s_key] = s_value
+ output = output.replace(str(used_value), root_key)
return output
+ def format(self, data, strict=True):
+ copy_data = copy.deepcopy(data)
+ roots = self.roots
+ if roots:
+ copy_data["root"] = roots
+ result = super(AnatomyTemplates, self).format(copy_data)
+ result.strict = strict
+ return result
+
def format_all(self, in_data, only_keys=True):
""" Solves templates based on entered data.
Args:
data (dict): Containing keys to be filled into template.
- only_keys (bool, optional): Decides if environ will be used to
- fill templates or only keys in data.
Returns:
- TemplatesDict: Output `TemplateResult` have `strict` attribute
- set to False so accessing unfilled keys in templates won't
- raise any exceptions.
+ TemplatesResultDict: Output `TemplateResult` have `strict`
+ attribute set to False so accessing unfilled keys in templates
+ won't raise any exceptions.
"""
- output = self.format(in_data, only_keys)
- output.strict = False
- return output
-
- def format(self, in_data, only_keys=True):
- """ Solves templates based on entered data.
-
- Args:
- data (dict): Containing keys to be filled into template.
- only_keys (bool, optional): Decides if environ will be used to
- fill templates or only keys in data.
-
- Returns:
- TemplatesDict: Output `TemplateResult` have `strict` attribute
- set to True so accessing unfilled keys in templates will
- raise exceptions with explaned error.
- """
- # Create a copy of inserted data
- data = copy.deepcopy(in_data)
-
- # Add environment variable to data
- if only_keys is False:
- for key, val in os.environ.items():
- data["$" + key] = val
-
- # override root value
- roots = self.roots
- if roots:
- data["root"] = roots
- solved = self.solve_dict(self.templates, data)
-
- return TemplatesDict(solved)
+ return self.format(in_data, strict=False)
-class RootItem:
+class RootItem(FormatObject):
"""Represents one item or roots.
Holds raw data of root item specification. Raw data contain value
diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py
index 393c83e9be..ef175ac89a 100644
--- a/openpype/lib/applications.py
+++ b/openpype/lib/applications.py
@@ -7,7 +7,6 @@ import platform
import collections
import inspect
import subprocess
-import distutils.spawn
from abc import ABCMeta, abstractmethod
import six
@@ -28,15 +27,18 @@ from .profiles_filtering import filter_profiles
from .local_settings import get_openpype_username
from .avalon_context import (
get_workdir_data,
- get_workdir_with_workdir_data
+ get_workdir_with_workdir_data,
+ get_workfile_template_key
)
from .python_module_tools import (
modules_from_path,
classes_from_module
)
-from .execute import get_linux_launcher_args
-
+from .execute import (
+ find_executable,
+ get_linux_launcher_args
+)
_logger = None
@@ -646,7 +648,7 @@ class ApplicationExecutable:
def _realpath(self):
"""Check if path is valid executable path."""
# Check for executable in PATH
- result = distutils.spawn.find_executable(self.executable_path)
+ result = find_executable(self.executable_path)
if result is not None:
return result
@@ -1295,7 +1297,7 @@ def get_app_environments_for_context(
"env": env
})
- prepare_host_environments(data, env_group)
+ prepare_app_environments(data, env_group)
prepare_context_environments(data, env_group)
# Discard avalon connection
@@ -1316,7 +1318,7 @@ def _merge_env(env, current_env):
return result
-def prepare_host_environments(data, env_group=None, implementation_envs=True):
+def prepare_app_environments(data, env_group=None, implementation_envs=True):
"""Modify launch environments based on launched app and context.
Args:
@@ -1474,6 +1476,22 @@ def prepare_context_environments(data, env_group=None):
)
app = data["app"]
+ context_env = {
+ "AVALON_PROJECT": project_doc["name"],
+ "AVALON_ASSET": asset_doc["name"],
+ "AVALON_TASK": task_name,
+ "AVALON_APP_NAME": app.full_name
+ }
+
+ log.debug(
+ "Context environments set:\n{}".format(
+ json.dumps(context_env, indent=4)
+ )
+ )
+ data["env"].update(context_env)
+ if not app.is_host:
+ return
+
workdir_data = get_workdir_data(
project_doc, asset_doc, task_name, app.host_name
)
@@ -1504,20 +1522,8 @@ def prepare_context_environments(data, env_group=None):
"Couldn't create workdir because: {}".format(str(exc))
)
- context_env = {
- "AVALON_PROJECT": project_doc["name"],
- "AVALON_ASSET": asset_doc["name"],
- "AVALON_TASK": task_name,
- "AVALON_APP": app.host_name,
- "AVALON_APP_NAME": app.full_name,
- "AVALON_WORKDIR": workdir
- }
- log.debug(
- "Context environments set:\n{}".format(
- json.dumps(context_env, indent=4)
- )
- )
- data["env"].update(context_env)
+ data["env"]["AVALON_APP"] = app.host_name
+ data["env"]["AVALON_WORKDIR"] = workdir
_prepare_last_workfile(data, workdir)
@@ -1587,14 +1593,15 @@ def _prepare_last_workfile(data, workdir):
last_workfile_path = data.get("last_workfile_path") or ""
if not last_workfile_path:
extensions = avalon.api.HOST_WORKFILE_EXTENSIONS.get(app.host_name)
-
if extensions:
anatomy = data["anatomy"]
+ project_settings = data["project_settings"]
+ task_type = workdir_data["task"]["type"]
+ template_key = get_workfile_template_key(
+ task_type, app.host_name, project_settings=project_settings
+ )
# Find last workfile
- file_template = anatomy.templates["work"]["file"]
- # Replace {task} by '{task[name]}' for backward compatibility
- if '{task}' in file_template:
- file_template = file_template.replace('{task}', '{task[name]}')
+ file_template = str(anatomy.templates[template_key]["file"])
workdir_data.update({
"version": 1,
diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py
index 3ce205c499..26beba41ee 100644
--- a/openpype/lib/avalon_context.py
+++ b/openpype/lib/avalon_context.py
@@ -15,6 +15,7 @@ from openpype.settings import (
)
from .anatomy import Anatomy
from .profiles_filtering import filter_profiles
+from .events import emit_event
# avalon module is not imported at the top
# - may not be in path at the time of pype.lib initialization
@@ -644,6 +645,165 @@ def get_workdir(
)
+def template_data_from_session(session=None):
+ """ Return dictionary with template from session keys.
+
+ Args:
+ session (dict, Optional): The Session to use. If not provided use the
+ currently active global Session.
+ Returns:
+ dict: All available data from session.
+ """
+ from avalon import io
+ import avalon.api
+
+ if session is None:
+ session = avalon.api.Session
+
+ project_name = session["AVALON_PROJECT"]
+ project_doc = io._database[project_name].find_one({"type": "project"})
+ asset_doc = io._database[project_name].find_one({
+ "type": "asset",
+ "name": session["AVALON_ASSET"]
+ })
+ task_name = session["AVALON_TASK"]
+ host_name = session["AVALON_APP"]
+ return get_workdir_data(project_doc, asset_doc, task_name, host_name)
+
+
+def compute_session_changes(
+ session, task=None, asset=None, app=None, template_key=None
+):
+ """Compute the changes for a Session object on asset, task or app switch
+
+ This does *NOT* update the Session object, but returns the changes
+ required for a valid update of the Session.
+
+ Args:
+ session (dict): The initial session to compute changes to.
+ This is required for computing the full Work Directory, as that
+ also depends on the values that haven't changed.
+ task (str, Optional): Name of task to switch to.
+ asset (str or dict, Optional): Name of asset to switch to.
+ You can also directly provide the Asset dictionary as returned
+ from the database to avoid an additional query. (optimization)
+ app (str, Optional): Name of app to switch to.
+
+ Returns:
+ dict: The required changes in the Session dictionary.
+
+ """
+ changes = dict()
+
+ # If no changes, return directly
+ if not any([task, asset, app]):
+ return changes
+
+ # Get asset document and asset
+ asset_document = None
+ asset_tasks = None
+ if isinstance(asset, dict):
+ # Assume asset database document
+ asset_document = asset
+ asset_tasks = asset_document.get("data", {}).get("tasks")
+ asset = asset["name"]
+
+ if not asset_document or not asset_tasks:
+ from avalon import io
+
+ # Assume asset name
+ asset_document = io.find_one(
+ {
+ "name": asset,
+ "type": "asset"
+ },
+ {"data.tasks": True}
+ )
+ assert asset_document, "Asset must exist"
+
+ # Detect any changes compared session
+ mapping = {
+ "AVALON_ASSET": asset,
+ "AVALON_TASK": task,
+ "AVALON_APP": app,
+ }
+ changes = {
+ key: value
+ for key, value in mapping.items()
+ if value and value != session.get(key)
+ }
+ if not changes:
+ return changes
+
+ # Compute work directory (with the temporary changed session so far)
+ _session = session.copy()
+ _session.update(changes)
+
+ changes["AVALON_WORKDIR"] = get_workdir_from_session(_session)
+
+ return changes
+
+
+def get_workdir_from_session(session=None, template_key=None):
+ import avalon.api
+
+ if session is None:
+ session = avalon.api.Session
+ project_name = session["AVALON_PROJECT"]
+ host_name = session["AVALON_APP"]
+ anatomy = Anatomy(project_name)
+ template_data = template_data_from_session(session)
+ anatomy_filled = anatomy.format(template_data)
+
+ if not template_key:
+ task_type = template_data["task"]["type"]
+ template_key = get_workfile_template_key(
+ task_type,
+ host_name,
+ project_name=project_name
+ )
+ return anatomy_filled[template_key]["folder"]
+
+
+def update_current_task(task=None, asset=None, app=None, template_key=None):
+ """Update active Session to a new task work area.
+
+ This updates the live Session to a different `asset`, `task` or `app`.
+
+ Args:
+ task (str): The task to set.
+ asset (str): The asset to set.
+ app (str): The app to set.
+
+ Returns:
+ dict: The changed key, values in the current Session.
+
+ """
+ import avalon.api
+
+ changes = compute_session_changes(
+ avalon.api.Session,
+ task=task,
+ asset=asset,
+ app=app,
+ template_key=template_key
+ )
+
+ # Update the Session and environments. Pop from environments all keys with
+ # value set to None.
+ for key, value in changes.items():
+ avalon.api.Session[key] = value
+ if value is None:
+ os.environ.pop(key, None)
+ else:
+ os.environ[key] = value
+
+ # Emit session change
+ emit_event("taskChanged", changes.copy())
+
+ return changes
+
+
@with_avalon
def get_workfile_doc(asset_id, task_name, filename, dbcon=None):
"""Return workfile document for entered context.
@@ -820,6 +980,8 @@ class BuildWorkfile:
...
}]
"""
+ from openpype.pipeline import discover_loader_plugins
+
# Get current asset name and entity
current_asset_name = avalon.io.Session["AVALON_ASSET"]
current_asset_entity = avalon.io.find_one({
@@ -836,7 +998,7 @@ class BuildWorkfile:
# Prepare available loaders
loaders_by_name = {}
- for loader in avalon.api.discover(avalon.api.Loader):
+ for loader in discover_loader_plugins():
loader_name = loader.__name__
if loader_name in loaders_by_name:
raise KeyError(
@@ -952,7 +1114,7 @@ class BuildWorkfile:
Returns:
(dict): preset per entered task name
"""
- host_name = avalon.api.registered_host().__name__.rsplit(".", 1)[-1]
+ host_name = os.environ["AVALON_APP"]
project_settings = get_project_settings(
avalon.io.Session["AVALON_PROJECT"]
)
@@ -1230,6 +1392,11 @@ class BuildWorkfile:
Returns:
(list) Objects of loaded containers.
"""
+ from openpype.pipeline import (
+ IncompatibleLoaderError,
+ load_container,
+ )
+
loaded_containers = []
# Get subset id order from build presets.
@@ -1291,7 +1458,7 @@ class BuildWorkfile:
if not loader:
continue
try:
- container = avalon.api.load(
+ container = load_container(
loader,
repre["_id"],
name=subset_name
@@ -1300,7 +1467,7 @@ class BuildWorkfile:
is_loaded = True
except Exception as exc:
- if exc == avalon.pipeline.IncompatibleLoaderError:
+ if exc == IncompatibleLoaderError:
self.log.info((
"Loader `{}` is not compatible with"
" representation `{}`"
@@ -1434,11 +1601,13 @@ def get_creator_by_name(creator_name, case_sensitive=False):
Returns:
Creator: Return first matching plugin or `None`.
"""
+ from openpype.pipeline import LegacyCreator
+
# Lower input creator name if is not case sensitive
if not case_sensitive:
creator_name = creator_name.lower()
- for creator_plugin in avalon.api.discover(avalon.api.Creator):
+ for creator_plugin in avalon.api.discover(LegacyCreator):
_creator_name = creator_plugin.__name__
# Lower creator plugin name if is not case sensitive
diff --git a/openpype/lib/config.py b/openpype/lib/config.py
index ba394cfd56..57e8efa57d 100644
--- a/openpype/lib/config.py
+++ b/openpype/lib/config.py
@@ -74,3 +74,9 @@ def get_datetime_data(datetime_obj=None):
"S": str(int(seconds)),
"SS": str(seconds),
}
+
+
+def get_formatted_current_time():
+ return datetime.datetime.now().strftime(
+ "%Y%m%dT%H%M%SZ"
+ )
diff --git a/openpype/lib/delivery.py b/openpype/lib/delivery.py
index 01fcc907ed..9fc65aae8e 100644
--- a/openpype/lib/delivery.py
+++ b/openpype/lib/delivery.py
@@ -17,7 +17,7 @@ def collect_frames(files):
Returns:
(dict): {'/asset/subset_v001.0001.png': '0001', ....}
"""
- collections, remainder = clique.assemble(files)
+ collections, remainder = clique.assemble(files, minimum_items=1)
sources_and_frames = {}
if collections:
@@ -71,15 +71,14 @@ def path_from_representation(representation, anatomy):
def copy_file(src_path, dst_path):
"""Hardlink file if possible(to save space), copy if not"""
- from avalon.vendor import filelink # safer importing
+ from openpype.lib import create_hard_link # safer importing
if os.path.exists(dst_path):
return
try:
- filelink.create(
+ create_hard_link(
src_path,
- dst_path,
- filelink.HARDLINK
+ dst_path
)
except OSError:
shutil.copyfile(src_path, dst_path)
diff --git a/openpype/lib/events.py b/openpype/lib/events.py
new file mode 100644
index 0000000000..7bec6ee30d
--- /dev/null
+++ b/openpype/lib/events.py
@@ -0,0 +1,268 @@
+"""Events holding data about specific event."""
+import os
+import re
+import inspect
+import logging
+import weakref
+from uuid import uuid4
+try:
+ from weakref import WeakMethod
+except Exception:
+ from openpype.lib.python_2_comp import WeakMethod
+
+
+class EventCallback(object):
+ """Callback registered to a topic.
+
+ The callback function is registered to a topic. Topic is a string which
+ may contain '*' that will be handled as "any characters".
+
+ # Examples:
+ - "workfile.save" Callback will be triggered if the event topic is
+ exactly "workfile.save" .
+ - "workfile.*" Callback will be triggered an event topic starts with
+ "workfile." so "workfile.save" and "workfile.open"
+ will trigger the callback.
+ - "*" Callback will listen to all events.
+
+ Callback can be function or method. In both cases it should expect one
+ or none arguments. When 1 argument is expected then the processed 'Event'
+ object is passed in.
+
+ The registered callbacks don't keep function in memory so it is not
+ possible to store lambda function as callback.
+
+ Args:
+ topic(str): Topic which will be listened.
+ func(func): Callback to a topic.
+
+ Raises:
+ TypeError: When passed function is not a callable object.
+ """
+
+ def __init__(self, topic, func):
+ self._log = None
+ self._topic = topic
+ # Replace '*' with any character regex and escape rest of text
+ # - when callback is registered for '*' topic it will receive all
+ # events
+    # - it is possible to register to a partial topics 'my.event.*'
+ # - it will receive all matching event topics
+ # e.g. 'my.event.start' and 'my.event.end'
+ topic_regex_str = "^{}$".format(
+ ".+".join(
+ re.escape(part)
+ for part in topic.split("*")
+ )
+ )
+ topic_regex = re.compile(topic_regex_str)
+ self._topic_regex = topic_regex
+
+ # Convert callback into references
+ # - deleted functions won't cause crashes
+ if inspect.ismethod(func):
+ func_ref = WeakMethod(func)
+ elif callable(func):
+ func_ref = weakref.ref(func)
+ else:
+ raise TypeError((
+ "Registered callback is not callable. \"{}\""
+ ).format(str(func)))
+
+ # Collect additional data about function
+ # - name
+ # - path
+ # - if expect argument or not
+ func_name = func.__name__
+ func_path = os.path.abspath(inspect.getfile(func))
+ if hasattr(inspect, "signature"):
+ sig = inspect.signature(func)
+ expect_args = len(sig.parameters) > 0
+ else:
+ expect_args = len(inspect.getargspec(func)[0]) > 0
+
+ self._func_ref = func_ref
+ self._func_name = func_name
+ self._func_path = func_path
+ self._expect_args = expect_args
+ self._ref_valid = func_ref is not None
+ self._enabled = True
+
+ def __repr__(self):
+ return "< {} - {} > {}".format(
+ self.__class__.__name__, self._func_name, self._func_path
+ )
+
+ @property
+ def log(self):
+ if self._log is None:
+ self._log = logging.getLogger(self.__class__.__name__)
+ return self._log
+
+ @property
+ def is_ref_valid(self):
+ return self._ref_valid
+
+ def validate_ref(self):
+ if not self._ref_valid:
+ return
+
+ callback = self._func_ref()
+ if not callback:
+ self._ref_valid = False
+
+ @property
+ def enabled(self):
+ """Is callback enabled."""
+ return self._enabled
+
+ def set_enabled(self, enabled):
+ """Change if callback is enabled."""
+ self._enabled = enabled
+
+ def deregister(self):
+ """Calling this funcion will cause that callback will be removed."""
+ # Fake reference
+ self._ref_valid = False
+
+ def topic_matches(self, topic):
+ """Check if event topic matches callback's topic."""
+ return self._topic_regex.match(topic)
+
+ def process_event(self, event):
+ """Process event.
+
+ Args:
+ event(Event): Event that was triggered.
+ """
+
+ # Skip if callback is not enabled or has invalid reference
+ if not self._ref_valid or not self._enabled:
+ return
+
+ # Get reference
+ callback = self._func_ref()
+ # Check if reference is valid or callback's topic matches the event
+ if not callback:
+ # Change state if is invalid so the callback is removed
+ self._ref_valid = False
+
+ elif self.topic_matches(event.topic):
+ # Try execute callback
+ try:
+ if self._expect_args:
+ callback(event)
+ else:
+ callback()
+
+ except Exception:
+ self.log.warning(
+ "Failed to execute event callback {}".format(
+ str(repr(self))
+ ),
+ exc_info=True
+ )
+
+
+# Inherit from 'object' for Python 2 hosts
+class Event(object):
+ """Base event object.
+
+    Can be used for any event because it is not specific. Only required argument
+ is topic which defines why event is happening and may be used for
+ filtering.
+
+ Arg:
+ topic (str): Identifier of event.
+ data (Any): Data specific for event. Dictionary is recommended.
+ source (str): Identifier of source.
+ """
+ _data = {}
+
+ def __init__(self, topic, data=None, source=None):
+ self._id = str(uuid4())
+ self._topic = topic
+ if data is None:
+ data = {}
+ self._data = data
+ self._source = source
+
+ def __getitem__(self, key):
+ return self._data[key]
+
+ def get(self, key, *args, **kwargs):
+ return self._data.get(key, *args, **kwargs)
+
+ @property
+ def id(self):
+ return self._id
+
+ @property
+ def source(self):
+ return self._source
+
+ @property
+ def data(self):
+ return self._data
+
+ @property
+ def topic(self):
+ return self._topic
+
+ def emit(self):
+ """Emit event and trigger callbacks."""
+ StoredCallbacks.emit_event(self)
+
+
+class StoredCallbacks:
+ _registered_callbacks = []
+
+ @classmethod
+ def add_callback(cls, topic, callback):
+ callback = EventCallback(topic, callback)
+ cls._registered_callbacks.append(callback)
+ return callback
+
+ @classmethod
+ def emit_event(cls, event):
+ invalid_callbacks = []
+ for callback in cls._registered_callbacks:
+ callback.process_event(event)
+ if not callback.is_ref_valid:
+ invalid_callbacks.append(callback)
+
+ for callback in invalid_callbacks:
+ cls._registered_callbacks.remove(callback)
+
+
+def register_event_callback(topic, callback):
+ """Add callback that will be executed on specific topic.
+
+ Args:
+ topic(str): Topic on which will callback be triggered.
+ callback(function): Callback that will be triggered when a topic
+ is triggered. Callback should expect none or 1 argument where
+ `Event` object is passed.
+
+ Returns:
+ EventCallback: Object wrapping the callback. It can be used to
+ enable/disable listening to a topic or remove the callback from
+ the topic completely.
+ """
+ return StoredCallbacks.add_callback(topic, callback)
+
+
+def emit_event(topic, data=None, source=None):
+ """Emit event with topic and data.
+
+ Arg:
+ topic(str): Event's topic.
+ data(dict): Event's additional data. Optional.
+ source(str): Who emitted the topic. Optional.
+
+ Returns:
+ Event: Object of event that was emitted.
+ """
+ event = Event(topic, data, source)
+ event.emit()
+ return event
diff --git a/openpype/lib/execute.py b/openpype/lib/execute.py
index afde844f2d..c3e35772f3 100644
--- a/openpype/lib/execute.py
+++ b/openpype/lib/execute.py
@@ -1,8 +1,12 @@
import os
+import sys
import subprocess
-import distutils.spawn
+import platform
+import json
+import tempfile
from .log import PypeLogger as Logger
+from .vendor_bin_utils import find_executable
# MSDN process creation flag (Windows only)
CREATE_NO_WINDOW = 0x08000000
@@ -181,6 +185,80 @@ def run_openpype_process(*args, **kwargs):
return run_subprocess(args, env=env, **kwargs)
+def run_detached_process(args, **kwargs):
+ """Execute process with passed arguments as a detached process.
+
+ Values from 'os.environ' are used as the environment if it is not passed.
+ They are cleaned using the 'clean_envs_for_openpype_process' function.
+
+ Example:
+ ```
+ run_detached_process(["run", ""])
+ ```
+
+ Args:
+ args (list): OpenPype cli arguments.
+ **kwargs (dict): Keyword arguments for subprocess.Popen.
+
+ Returns:
+ subprocess.Popen: Pointer to launched process but it is possible that
+ launched process is already killed (on linux).
+ """
+ env = kwargs.pop("env", None)
+ # Keep env untouched if it is passed and not empty
+ if not env:
+ env = os.environ
+
+ # Create copy of passed env
+ kwargs["env"] = {k: v for k, v in env.items()}
+
+ low_platform = platform.system().lower()
+ if low_platform == "darwin":
+ new_args = ["open", "-na", args.pop(0), "--args"]
+ new_args.extend(args)
+ args = new_args
+
+ elif low_platform == "windows":
+ flags = (
+ subprocess.CREATE_NEW_PROCESS_GROUP
+ | subprocess.DETACHED_PROCESS
+ )
+ kwargs["creationflags"] = flags
+
+ if not sys.stdout:
+ kwargs["stdout"] = subprocess.DEVNULL
+ kwargs["stderr"] = subprocess.DEVNULL
+
+ elif low_platform == "linux" and get_linux_launcher_args() is not None:
+ json_data = {
+ "args": args,
+ "env": kwargs.pop("env")
+ }
+ json_temp = tempfile.NamedTemporaryFile(
+ mode="w", prefix="op_app_args", suffix=".json", delete=False
+ )
+ json_temp.close()
+ json_temp_filpath = json_temp.name
+ with open(json_temp_filpath, "w") as stream:
+ json.dump(json_data, stream)
+
+ new_args = get_linux_launcher_args()
+ new_args.append(json_temp_filpath)
+
+ # Create mid-process which will launch application
+ process = subprocess.Popen(new_args, **kwargs)
+ # Wait until the process finishes
+ # - This is important! The process would stay in "open" state.
+ process.wait()
+ # Remove the temp file
+ os.remove(json_temp_filpath)
+ # Return process which is already terminated
+ return process
+
+ process = subprocess.Popen(args, **kwargs)
+ return process
+
+
def path_to_subprocess_arg(path):
"""Prepare path for subprocess arguments.
@@ -263,7 +341,7 @@ def get_linux_launcher_args(*args):
os.path.dirname(openpype_executable),
filename
)
- executable_path = distutils.spawn.find_executable(new_executable)
+ executable_path = find_executable(new_executable)
if executable_path is None:
return None
launch_args = [executable_path]
diff --git a/openpype/lib/path_templates.py b/openpype/lib/path_templates.py
new file mode 100644
index 0000000000..14e5fe59f8
--- /dev/null
+++ b/openpype/lib/path_templates.py
@@ -0,0 +1,788 @@
+import os
+import re
+import copy
+import numbers
+import collections
+
+import six
+
+from .log import PypeLogger
+
+log = PypeLogger.get_logger(__name__)
+
+
+KEY_PATTERN = re.compile(r"(\{.*?[^{0]*\})")
+KEY_PADDING_PATTERN = re.compile(r"([^:]+)\S+[><]\S+")
+SUB_DICT_PATTERN = re.compile(r"([^\[\]]+)")
+OPTIONAL_PATTERN = re.compile(r"(<.*?[^{0]*>)[^0-9]*?")
+
+
+def merge_dict(main_dict, enhance_dict):
+ """Merges dictionaries by keys.
+
+ Function calls itself if the value of a key is again a dictionary.
+
+ Args:
+ main_dict (dict): First dict to merge second one into.
+ enhance_dict (dict): Second dict to be merged.
+
+ Returns:
+ dict: Merged result.
+
+ .. note:: does not override the whole value of an existing key,
+ only the differing values from enhance_dict
+
+ """
+ for key, value in enhance_dict.items():
+ if key not in main_dict:
+ main_dict[key] = value
+ elif isinstance(value, dict) and isinstance(main_dict[key], dict):
+ main_dict[key] = merge_dict(main_dict[key], value)
+ else:
+ main_dict[key] = value
+ return main_dict
+
+
+class TemplateMissingKey(Exception):
+ """Exception for cases when key does not exist in template."""
+
+ msg = "Template key does not exist: `{}`."
+
+ def __init__(self, parents):
+ parent_join = "".join(["[\"{0}\"]".format(key) for key in parents])
+ super(TemplateMissingKey, self).__init__(
+ self.msg.format(parent_join)
+ )
+
+
+class TemplateUnsolved(Exception):
+ """Exception for unsolved template when strict is set to True."""
+
+ msg = "Template \"{0}\" is unsolved.{1}{2}"
+ invalid_types_msg = " Keys with invalid DataType: `{0}`."
+ missing_keys_msg = " Missing keys: \"{0}\"."
+
+ def __init__(self, template, missing_keys, invalid_types):
+ invalid_type_items = []
+ for _key, _type in invalid_types.items():
+ invalid_type_items.append(
+ "\"{0}\" {1}".format(_key, str(_type))
+ )
+
+ invalid_types_msg = ""
+ if invalid_type_items:
+ invalid_types_msg = self.invalid_types_msg.format(
+ ", ".join(invalid_type_items)
+ )
+
+ missing_keys_msg = ""
+ if missing_keys:
+ missing_keys_msg = self.missing_keys_msg.format(
+ ", ".join(missing_keys)
+ )
+ super(TemplateUnsolved, self).__init__(
+ self.msg.format(template, missing_keys_msg, invalid_types_msg)
+ )
+
+
+class StringTemplate(object):
+ """String that can be formatted."""
+ def __init__(self, template):
+ if not isinstance(template, six.string_types):
+ raise TypeError("<{}> argument must be a string, not {}.".format(
+ self.__class__.__name__, str(type(template))
+ ))
+
+ self._template = template
+ parts = []
+ last_end_idx = 0
+ for item in KEY_PATTERN.finditer(template):
+ start, end = item.span()
+ if start > last_end_idx:
+ parts.append(template[last_end_idx:start])
+ parts.append(FormattingPart(template[start:end]))
+ last_end_idx = end
+
+ if last_end_idx < len(template):
+ parts.append(template[last_end_idx:len(template)])
+
+ new_parts = []
+ for part in parts:
+ if not isinstance(part, six.string_types):
+ new_parts.append(part)
+ continue
+
+ substr = ""
+ for char in part:
+ if char not in ("<", ">"):
+ substr += char
+ else:
+ if substr:
+ new_parts.append(substr)
+ new_parts.append(char)
+ substr = ""
+ if substr:
+ new_parts.append(substr)
+
+ self._parts = self.find_optional_parts(new_parts)
+
+ def __str__(self):
+ return self.template
+
+ def __repr__(self):
+ return "<{}> {}".format(self.__class__.__name__, self.template)
+
+ def __contains__(self, other):
+ return other in self.template
+
+ def replace(self, *args, **kwargs):
+ self._template = self.template.replace(*args, **kwargs)
+ return self
+
+ @property
+ def template(self):
+ return self._template
+
+ def format(self, data):
+ """ Format the template with the entered data.
+
+ Separate advanced keys (like '{project[name]}') from strings which must
+ be formatted separately in case of missing or incomplete keys in data.
+
+ Args:
+ data (dict): Containing keys to be filled into template.
+
+ Returns:
+ TemplateResult: Filled or partially filled template containing all
+ data needed or missing for filling template.
+ """
+ result = TemplatePartResult()
+ for part in self._parts:
+ if isinstance(part, six.string_types):
+ result.add_output(part)
+ else:
+ part.format(data, result)
+
+ invalid_types = result.invalid_types
+ invalid_types.update(result.invalid_optional_types)
+ invalid_types = result.split_keys_to_subdicts(invalid_types)
+
+ missing_keys = result.missing_keys
+ missing_keys |= result.missing_optional_keys
+
+ solved = result.solved
+ used_values = result.get_clean_used_values()
+
+ return TemplateResult(
+ result.output,
+ self.template,
+ solved,
+ used_values,
+ missing_keys,
+ invalid_types
+ )
+
+ def format_strict(self, *args, **kwargs):
+ result = self.format(*args, **kwargs)
+ result.validate()
+ return result
+
+ @classmethod
+ def format_template(cls, template, data):
+ objected_template = cls(template)
+ return objected_template.format(data)
+
+ @classmethod
+ def format_strict_template(cls, template, data):
+ objected_template = cls(template)
+ return objected_template.format_strict(data)
+
+ @staticmethod
+ def find_optional_parts(parts):
+ new_parts = []
+ tmp_parts = {}
+ counted_symb = -1
+ for part in parts:
+ if part == "<":
+ counted_symb += 1
+ tmp_parts[counted_symb] = []
+
+ elif part == ">":
+ if counted_symb > -1:
+ parts = tmp_parts.pop(counted_symb)
+ counted_symb -= 1
+ if parts:
+ # Remove optional start char
+ parts.pop(0)
+ if counted_symb < 0:
+ out_parts = new_parts
+ else:
+ out_parts = tmp_parts[counted_symb]
+ # Store temp parts
+ out_parts.append(OptionalPart(parts))
+ continue
+
+ if counted_symb < 0:
+ new_parts.append(part)
+ else:
+ tmp_parts[counted_symb].append(part)
+
+ if tmp_parts:
+ for idx in sorted(tmp_parts.keys()):
+ new_parts.extend(tmp_parts[idx])
+ return new_parts
+
+
+class TemplatesDict(object):
+ def __init__(self, templates=None):
+ self._raw_templates = None
+ self._templates = None
+ self._objected_templates = None
+ self.set_templates(templates)
+
+ def set_templates(self, templates):
+ if templates is None:
+ self._raw_templates = None
+ self._templates = None
+ self._objected_templates = None
+ elif isinstance(templates, dict):
+ self._raw_templates = copy.deepcopy(templates)
+ self._templates = templates
+ self._objected_templates = self.create_ojected_templates(templates)
+ else:
+ raise TypeError("<{}> argument must be a dict, not {}.".format(
+ self.__class__.__name__, str(type(templates))
+ ))
+
+ def __getitem__(self, key):
+ return self.templates[key]
+
+ def get(self, key, *args, **kwargs):
+ return self.templates.get(key, *args, **kwargs)
+
+ @property
+ def raw_templates(self):
+ return self._raw_templates
+
+ @property
+ def templates(self):
+ return self._templates
+
+ @property
+ def objected_templates(self):
+ return self._objected_templates
+
+ @classmethod
+ def create_ojected_templates(cls, templates):
+ if not isinstance(templates, dict):
+ raise TypeError("Expected dict object, got {}".format(
+ str(type(templates))
+ ))
+
+ objected_templates = copy.deepcopy(templates)
+ inner_queue = collections.deque()
+ inner_queue.append(objected_templates)
+ while inner_queue:
+ item = inner_queue.popleft()
+ if not isinstance(item, dict):
+ continue
+ for key in tuple(item.keys()):
+ value = item[key]
+ if isinstance(value, six.string_types):
+ item[key] = StringTemplate(value)
+ elif isinstance(value, dict):
+ inner_queue.append(value)
+ return objected_templates
+
+ def _format_value(self, value, data):
+ if isinstance(value, StringTemplate):
+ return value.format(data)
+
+ if isinstance(value, dict):
+ return self._solve_dict(value, data)
+ return value
+
+ def _solve_dict(self, templates, data):
+ """ Solves templates with entered data.
+
+ Args:
+ templates (dict): All templates which will be formatted.
+ data (dict): Containing keys to be filled into template.
+
+ Returns:
+ dict: With `TemplateResult` in values containing filled or
+ partially filled templates.
+ """
+ output = collections.defaultdict(dict)
+ for key, value in templates.items():
+ output[key] = self._format_value(value, data)
+
+ return output
+
+ def format(self, in_data, only_keys=True, strict=True):
+ """ Solves templates based on entered data.
+
+ Args:
+ in_data (dict): Containing keys to be filled into template.
+ only_keys (bool, optional): Decides if environ will be used to
+ fill templates or only keys in data.
+
+ Returns:
+ TemplatesResultDict: Output `TemplateResult`s have the `strict`
+ attribute set to True so accessing unfilled keys in templates
+ will raise exceptions with an explained error.
+ """
+ # Create a copy of inserted data
+ data = copy.deepcopy(in_data)
+
+ # Add environment variable to data
+ if only_keys is False:
+ for key, val in os.environ.items():
+ env_key = "$" + key
+ if env_key not in data:
+ data[env_key] = val
+
+ solved = self._solve_dict(self.objected_templates, data)
+
+ output = TemplatesResultDict(solved)
+ output.strict = strict
+ return output
+
+
+class TemplateResult(str):
+ """Result of template format with most of information in.
+
+ Args:
+ used_values (dict): Dictionary of template filling data with
+ only used keys.
+ solved (bool): Check whether all required keys were filled.
+ template (str): Original template.
+ missing_keys (list): Missing keys that were not in the data. Include
+ missing optional keys.
+ invalid_types (dict): When key was found in data, but value had not
+ allowed DataType. Allowed data types are `numbers`,
+ `str`(`basestring`) and `dict`. Dictionary may cause invalid type
+ when value of key in data is dictionary but template expects string
+ or number.
+ """
+ used_values = None
+ solved = None
+ template = None
+ missing_keys = None
+ invalid_types = None
+
+ def __new__(
+ cls, filled_template, template, solved,
+ used_values, missing_keys, invalid_types
+ ):
+ new_obj = super(TemplateResult, cls).__new__(cls, filled_template)
+ new_obj.used_values = used_values
+ new_obj.solved = solved
+ new_obj.template = template
+ new_obj.missing_keys = list(set(missing_keys))
+ new_obj.invalid_types = invalid_types
+ return new_obj
+
+ def validate(self):
+ if not self.solved:
+ raise TemplateUnsolved(
+ self.template,
+ self.missing_keys,
+ self.invalid_types
+ )
+
+
+class TemplatesResultDict(dict):
+ """Holds and wraps TemplateResults for easy bug reporting."""
+
+ def __init__(self, in_data, key=None, parent=None, strict=None):
+ super(TemplatesResultDict, self).__init__()
+ for _key, _value in in_data.items():
+ if isinstance(_value, dict):
+ _value = self.__class__(_value, _key, self)
+ self[_key] = _value
+
+ self.key = key
+ self.parent = parent
+ self.strict = strict
+ if self.parent is None and strict is None:
+ self.strict = True
+
+ def __getitem__(self, key):
+ if key not in self.keys():
+ hier = self.hierarchy()
+ hier.append(key)
+ raise TemplateMissingKey(hier)
+
+ value = super(TemplatesResultDict, self).__getitem__(key)
+ if isinstance(value, self.__class__):
+ return value
+
+ # Raise exception when expected solved templates and it is not.
+ if self.raise_on_unsolved and hasattr(value, "validate"):
+ value.validate()
+ return value
+
+ @property
+ def raise_on_unsolved(self):
+ """To change this behavior, set the `strict` attribute."""
+ if self.strict is not None:
+ return self.strict
+ return self.parent.raise_on_unsolved
+
+ def hierarchy(self):
+ """Return dictionary keys one by one to root parent."""
+ if self.parent is None:
+ return []
+
+ hier_keys = []
+ par_hier = self.parent.hierarchy()
+ if par_hier:
+ hier_keys.extend(par_hier)
+ hier_keys.append(self.key)
+
+ return hier_keys
+
+ @property
+ def missing_keys(self):
+ """Return missing keys of all children templates."""
+ missing_keys = set()
+ for value in self.values():
+ missing_keys |= value.missing_keys
+ return missing_keys
+
+ @property
+ def invalid_types(self):
+ """Return invalid types of all children templates."""
+ invalid_types = {}
+ for value in self.values():
+ invalid_types = merge_dict(invalid_types, value.invalid_types)
+ return invalid_types
+
+ @property
+ def used_values(self):
+ """Return used values for all children templates."""
+ used_values = {}
+ for value in self.values():
+ used_values = merge_dict(used_values, value.used_values)
+ return used_values
+
+ def get_solved(self):
+ """Get only solved key from templates."""
+ result = {}
+ for key, value in self.items():
+ if isinstance(value, self.__class__):
+ value = value.get_solved()
+ if not value:
+ continue
+ result[key] = value
+
+ elif (
+ not hasattr(value, "solved") or
+ value.solved
+ ):
+ result[key] = value
+ return self.__class__(result, key=self.key, parent=self.parent)
+
+
+class TemplatePartResult:
+ """Result to store result of template parts."""
+ def __init__(self, optional=False):
+ # Missing keys or invalid value types of required keys
+ self._missing_keys = set()
+ self._invalid_types = {}
+ # Missing keys or invalid value types of optional keys
+ self._missing_optional_keys = set()
+ self._invalid_optional_types = {}
+
+ # Used values stored by key with origin type
+ # - key without any padding or key modifiers
+ # - value from filling data
+ # Example: {"version": 1}
+ self._used_values = {}
+ # Used values stored by key with all modifiers
+ # - value is already formatted string
+ # Example: {"version:0>3": "001"}
+ self._realy_used_values = {}
+ # Concatenated string output after formatting
+ self._output = ""
+ # Is this result from optional part
+ self._optional = True
+
+ def add_output(self, other):
+ if isinstance(other, six.string_types):
+ self._output += other
+
+ elif isinstance(other, TemplatePartResult):
+ self._output += other.output
+
+ self._missing_keys |= other.missing_keys
+ self._missing_optional_keys |= other.missing_optional_keys
+
+ self._invalid_types.update(other.invalid_types)
+ self._invalid_optional_types.update(other.invalid_optional_types)
+
+ if other.optional and not other.solved:
+ return
+ self._used_values.update(other.used_values)
+ self._realy_used_values.update(other.realy_used_values)
+
+ else:
+ raise TypeError("Cannot add data from \"{}\" to \"{}\"".format(
+ str(type(other)), self.__class__.__name__)
+ )
+
+ @property
+ def solved(self):
+ if self.optional:
+ if (
+ len(self.missing_optional_keys) > 0
+ or len(self.invalid_optional_types) > 0
+ ):
+ return False
+ return (
+ len(self.missing_keys) == 0
+ and len(self.invalid_types) == 0
+ )
+
+ @property
+ def optional(self):
+ return self._optional
+
+ @property
+ def output(self):
+ return self._output
+
+ @property
+ def missing_keys(self):
+ return self._missing_keys
+
+ @property
+ def missing_optional_keys(self):
+ return self._missing_optional_keys
+
+ @property
+ def invalid_types(self):
+ return self._invalid_types
+
+ @property
+ def invalid_optional_types(self):
+ return self._invalid_optional_types
+
+ @property
+ def realy_used_values(self):
+ return self._realy_used_values
+
+ @property
+ def used_values(self):
+ return self._used_values
+
+ @staticmethod
+ def split_keys_to_subdicts(values):
+ output = {}
+ for key, value in values.items():
+ key_padding = list(KEY_PADDING_PATTERN.findall(key))
+ if key_padding:
+ key = key_padding[0]
+ key_subdict = list(SUB_DICT_PATTERN.findall(key))
+ data = output
+ last_key = key_subdict.pop(-1)
+ for subkey in key_subdict:
+ if subkey not in data:
+ data[subkey] = {}
+ data = data[subkey]
+ data[last_key] = value
+ return output
+
+ def get_clean_used_values(self):
+ new_used_values = {}
+ for key, value in self.used_values.items():
+ if isinstance(value, FormatObject):
+ value = str(value)
+ new_used_values[key] = value
+
+ return self.split_keys_to_subdicts(new_used_values)
+
+ def add_realy_used_value(self, key, value):
+ self._realy_used_values[key] = value
+
+ def add_used_value(self, key, value):
+ self._used_values[key] = value
+
+ def add_missing_key(self, key):
+ if self._optional:
+ self._missing_optional_keys.add(key)
+ else:
+ self._missing_keys.add(key)
+
+ def add_invalid_type(self, key, value):
+ if self._optional:
+ self._invalid_optional_types[key] = type(value)
+ else:
+ self._invalid_types[key] = type(value)
+
+
+class FormatObject(object):
+ """Object that can be used for formatting.
+
+ This is a base class that is valid to be used as a 'StringTemplate' value.
+ """
+ def __init__(self):
+ self.value = ""
+
+ def __format__(self, *args, **kwargs):
+ return self.value.__format__(*args, **kwargs)
+
+ def __str__(self):
+ return str(self.value)
+
+ def __repr__(self):
+ return self.__str__()
+
+
+class FormattingPart:
+ """String with formatting template.
+
+ Contains only a single key to format, e.g. "{project[name]}".
+
+ Args:
+ template(str): String containing the formatting key.
+ """
+ def __init__(self, template):
+ self._template = template
+
+ @property
+ def template(self):
+ return self._template
+
+ def __repr__(self):
+ return "".format(self._template)
+
+ def __str__(self):
+ return self._template
+
+ @staticmethod
+ def validate_value_type(value):
+ """Check if value can be used for formatting of single key."""
+ if isinstance(value, (numbers.Number, FormatObject)):
+ return True
+
+ for inh_class in type(value).mro():
+ if inh_class in six.string_types:
+ return True
+ return False
+
+ def format(self, data, result):
+ """Format the formatting string.
+
+ Args:
+ data(dict): Data that should be used for formatting.
+ result(TemplatePartResult): Object where result is stored.
+ """
+ key = self.template[1:-1]
+ if key in result.realy_used_values:
+ result.add_output(result.realy_used_values[key])
+ return result
+
+ # check if key expects subdictionary keys (e.g. project[name])
+ existence_check = key
+ key_padding = list(KEY_PADDING_PATTERN.findall(existence_check))
+ if key_padding:
+ existence_check = key_padding[0]
+ key_subdict = list(SUB_DICT_PATTERN.findall(existence_check))
+
+ value = data
+ missing_key = False
+ invalid_type = False
+ used_keys = []
+ for sub_key in key_subdict:
+ if (
+ value is None
+ or (hasattr(value, "items") and sub_key not in value)
+ ):
+ missing_key = True
+ used_keys.append(sub_key)
+ break
+
+ if not hasattr(value, "items"):
+ invalid_type = True
+ break
+
+ used_keys.append(sub_key)
+ value = value.get(sub_key)
+
+ if missing_key or invalid_type:
+ if len(used_keys) == 0:
+ invalid_key = key_subdict[0]
+ else:
+ invalid_key = used_keys[0]
+ for idx, sub_key in enumerate(used_keys):
+ if idx == 0:
+ continue
+ invalid_key += "[{0}]".format(sub_key)
+
+ if missing_key:
+ result.add_missing_key(invalid_key)
+
+ elif invalid_type:
+ result.add_invalid_type(invalid_key, value)
+
+ result.add_output(self.template)
+ return result
+
+ if self.validate_value_type(value):
+ fill_data = {}
+ first_value = True
+ for used_key in reversed(used_keys):
+ if first_value:
+ first_value = False
+ fill_data[used_key] = value
+ else:
+ _fill_data = {used_key: fill_data}
+ fill_data = _fill_data
+
+ formatted_value = self.template.format(**fill_data)
+ result.add_realy_used_value(key, formatted_value)
+ result.add_used_value(existence_check, formatted_value)
+ result.add_output(formatted_value)
+ return result
+
+ result.add_invalid_type(key, value)
+ result.add_output(self.template)
+
+ return result
+
+
+class OptionalPart:
+ """Template part which contains optional formatting strings.
+
+ If this part can't be filled the result is empty string.
+
+ Args:
+ parts(list): Parts of template. Can contain 'str', 'OptionalPart' or
+ 'FormattingPart'.
+ """
+ def __init__(self, parts):
+ self._parts = parts
+
+ @property
+ def parts(self):
+ return self._parts
+
+ def __str__(self):
+ return "<{}>".format("".join([str(p) for p in self._parts]))
+
+ def __repr__(self):
+ return "".format("".join([str(p) for p in self._parts]))
+
+ def format(self, data, result):
+ new_result = TemplatePartResult(True)
+ for part in self._parts:
+ if isinstance(part, six.string_types):
+ new_result.add_output(part)
+ else:
+ part.format(data, new_result)
+
+ if new_result.solved:
+ result.add_output(new_result)
+ return result
diff --git a/openpype/lib/path_tools.py b/openpype/lib/path_tools.py
index d6c32ad9e8..851bc872fb 100644
--- a/openpype/lib/path_tools.py
+++ b/openpype/lib/path_tools.py
@@ -4,9 +4,9 @@ import abc
import json
import logging
import six
+import platform
from openpype.settings import get_project_settings
-from openpype.settings.lib import get_site_local_overrides
from .anatomy import Anatomy
from .profiles_filtering import filter_profiles
@@ -14,6 +14,42 @@ from .profiles_filtering import filter_profiles
log = logging.getLogger(__name__)
+def create_hard_link(src_path, dst_path):
+ """Create hardlink of file.
+
+ Args:
+ src_path(str): Full path to a file which is used as source for
+ hardlink.
+ dst_path(str): Full path to a file where a link of source will be
+ added.
+ """
+ # Use `os.link` if is available
+ # - should be for all platforms with newer python versions
+ if hasattr(os, "link"):
+ os.link(src_path, dst_path)
+ return
+
+ # Windows implementation of hardlinks
+ # - used in Python 2
+ if platform.system().lower() == "windows":
+ import ctypes
+ from ctypes.wintypes import BOOL
+ CreateHardLink = ctypes.windll.kernel32.CreateHardLinkW
+ CreateHardLink.argtypes = [
+ ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_void_p
+ ]
+ CreateHardLink.restype = BOOL
+
+ res = CreateHardLink(dst_path, src_path, None)
+ if res == 0:
+ raise ctypes.WinError()
+ return
+ # Raises not implemented error if gets here
+ raise NotImplementedError(
+ "Implementation of hardlink for current environment is missing."
+ )
+
+
def _rreplace(s, a, b, n=1):
"""Replace a with b in string s from right side n times."""
return b.join(s.rsplit(a, n))
diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py
index 183aad939a..f11ba56865 100644
--- a/openpype/lib/plugin_tools.py
+++ b/openpype/lib/plugin_tools.py
@@ -280,6 +280,7 @@ def set_plugin_attributes_from_settings(
project_name (str): Name of project for which settings will be loaded.
Value from environment `AVALON_PROJECT` is used if not entered.
"""
+ from openpype.pipeline import LegacyCreator, LoaderPlugin
# determine host application to use for finding presets
if host_name is None:
@@ -289,11 +290,11 @@ def set_plugin_attributes_from_settings(
project_name = os.environ.get("AVALON_PROJECT")
# map plugin superclass to preset json. Currently supported is load and
- # create (avalon.api.Loader and avalon.api.Creator)
+ # create (LoaderPlugin and LegacyCreator)
plugin_type = None
- if superclass.__name__.split(".")[-1] in ("Loader", "SubsetLoader"):
+ if superclass is LoaderPlugin or issubclass(superclass, LoaderPlugin):
plugin_type = "load"
- elif superclass.__name__.split(".")[-1] == "Creator":
+ elif superclass is LegacyCreator or issubclass(superclass, LegacyCreator):
plugin_type = "create"
if not host_name or not project_name or plugin_type is None:
diff --git a/openpype/lib/terminal.py b/openpype/lib/terminal.py
index bc0744931a..5121b6ec26 100644
--- a/openpype/lib/terminal.py
+++ b/openpype/lib/terminal.py
@@ -49,11 +49,13 @@ class Terminal:
"""
from openpype.lib import env_value_to_bool
- use_colors = env_value_to_bool(
- "OPENPYPE_LOG_NO_COLORS", default=Terminal.use_colors
+ log_no_colors = env_value_to_bool(
+ "OPENPYPE_LOG_NO_COLORS", default=None
)
- if not use_colors:
- Terminal.use_colors = use_colors
+ if log_no_colors is not None:
+ Terminal.use_colors = not log_no_colors
+
+ if not Terminal.use_colors:
Terminal._initialized = True
return
diff --git a/openpype/lib/transcoding.py b/openpype/lib/transcoding.py
index e89fa6331e..6bab6a8160 100644
--- a/openpype/lib/transcoding.py
+++ b/openpype/lib/transcoding.py
@@ -1,15 +1,18 @@
import os
import re
import logging
+import json
import collections
import tempfile
+import subprocess
import xml.etree.ElementTree
from .execute import run_subprocess
from .vendor_bin_utils import (
+ get_ffmpeg_tool_path,
get_oiio_tools_path,
- is_oiio_supported
+ is_oiio_supported,
)
# Max length of string that is supported by ffmpeg
@@ -90,7 +93,7 @@ class RationalToInt:
if len(parts) != 1:
bottom = float(parts[1])
- self._value = top / bottom
+ self._value = float(top) / float(bottom)
self._string_value = string_value
@property
@@ -170,6 +173,23 @@ def convert_value_by_type_name(value_type, value, logger=None):
if value_type == "rational2i":
return RationalToInt(value)
+ if value_type == "vector":
+ parts = [part.strip() for part in value.split(",")]
+ output = []
+ for part in parts:
+ if part == "-nan":
+ output.append(None)
+ continue
+ try:
+ part = float(part)
+ except ValueError:
+ pass
+ output.append(part)
+ return output
+
+ if value_type == "timecode":
+ return value
+
# Array of other types is converted to list
re_result = ARRAY_TYPE_REGEX.findall(value_type)
if re_result:
@@ -466,3 +486,290 @@ def convert_for_ffmpeg(
logger.debug("Conversion command: {}".format(" ".join(oiio_cmd)))
run_subprocess(oiio_cmd, logger=logger)
+
+
+# FFMPEG functions
+def get_ffprobe_data(path_to_file, logger=None):
+ """Load data about entered filepath via ffprobe.
+
+ Args:
+ path_to_file (str): absolute path
+ logger (logging.Logger): injected logger, if empty new is created
+ """
+ if not logger:
+ logger = logging.getLogger(__name__)
+ logger.info(
+ "Getting information about input \"{}\".".format(path_to_file)
+ )
+ args = [
+ get_ffmpeg_tool_path("ffprobe"),
+ "-hide_banner",
+ "-loglevel", "fatal",
+ "-show_error",
+ "-show_format",
+ "-show_streams",
+ "-show_programs",
+ "-show_chapters",
+ "-show_private_data",
+ "-print_format", "json",
+ path_to_file
+ ]
+
+ logger.debug("FFprobe command: {}".format(
+ subprocess.list2cmdline(args)
+ ))
+ popen = subprocess.Popen(
+ args,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
+
+ popen_stdout, popen_stderr = popen.communicate()
+ if popen_stdout:
+ logger.debug("FFprobe stdout:\n{}".format(
+ popen_stdout.decode("utf-8")
+ ))
+
+ if popen_stderr:
+ logger.warning("FFprobe stderr:\n{}".format(
+ popen_stderr.decode("utf-8")
+ ))
+
+ return json.loads(popen_stdout)
+
+
+def get_ffprobe_streams(path_to_file, logger=None):
+ """Load streams from entered filepath via ffprobe.
+
+ Args:
+ path_to_file (str): absolute path
+ logger (logging.Logger): injected logger, if empty new is created
+ """
+ return get_ffprobe_data(path_to_file, logger)["streams"]
+
+
+def get_ffmpeg_format_args(ffprobe_data, source_ffmpeg_cmd=None):
+ """Copy format from input metadata for output.
+
+ Args:
+ ffprobe_data(dict): Data received from ffprobe.
+ source_ffmpeg_cmd(str): Command that created input if available.
+ """
+ input_format = ffprobe_data.get("format") or {}
+ if input_format.get("format_name") == "mxf":
+ return _ffmpeg_mxf_format_args(ffprobe_data, source_ffmpeg_cmd)
+ return []
+
+
+def _ffmpeg_mxf_format_args(ffprobe_data, source_ffmpeg_cmd):
+ input_format = ffprobe_data["format"]
+ format_tags = input_format.get("tags") or {}
+ product_name = format_tags.get("product_name") or ""
+ output = []
+ if "opatom" in product_name.lower():
+ output.extend(["-f", "mxf_opatom"])
+ return output
+
+
+def get_ffmpeg_codec_args(ffprobe_data, source_ffmpeg_cmd=None, logger=None):
+ """Copy codec from input metadata for output.
+
+ Args:
+ ffprobe_data(dict): Data received from ffprobe.
+ source_ffmpeg_cmd(str): Command that created input if available.
+ """
+ if logger is None:
+ logger = logging.getLogger(__name__)
+
+ video_stream = None
+ no_audio_stream = None
+ for stream in ffprobe_data["streams"]:
+ codec_type = stream["codec_type"]
+ if codec_type == "video":
+ video_stream = stream
+ break
+ elif no_audio_stream is None and codec_type != "audio":
+ no_audio_stream = stream
+
+ if video_stream is None:
+ if no_audio_stream is None:
+ logger.warning(
+ "Couldn't find stream that is not an audio file."
+ )
+ return []
+ logger.info(
+ "Didn't find video stream. Using first non audio stream."
+ )
+ video_stream = no_audio_stream
+
+ codec_name = video_stream.get("codec_name")
+ # Codec "prores"
+ if codec_name == "prores":
+ return _ffmpeg_prores_codec_args(video_stream, source_ffmpeg_cmd)
+
+ # Codec "h264"
+ if codec_name == "h264":
+ return _ffmpeg_h264_codec_args(video_stream, source_ffmpeg_cmd)
+
+ # Coded DNxHD
+ if codec_name == "dnxhd":
+ return _ffmpeg_dnxhd_codec_args(video_stream, source_ffmpeg_cmd)
+
+ output = []
+ if codec_name:
+ output.extend(["-codec:v", codec_name])
+
+ bit_rate = video_stream.get("bit_rate")
+ if bit_rate:
+ output.extend(["-b:v", bit_rate])
+
+ pix_fmt = video_stream.get("pix_fmt")
+ if pix_fmt:
+ output.extend(["-pix_fmt", pix_fmt])
+
+ output.extend(["-g", "1"])
+
+ return output
+
+
+def _ffmpeg_prores_codec_args(stream_data, source_ffmpeg_cmd):
+ output = []
+
+ tags = stream_data.get("tags") or {}
+ encoder = tags.get("encoder") or ""
+ if encoder.endswith("prores_ks"):
+ codec_name = "prores_ks"
+
+ elif encoder.endswith("prores_aw"):
+ codec_name = "prores_aw"
+
+ else:
+ codec_name = "prores"
+
+ output.extend(["-codec:v", codec_name])
+
+ pix_fmt = stream_data.get("pix_fmt")
+ if pix_fmt:
+ output.extend(["-pix_fmt", pix_fmt])
+
+ # Rest of arguments is prores_kw specific
+ if codec_name == "prores_ks":
+ codec_tag_to_profile_map = {
+ "apco": "proxy",
+ "apcs": "lt",
+ "apcn": "standard",
+ "apch": "hq",
+ "ap4h": "4444",
+ "ap4x": "4444xq"
+ }
+ codec_tag_str = stream_data.get("codec_tag_string")
+ if codec_tag_str:
+ profile = codec_tag_to_profile_map.get(codec_tag_str)
+ if profile:
+ output.extend(["-profile:v", profile])
+
+ return output
+
+
+def _ffmpeg_h264_codec_args(stream_data, source_ffmpeg_cmd):
+ output = ["-codec:v", "h264"]
+
+ # Use arguments from source if are available source arguments
+ if source_ffmpeg_cmd:
+ copy_args = (
+ "-crf",
+ "-b:v", "-vb",
+ "-minrate", "-minrate:",
+ "-maxrate", "-maxrate:",
+ "-bufsize", "-bufsize:"
+ )
+ args = source_ffmpeg_cmd.split(" ")
+ for idx, arg in enumerate(args):
+ if arg in copy_args:
+ output.extend([arg, args[idx + 1]])
+
+ pix_fmt = stream_data.get("pix_fmt")
+ if pix_fmt:
+ output.extend(["-pix_fmt", pix_fmt])
+
+ output.extend(["-intra"])
+ output.extend(["-g", "1"])
+
+ return output
+
+
+def _ffmpeg_dnxhd_codec_args(stream_data, source_ffmpeg_cmd):
+ output = ["-codec:v", "dnxhd"]
+
+ # Use source profile (profiles in metadata are not usable in args directly)
+ profile = stream_data.get("profile") or ""
+ # Lower profile and replace space with underscore
+ cleaned_profile = profile.lower().replace(" ", "_")
+
+ # TODO validate this statement
+ # Looks like using 'dnxhd' profile must have set bit rate and in that case
+ # should be used bitrate from source.
+ # - related attributes 'bit_rate_defined', 'bit_rate_must_be_defined'
+ bit_rate_must_be_defined = True
+ dnx_profiles = {
+ "dnxhd",
+ "dnxhr_lb",
+ "dnxhr_sq",
+ "dnxhr_hq",
+ "dnxhr_hqx",
+ "dnxhr_444"
+ }
+ if cleaned_profile in dnx_profiles:
+ if cleaned_profile != "dnxhd":
+ bit_rate_must_be_defined = False
+ output.extend(["-profile:v", cleaned_profile])
+
+ pix_fmt = stream_data.get("pix_fmt")
+ if pix_fmt:
+ output.extend(["-pix_fmt", pix_fmt])
+
+ # Use arguments from source if are available source arguments
+ bit_rate_defined = False
+ if source_ffmpeg_cmd:
+ # Define bitrate arguments
+ bit_rate_args = ("-b:v", "-vb",)
+ # Separate the two variables in case something else should be copied
+ # from source command
+ copy_args = []
+ copy_args.extend(bit_rate_args)
+
+ args = source_ffmpeg_cmd.split(" ")
+ for idx, arg in enumerate(args):
+ if arg in copy_args:
+ if arg in bit_rate_args:
+ bit_rate_defined = True
+ output.extend([arg, args[idx + 1]])
+
+ # Add bitrate if needed
+ if bit_rate_must_be_defined and not bit_rate_defined:
+ src_bit_rate = stream_data.get("bit_rate")
+ if src_bit_rate:
+ output.extend(["-b:v", src_bit_rate])
+
+ output.extend(["-g", "1"])
+ return output
+
+
+def convert_ffprobe_fps_value(str_value):
+ """Returns (str) value of fps from ffprobe frame format (120/1)"""
+ if str_value == "0/0":
+ print("WARNING: Source has \"r_frame_rate\" value set to \"0/0\".")
+ return "Unknown"
+
+ items = str_value.split("/")
+ if len(items) == 1:
+ fps = float(items[0])
+
+ elif len(items) == 2:
+ fps = float(items[0]) / float(items[1])
+
+ # Check if fps is integer or float number
+ if int(fps) == fps:
+ fps = int(fps)
+
+ return str(fps)
diff --git a/openpype/lib/vendor_bin_utils.py b/openpype/lib/vendor_bin_utils.py
index 4c2cf93dfa..23e28ea304 100644
--- a/openpype/lib/vendor_bin_utils.py
+++ b/openpype/lib/vendor_bin_utils.py
@@ -1,11 +1,87 @@
import os
import logging
-import json
import platform
-import subprocess
-import distutils
-log = logging.getLogger("FFmpeg utils")
+log = logging.getLogger("Vendor utils")
+
+
+def is_file_executable(filepath):
+ """Filepath lead to executable file.
+
+ Args:
+ filepath(str): Full path to file.
+ """
+ if not filepath:
+ return False
+
+ if os.path.isfile(filepath):
+ if os.access(filepath, os.X_OK):
+ return True
+
+ log.info(
+ "Filepath is not available for execution \"{}\"".format(filepath)
+ )
+ return False
+
+
+def find_executable(executable):
+ """Find full path to executable.
+
+ Also tries additional extensions if passed executable does not contain one.
+
+ Paths searched for the executable are defined by the 'PATH' environment
+ variable, 'os.confstr("CS_PATH")' or 'os.defpath'.
+
+ Args:
+ executable(str): Name of executable with or without extension. Can be
+ path to file.
+
+ Returns:
+ str: Full path to executable with extension (is file).
+ None: When the executable was not found.
+ """
+ # Skip if passed path is file
+ if is_file_executable(executable):
+ return executable
+
+ low_platform = platform.system().lower()
+ _, ext = os.path.splitext(executable)
+
+ # Prepare variants for which it will be looked
+ variants = [executable]
+ # Add other extension variants only if passed executable does not have one
+ if not ext:
+ if low_platform == "windows":
+ exts = [".exe", ".ps1", ".bat"]
+ for ext in os.getenv("PATHEXT", "").split(os.pathsep):
+ ext = ext.lower()
+ if ext and ext not in exts:
+ exts.append(ext)
+ else:
+ exts = [".sh"]
+
+ for ext in exts:
+ variant = executable + ext
+ if is_file_executable(variant):
+ return variant
+ variants.append(variant)
+
+ # Get paths where to look for executable
+ path_str = os.environ.get("PATH", None)
+ if path_str is None:
+ if hasattr(os, "confstr"):
+ path_str = os.confstr("CS_PATH")
+ elif hasattr(os, "defpath"):
+ path_str = os.defpath
+
+ if path_str:
+ paths = path_str.split(os.pathsep)
+ for path in paths:
+ for variant in variants:
+ filepath = os.path.abspath(os.path.join(path, variant))
+ if is_file_executable(filepath):
+ return filepath
+ return None
def get_vendor_bin_path(bin_app):
@@ -41,11 +117,7 @@ def get_oiio_tools_path(tool="oiiotool"):
Default is "oiiotool".
"""
oiio_dir = get_vendor_bin_path("oiio")
- if platform.system().lower() == "windows" and not tool.lower().endswith(
- ".exe"
- ):
- tool = "{}.exe".format(tool)
- return os.path.join(oiio_dir, tool)
+ return find_executable(os.path.join(oiio_dir, tool))
def get_ffmpeg_tool_path(tool="ffmpeg"):
@@ -61,57 +133,7 @@ def get_ffmpeg_tool_path(tool="ffmpeg"):
ffmpeg_dir = get_vendor_bin_path("ffmpeg")
if platform.system().lower() == "windows":
ffmpeg_dir = os.path.join(ffmpeg_dir, "bin")
- return os.path.join(ffmpeg_dir, tool)
-
-
-def ffprobe_streams(path_to_file, logger=None):
- """Load streams from entered filepath via ffprobe.
-
- Args:
- path_to_file (str): absolute path
- logger (logging.getLogger): injected logger, if empty new is created
-
- """
- if not logger:
- logger = log
- logger.info(
- "Getting information about input \"{}\".".format(path_to_file)
- )
- args = [
- get_ffmpeg_tool_path("ffprobe"),
- "-hide_banner",
- "-loglevel", "fatal",
- "-show_error",
- "-show_format",
- "-show_streams",
- "-show_programs",
- "-show_chapters",
- "-show_private_data",
- "-print_format", "json",
- path_to_file
- ]
-
- logger.debug("FFprobe command: {}".format(
- subprocess.list2cmdline(args)
- ))
- popen = subprocess.Popen(
- args,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE
- )
-
- popen_stdout, popen_stderr = popen.communicate()
- if popen_stdout:
- logger.debug("FFprobe stdout:\n{}".format(
- popen_stdout.decode("utf-8")
- ))
-
- if popen_stderr:
- logger.warning("FFprobe stderr:\n{}".format(
- popen_stderr.decode("utf-8")
- ))
-
- return json.loads(popen_stdout)["streams"]
+ return find_executable(os.path.join(ffmpeg_dir, tool))
def is_oiio_supported():
@@ -122,7 +144,7 @@ def is_oiio_supported():
"""
loaded_path = oiio_path = get_oiio_tools_path()
if oiio_path:
- oiio_path = distutils.spawn.find_executable(oiio_path)
+ oiio_path = find_executable(oiio_path)
if not oiio_path:
log.debug("OIIOTool is not configured or not present at {}".format(
diff --git a/openpype/modules/base.py b/openpype/modules/base.py
index d566692439..175957ae39 100644
--- a/openpype/modules/base.py
+++ b/openpype/modules/base.py
@@ -33,16 +33,21 @@ DEFAULT_OPENPYPE_MODULES = (
"avalon_apps",
"clockify",
"log_viewer",
+ "deadline",
"muster",
+ "royalrender",
"python_console_interpreter",
+ "ftrack",
"slack",
"webserver",
"launcher_action",
"project_manager_action",
"settings_action",
"standalonepublish_action",
+ "traypublish_action",
"job_queue",
"timers_manager",
+ "sync_server",
)
@@ -56,6 +61,7 @@ class _ModuleClass(object):
def __init__(self, name):
# Call setattr on super class
super(_ModuleClass, self).__setattr__("name", name)
+ super(_ModuleClass, self).__setattr__("__name__", name)
# Where modules and interfaces are stored
super(_ModuleClass, self).__setattr__("__attributes__", dict())
@@ -67,7 +73,7 @@ class _ModuleClass(object):
if attr_name not in self.__attributes__:
if attr_name in ("__path__", "__file__"):
return None
- raise ImportError("No module named {}.{}".format(
+ raise AttributeError("'{}' has no attribute '{}'".format(
self.name, attr_name
))
return self.__attributes__[attr_name]
@@ -218,8 +224,6 @@ def load_interfaces(force=False):
def _load_interfaces():
# Key under which will be modules imported in `sys.modules`
- from openpype.lib import import_filepath
-
modules_key = "openpype_interfaces"
sys.modules[modules_key] = openpype_interfaces = (
@@ -844,6 +848,7 @@ class TrayModulesManager(ModulesManager):
"avalon",
"clockify",
"standalonepublish_tool",
+ "traypublish_tool",
"log_viewer",
"local_settings",
"settings"
diff --git a/openpype/modules/default_modules/deadline/__init__.py b/openpype/modules/deadline/__init__.py
similarity index 100%
rename from openpype/modules/default_modules/deadline/__init__.py
rename to openpype/modules/deadline/__init__.py
diff --git a/openpype/lib/abstract_submit_deadline.py b/openpype/modules/deadline/abstract_submit_deadline.py
similarity index 99%
rename from openpype/lib/abstract_submit_deadline.py
rename to openpype/modules/deadline/abstract_submit_deadline.py
index f54a2501a3..22902d79ea 100644
--- a/openpype/lib/abstract_submit_deadline.py
+++ b/openpype/modules/deadline/abstract_submit_deadline.py
@@ -15,7 +15,7 @@ import attr
import requests
import pyblish.api
-from .abstract_metaplugins import AbstractMetaInstancePlugin
+from openpype.lib.abstract_metaplugins import AbstractMetaInstancePlugin
def requests_post(*args, **kwargs):
diff --git a/openpype/modules/default_modules/deadline/deadline_module.py b/openpype/modules/deadline/deadline_module.py
similarity index 100%
rename from openpype/modules/default_modules/deadline/deadline_module.py
rename to openpype/modules/deadline/deadline_module.py
diff --git a/openpype/modules/default_modules/deadline/plugins/publish/collect_deadline_server_from_instance.py b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py
similarity index 100%
rename from openpype/modules/default_modules/deadline/plugins/publish/collect_deadline_server_from_instance.py
rename to openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py
diff --git a/openpype/modules/default_modules/deadline/plugins/publish/collect_default_deadline_server.py b/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py
similarity index 100%
rename from openpype/modules/default_modules/deadline/plugins/publish/collect_default_deadline_server.py
rename to openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py
diff --git a/openpype/modules/default_modules/deadline/plugins/publish/submit_aftereffects_deadline.py b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py
similarity index 95%
rename from openpype/modules/default_modules/deadline/plugins/publish/submit_aftereffects_deadline.py
rename to openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py
index 1fff55500e..2918b54d4a 100644
--- a/openpype/modules/default_modules/deadline/plugins/publish/submit_aftereffects_deadline.py
+++ b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py
@@ -5,9 +5,9 @@ import pyblish.api
from avalon import api
-from openpype.lib import abstract_submit_deadline
-from openpype.lib.abstract_submit_deadline import DeadlineJobInfo
from openpype.lib import env_value_to_bool
+from openpype_modules.deadline import abstract_submit_deadline
+from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
@attr.s
@@ -24,7 +24,9 @@ class DeadlinePluginInfo():
MultiProcess = attr.ib(default=None)
-class AfterEffectsSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
+class AfterEffectsSubmitDeadline(
+ abstract_submit_deadline.AbstractSubmitDeadline
+):
label = "Submit AE to Deadline"
order = pyblish.api.IntegratorOrder + 0.1
diff --git a/openpype/modules/default_modules/deadline/plugins/publish/submit_harmony_deadline.py b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py
similarity index 98%
rename from openpype/modules/default_modules/deadline/plugins/publish/submit_harmony_deadline.py
rename to openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py
index 9d55d43ba6..918efb6630 100644
--- a/openpype/modules/default_modules/deadline/plugins/publish/submit_harmony_deadline.py
+++ b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py
@@ -8,11 +8,11 @@ import re
import attr
import pyblish.api
-
-import openpype.lib.abstract_submit_deadline
-from openpype.lib.abstract_submit_deadline import DeadlineJobInfo
from avalon import api
+from openpype_modules.deadline import abstract_submit_deadline
+from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
+
class _ZipFile(ZipFile):
"""Extended check for windows invalid characters."""
@@ -217,7 +217,8 @@ class PluginInfo(object):
class HarmonySubmitDeadline(
- openpype.lib.abstract_submit_deadline.AbstractSubmitDeadline):
+ abstract_submit_deadline.AbstractSubmitDeadline
+):
"""Submit render write of Harmony scene to Deadline.
Renders are submitted to a Deadline Web Service as
diff --git a/openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_remote_publish.py b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py
similarity index 100%
rename from openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_remote_publish.py
rename to openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py
diff --git a/openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py
similarity index 98%
rename from openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_render_deadline.py
rename to openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py
index 2cd6b0e6b0..59aeb68b79 100644
--- a/openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_render_deadline.py
+++ b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py
@@ -50,8 +50,8 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin):
# StartFrame to EndFrame by byFrameStep
frames = "{start}-{end}x{step}".format(
- start=int(instance.data["startFrame"]),
- end=int(instance.data["endFrame"]),
+ start=int(instance.data["frameStart"]),
+ end=int(instance.data["frameEnd"]),
step=int(instance.data["byFrameStep"]),
)
diff --git a/openpype/modules/default_modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py
similarity index 100%
rename from openpype/modules/default_modules/deadline/plugins/publish/submit_maya_deadline.py
rename to openpype/modules/deadline/plugins/publish/submit_maya_deadline.py
diff --git a/openpype/modules/default_modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py
similarity index 100%
rename from openpype/modules/default_modules/deadline/plugins/publish/submit_nuke_deadline.py
rename to openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py
diff --git a/openpype/modules/default_modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py
similarity index 98%
rename from openpype/modules/default_modules/deadline/plugins/publish/submit_publish_job.py
rename to openpype/modules/deadline/plugins/publish/submit_publish_job.py
index a77a968815..19d504b6c9 100644
--- a/openpype/modules/default_modules/deadline/plugins/publish/submit_publish_job.py
+++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py
@@ -13,6 +13,8 @@ from avalon import api, io
import pyblish.api
+from openpype.pipeline import get_representation_path
+
def get_resources(version, extension=None):
"""Get the files from the specific version."""
@@ -23,7 +25,7 @@ def get_resources(version, extension=None):
representation = io.find_one(query)
assert representation, "This is a bug"
- directory = api.get_representation_path(representation)
+ directory = get_representation_path(representation)
print("Source: ", directory)
resources = sorted(
[
@@ -316,8 +318,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
import speedcopy
self.log.info("Preparing to copy ...")
- start = instance.data.get("startFrame")
- end = instance.data.get("endFrame")
+ start = instance.data.get("frameStart")
+ end = instance.data.get("frameEnd")
# get latest version of subset
# this will stop if subset wasn't published yet
@@ -516,7 +518,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"""
representations = []
collections, remainders = clique.assemble(exp_files)
- bake_renders = instance.get("bakingNukeScripts", [])
# create representation for every collected sequento ce
for collection in collections:
@@ -534,9 +535,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
preview = True
break
- if bake_renders:
- preview = False
-
# toggle preview on if multipart is on
if instance.get("multipartExr", False):
preview = True
@@ -610,16 +608,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
})
self._solve_families(instance, True)
- if (bake_renders
- and remainder in bake_renders[0]["bakeRenderPath"]):
- rep.update({
- "fps": instance.get("fps"),
- "tags": ["review", "delete"]
- })
- # solve families with `preview` attributes
- self._solve_families(instance, True)
- representations.append(rep)
-
return representations
def _solve_families(self, instance, preview=False):
diff --git a/openpype/modules/default_modules/deadline/plugins/publish/validate_deadline_connection.py b/openpype/modules/deadline/plugins/publish/validate_deadline_connection.py
similarity index 100%
rename from openpype/modules/default_modules/deadline/plugins/publish/validate_deadline_connection.py
rename to openpype/modules/deadline/plugins/publish/validate_deadline_connection.py
diff --git a/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py b/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py
new file mode 100644
index 0000000000..c2426e0d78
--- /dev/null
+++ b/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py
@@ -0,0 +1,209 @@
+import os
+import requests
+
+import pyblish.api
+
+from openpype.lib.delivery import collect_frames
+from openpype_modules.deadline.abstract_submit_deadline import requests_get
+
+
+class ValidateExpectedFiles(pyblish.api.InstancePlugin):
+ """Compare rendered and expected files"""
+
+ label = "Validate rendered files from Deadline"
+ order = pyblish.api.ValidatorOrder
+ families = ["render"]
+ targets = ["deadline"]
+
+ # check if actual frame range on render job wasn't different
+ # case when artists wants to render only subset of frames
+ allow_user_override = True
+
+ def process(self, instance):
+ self.instance = instance
+ frame_list = self._get_frame_list(instance.data["render_job_id"])
+
+ for repre in instance.data["representations"]:
+ expected_files = self._get_expected_files(repre)
+
+ staging_dir = repre["stagingDir"]
+ existing_files = self._get_existing_files(staging_dir)
+
+ if self.allow_user_override:
+ # We always check for user override because the user might have
+ # also overridden the Job frame list to be longer than the
+ # originally submitted frame range
+ # todo: We should first check if Job frame range was overridden
+ # at all so we don't unnecessarily override anything
+ file_name_template, frame_placeholder = \
+ self._get_file_name_template_and_placeholder(
+ expected_files)
+
+ if not file_name_template:
+ raise RuntimeError("Unable to retrieve file_name template "
+ "from files: {}".format(expected_files))
+
+ job_expected_files = self._get_job_expected_files(
+ file_name_template,
+ frame_placeholder,
+ frame_list)
+
+ job_files_diff = job_expected_files.difference(expected_files)
+ if job_files_diff:
+ self.log.debug(
+ "Detected difference in expected output files from "
+ "Deadline job. Assuming an updated frame list by the "
+ "user. Difference: {}".format(sorted(job_files_diff))
+ )
+
+ # Update the representation expected files
+ self.log.info("Update range from actual job range "
+ "to frame list: {}".format(frame_list))
+ repre["files"] = sorted(job_expected_files)
+
+ # Update the expected files
+ expected_files = job_expected_files
+
+ # We don't use set.difference because we do allow other existing
+ # files to be in the folder that we might not want to use.
+ missing = expected_files - existing_files
+ if missing:
+ raise RuntimeError("Missing expected files: {}".format(
+ sorted(missing)))
+
+ def _get_frame_list(self, original_job_id):
+ """Returns list of frame ranges from all render jobs.
+
+ Render job might be re-submitted so job_id in metadata.json could be
+ invalid. GlobalJobPreload injects current job id to RENDER_JOB_IDS.
+
+ Args:
+ original_job_id (str)
+ Returns:
+ (list)
+ """
+ all_frame_lists = []
+ render_job_ids = os.environ.get("RENDER_JOB_IDS")
+ if render_job_ids:
+ render_job_ids = render_job_ids.split(',')
+ else: # fallback
+ render_job_ids = [original_job_id]
+
+ for job_id in render_job_ids:
+ job_info = self._get_job_info(job_id)
+ frame_list = job_info["Props"]["Frames"]
+ if frame_list:
+ all_frame_lists.extend(frame_list.split(','))
+
+ return all_frame_lists
+
+ def _get_job_expected_files(self,
+ file_name_template,
+ frame_placeholder,
+ frame_list):
+ """Calculates list of names of expected rendered files.
+
+ Might be different from expected files from submission if user
+ explicitly and manually changed the frame list on the Deadline job.
+
+ """
+ # no frames in file name at all, eg 'renderCompositingMain.withLut.mov'
+ if not frame_placeholder:
+ return set([file_name_template])
+
+ real_expected_rendered = set()
+ src_padding_exp = "%0{}d".format(len(frame_placeholder))
+ for frames in frame_list:
+ if '-' not in frames: # single frame
+ frames = "{}-{}".format(frames, frames)
+
+ start, end = frames.split('-')
+ for frame in range(int(start), int(end) + 1):
+ ren_name = file_name_template.replace(
+ frame_placeholder, src_padding_exp % frame)
+ real_expected_rendered.add(ren_name)
+
+ return real_expected_rendered
+
+ def _get_file_name_template_and_placeholder(self, files):
+ """Returns file name with frame replaced with # and this placeholder"""
+ sources_and_frames = collect_frames(files)
+
+ file_name_template = frame_placeholder = None
+ for file_name, frame in sources_and_frames.items():
+
+ # There might be cases where clique was unable to collect
+ # collections in `collect_frames` - thus we capture that case
+ if frame is not None:
+ frame_placeholder = "#" * len(frame)
+
+ file_name_template = os.path.basename(
+ file_name.replace(frame, frame_placeholder))
+ else:
+ file_name_template = file_name
+ break
+
+ return file_name_template, frame_placeholder
+
+ def _get_job_info(self, job_id):
+ """Calls DL for actual job info for 'job_id'
+
+ Might be different than job info saved in metadata.json if user
+ manually changes job pre/during rendering.
+
+ """
+ # get default deadline webservice url from deadline module
+ deadline_url = self.instance.context.data["defaultDeadline"]
+ # if custom one is set in instance, use that
+ if self.instance.data.get("deadlineUrl"):
+ deadline_url = self.instance.data.get("deadlineUrl")
+ assert deadline_url, "Requires Deadline Webservice URL"
+
+ url = "{}/api/jobs?JobID={}".format(deadline_url, job_id)
+ try:
+ response = requests_get(url)
+ except requests.exceptions.ConnectionError:
+ self.log.error("Deadline is not accessible at "
+ "{}".format(deadline_url))
+ return {}
+
+ if not response.ok:
+ self.log.error("Submission failed!")
+ self.log.error(response.status_code)
+ self.log.error(response.content)
+ raise RuntimeError(response.text)
+
+ json_content = response.json()
+ if json_content:
+ return json_content.pop()
+ return {}
+
+ def _get_existing_files(self, staging_dir):
+ """Returns set of existing file names from 'staging_dir'"""
+ existing_files = set()
+ for file_name in os.listdir(staging_dir):
+ existing_files.add(file_name)
+ return existing_files
+
+ def _get_expected_files(self, repre):
+ """Returns set of file names in representation['files']
+
+ The representations are collected from `CollectRenderedFiles` using
+ the metadata.json file submitted along with the render job.
+
+ Args:
+ repre (dict): The representation containing 'files'
+
+ Returns:
+ set: Set of expected file_names in the staging directory.
+
+ """
+ expected_files = set()
+
+ files = repre["files"]
+ if not isinstance(files, list):
+ files = [files]
+
+ for file_name in files:
+ expected_files.add(file_name)
+ return expected_files
diff --git a/vendor/deadline/custom/plugins/GlobalJobPreLoad.py b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py
similarity index 96%
rename from vendor/deadline/custom/plugins/GlobalJobPreLoad.py
rename to openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py
index ee137a2ee3..82c2494e7a 100644
--- a/vendor/deadline/custom/plugins/GlobalJobPreLoad.py
+++ b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py
@@ -1,10 +1,11 @@
# -*- coding: utf-8 -*-
import os
import tempfile
-import time
+from datetime import datetime
import subprocess
import json
import platform
+import uuid
from Deadline.Scripting import RepositoryUtils, FileUtils
@@ -36,9 +37,11 @@ def inject_openpype_environment(deadlinePlugin):
print("--- OpenPype executable: {}".format(openpype_app))
# tempfile.TemporaryFile cannot be used because of locking
- export_url = os.path.join(tempfile.gettempdir(),
- time.strftime('%Y%m%d%H%M%S'),
- 'env.json') # add HHMMSS + delete later
+ temp_file_name = "{}_{}.json".format(
+ datetime.utcnow().strftime('%Y%m%d%H%M%S%f'),
+ str(uuid.uuid1())
+ )
+ export_url = os.path.join(tempfile.gettempdir(), temp_file_name)
print(">>> Temporary path: {}".format(export_url))
args = [
diff --git a/vendor/deadline/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.ico b/openpype/modules/deadline/repository/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.ico
similarity index 100%
rename from vendor/deadline/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.ico
rename to openpype/modules/deadline/repository/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.ico
diff --git a/vendor/deadline/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.options b/openpype/modules/deadline/repository/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.options
similarity index 100%
rename from vendor/deadline/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.options
rename to openpype/modules/deadline/repository/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.options
diff --git a/vendor/deadline/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.param b/openpype/modules/deadline/repository/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.param
similarity index 100%
rename from vendor/deadline/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.param
rename to openpype/modules/deadline/repository/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.param
diff --git a/vendor/deadline/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.py b/openpype/modules/deadline/repository/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.py
similarity index 100%
rename from vendor/deadline/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.py
rename to openpype/modules/deadline/repository/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.py
diff --git a/vendor/deadline/custom/plugins/OpenPype/OpenPype.ico b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.ico
similarity index 100%
rename from vendor/deadline/custom/plugins/OpenPype/OpenPype.ico
rename to openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.ico
diff --git a/vendor/deadline/custom/plugins/OpenPype/OpenPype.options b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.options
similarity index 100%
rename from vendor/deadline/custom/plugins/OpenPype/OpenPype.options
rename to openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.options
diff --git a/vendor/deadline/custom/plugins/OpenPype/OpenPype.param b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param
similarity index 100%
rename from vendor/deadline/custom/plugins/OpenPype/OpenPype.param
rename to openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param
diff --git a/vendor/deadline/custom/plugins/OpenPype/OpenPype.py b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py
similarity index 100%
rename from vendor/deadline/custom/plugins/OpenPype/OpenPype.py
rename to openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py
diff --git a/vendor/deadline/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.ico b/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.ico
similarity index 100%
rename from vendor/deadline/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.ico
rename to openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.ico
diff --git a/vendor/deadline/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.options b/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.options
similarity index 100%
rename from vendor/deadline/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.options
rename to openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.options
diff --git a/vendor/deadline/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.param b/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.param
similarity index 100%
rename from vendor/deadline/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.param
rename to openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.param
diff --git a/vendor/deadline/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py b/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py
similarity index 65%
rename from vendor/deadline/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py
rename to openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py
index cf864b03d3..9fca1b5391 100644
--- a/vendor/deadline/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py
+++ b/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py
@@ -5,8 +5,9 @@ Todo:
Currently we support only EXRs with their data window set.
"""
import os
+import re
import subprocess
-from xml.dom import minidom
+import xml.etree.ElementTree
from System.IO import Path
@@ -15,17 +16,220 @@ from Deadline.Scripting import (
FileUtils, RepositoryUtils, SystemUtils)
-INT_KEYS = {
- "x", "y", "height", "width", "full_x", "full_y",
- "full_width", "full_height", "full_depth", "full_z",
- "tile_width", "tile_height", "tile_depth", "deep", "depth",
- "nchannels", "z_channel", "alpha_channel", "subimages"
+STRING_TAGS = {
+ "format"
}
-LIST_KEYS = {
- "channelnames"
+INT_TAGS = {
+ "x", "y", "z",
+ "width", "height", "depth",
+ "full_x", "full_y", "full_z",
+ "full_width", "full_height", "full_depth",
+ "tile_width", "tile_height", "tile_depth",
+ "nchannels",
+ "alpha_channel",
+ "z_channel",
+ "deep",
+ "subimages",
}
+XML_CHAR_REF_REGEX_HEX = re.compile(r"&#x?[0-9a-fA-F]+;")
+
+# Regex to parse array attributes
+ARRAY_TYPE_REGEX = re.compile(r"^(int|float|string)\[\d+\]$")
+
+
+def convert_value_by_type_name(value_type, value):
+ """Convert value to proper type based on type name.
+
+ In some cases value types have custom python class.
+ """
+
+ # Simple types
+ if value_type == "string":
+ return value
+
+ if value_type == "int":
+ return int(value)
+
+ if value_type == "float":
+ return float(value)
+
+ # Vectors will probably have more types
+ if value_type == "vec2f":
+ return [float(item) for item in value.split(",")]
+
+ # Matrix should always have a square size of elements: 3x3, 4x4
+ # - are returned as list of lists
+ if value_type == "matrix":
+ output = []
+ current_index = -1
+ parts = value.split(",")
+ parts_len = len(parts)
+ if parts_len == 1:
+ divisor = 1
+ elif parts_len == 4:
+ divisor = 2
+ elif parts_len == 9:
+ divisor = 3
+ elif parts_len == 16:
+ divisor = 4
+ else:
+ print("Unknown matrix resolution {}. Value: \"{}\"".format(
+ parts_len, value
+ ))
+ for part in parts:
+ output.append(float(part))
+ return output
+
+ for idx, item in enumerate(parts):
+ list_index = idx % divisor
+ if list_index > current_index:
+ current_index = list_index
+ output.append([])
+ output[list_index].append(float(item))
+ return output
+
+ if value_type == "rational2i":
+ parts = value.split("/")
+ top = float(parts[0])
+ bottom = 1.0
+ if len(parts) != 1:
+ bottom = float(parts[1])
+ return float(top) / float(bottom)
+
+ if value_type == "vector":
+ parts = [part.strip() for part in value.split(",")]
+ output = []
+ for part in parts:
+ if part == "-nan":
+ output.append(None)
+ continue
+ try:
+ part = float(part)
+ except ValueError:
+ pass
+ output.append(part)
+ return output
+
+ if value_type == "timecode":
+ return value
+
+ # Array of other types is converted to list
+ re_result = ARRAY_TYPE_REGEX.findall(value_type)
+ if re_result:
+ array_type = re_result[0]
+ output = []
+ for item in value.split(","):
+ output.append(
+ convert_value_by_type_name(array_type, item)
+ )
+ return output
+
+ print((
+ "MISSING IMPLEMENTATION:"
+ " Unknown attrib type \"{}\". Value: {}"
+ ).format(value_type, value))
+ return value
+
+
+def parse_oiio_xml_output(xml_string):
+ """Parse xml output from OIIO info command."""
+ output = {}
+ if not xml_string:
+ return output
+
+ # Fix values with ampersand (lazy fix)
+ # - oiiotool exports invalid xml which ElementTree can't handle
+ # e.g. "&#01;"
+ # WARNING: this will affect even valid character entities. If you need
+ # those values correctly, this must take care of valid character ranges.
+ # See https://github.com/pypeclub/OpenPype/pull/2729
+ matches = XML_CHAR_REF_REGEX_HEX.findall(xml_string)
+ for match in matches:
+ new_value = match.replace("&", "&amp;")
+ xml_string = xml_string.replace(match, new_value)
+
+ tree = xml.etree.ElementTree.fromstring(xml_string)
+ attribs = {}
+ output["attribs"] = attribs
+ for child in tree:
+ tag_name = child.tag
+ if tag_name == "attrib":
+ attrib_def = child.attrib
+ value = convert_value_by_type_name(
+ attrib_def["type"], child.text
+ )
+
+ attribs[attrib_def["name"]] = value
+ continue
+
+ # Channels are stored as tex on each child
+ if tag_name == "channelnames":
+ value = []
+ for channel in child:
+ value.append(channel.text)
+
+ # Convert known integer type tags to int
+ elif tag_name in INT_TAGS:
+ value = int(child.text)
+
+ # Keep value of known string tags
+ elif tag_name in STRING_TAGS:
+ value = child.text
+
+ # Keep value as text for unknown tags
+ # - feel free to add more tags
+ else:
+ value = child.text
+ print((
+ "MISSING IMPLEMENTATION:"
+ " Unknown tag \"{}\". Value \"{}\""
+ ).format(tag_name, value))
+
+ output[child.tag] = value
+
+ return output
+
+
+def info_about_input(oiiotool_path, filepath):
+ args = [
+ oiiotool_path,
+ "--info",
+ "-v",
+ "-i:infoformat=xml",
+ filepath
+ ]
+ popen = subprocess.Popen(args, stdout=subprocess.PIPE)
+ _stdout, _stderr = popen.communicate()
+ output = ""
+ if _stdout:
+ output += _stdout.decode("utf-8")
+
+ if _stderr:
+ output += _stderr.decode("utf-8")
+
+ output = output.replace("\r\n", "\n")
+ xml_started = False
+ lines = []
+ for line in output.split("\n"):
+ if not xml_started:
+ if not line.startswith("<"):
+ continue
+ xml_started = True
+ if xml_started:
+ lines.append(line)
+
+ if not xml_started:
+ raise ValueError(
+ "Failed to read input file \"{}\".\nOutput:\n{}".format(
+ filepath, output
+ )
+ )
+ xml_text = "\n".join(lines)
+ return parse_oiio_xml_output(xml_text)
+
+
def GetDeadlinePlugin(): # noqa: N802
"""Helper."""
return OpenPypeTileAssembler()
@@ -218,8 +422,9 @@ class OpenPypeTileAssembler(DeadlinePlugin):
# Create new image with output resolution, and with same type and
# channels as input
+ oiiotool_path = self.render_executable()
first_tile_path = tile_info[0]["filepath"]
- first_tile_info = self.info_about_input(first_tile_path)
+ first_tile_info = info_about_input(oiiotool_path, first_tile_path)
create_arg_template = "--create{} {}x{} {}"
image_type = ""
@@ -236,7 +441,7 @@ class OpenPypeTileAssembler(DeadlinePlugin):
for tile in tile_info:
path = tile["filepath"]
pos_x = tile["pos_x"]
- tile_height = self.info_about_input(path)["height"]
+ tile_height = info_about_input(oiiotool_path, path)["height"]
if self.renderer == "vray":
pos_y = tile["pos_y"]
else:
@@ -326,47 +531,3 @@ class OpenPypeTileAssembler(DeadlinePlugin):
ffmpeg_args.append("\"{}\"".format(output_path))
return ffmpeg_args
-
- def info_about_input(self, input_path):
- args = [self.render_executable(), "--info:format=xml", input_path]
- popen = subprocess.Popen(
- " ".join(args),
- shell=True,
- stdout=subprocess.PIPE
- )
- popen_output = popen.communicate()[0].replace(b"\r\n", b"")
-
- xmldoc = minidom.parseString(popen_output)
- image_spec = None
- for main_child in xmldoc.childNodes:
- if main_child.nodeName.lower() == "imagespec":
- image_spec = main_child
- break
-
- info = {}
- if not image_spec:
- return info
-
- def child_check(node):
- if len(node.childNodes) != 1:
- self.FailRender((
- "Implementation BUG. Node {} has more children than 1"
- ).format(node.nodeName))
-
- for child in image_spec.childNodes:
- if child.nodeName in LIST_KEYS:
- values = []
- for node in child.childNodes:
- child_check(node)
- values.append(node.childNodes[0].nodeValue)
-
- info[child.nodeName] = values
-
- elif child.nodeName in INT_KEYS:
- child_check(child)
- info[child.nodeName] = int(child.childNodes[0].nodeValue)
-
- else:
- child_check(child)
- info[child.nodeName] = child.childNodes[0].nodeValue
- return info
diff --git a/vendor/deadline/readme.md b/openpype/modules/deadline/repository/readme.md
similarity index 100%
rename from vendor/deadline/readme.md
rename to openpype/modules/deadline/repository/readme.md
diff --git a/openpype/modules/default_modules/deadline/plugins/publish/validate_expected_and_rendered_files.py b/openpype/modules/default_modules/deadline/plugins/publish/validate_expected_and_rendered_files.py
deleted file mode 100644
index 719c7dfe3e..0000000000
--- a/openpype/modules/default_modules/deadline/plugins/publish/validate_expected_and_rendered_files.py
+++ /dev/null
@@ -1,189 +0,0 @@
-import os
-import json
-import requests
-
-import pyblish.api
-
-from openpype.lib.abstract_submit_deadline import requests_get
-from openpype.lib.delivery import collect_frames
-
-
-class ValidateExpectedFiles(pyblish.api.InstancePlugin):
- """Compare rendered and expected files"""
-
- label = "Validate rendered files from Deadline"
- order = pyblish.api.ValidatorOrder
- families = ["render"]
- targets = ["deadline"]
-
- # check if actual frame range on render job wasn't different
- # case when artists wants to render only subset of frames
- allow_user_override = True
-
- def process(self, instance):
- self.instance = instance
- frame_list = self._get_frame_list(instance.data["render_job_id"])
-
- for repre in instance.data["representations"]:
- expected_files = self._get_expected_files(repre)
-
- staging_dir = repre["stagingDir"]
- existing_files = self._get_existing_files(staging_dir)
-
- expected_non_existent = expected_files.difference(
- existing_files)
- if len(expected_non_existent) != 0:
- self.log.info("Some expected files missing {}".format(
- expected_non_existent))
-
- if self.allow_user_override:
- file_name_template, frame_placeholder = \
- self._get_file_name_template_and_placeholder(
- expected_files)
-
- if not file_name_template:
- return
-
- real_expected_rendered = self._get_real_render_expected(
- file_name_template,
- frame_placeholder,
- frame_list)
-
- real_expected_non_existent = \
- real_expected_rendered.difference(existing_files)
- if len(real_expected_non_existent) != 0:
- raise RuntimeError("Still missing some files {}".
- format(real_expected_non_existent))
- self.log.info("Update range from actual job range")
- repre["files"] = sorted(list(real_expected_rendered))
- else:
- raise RuntimeError("Some expected files missing {}".format(
- expected_non_existent))
-
- def _get_frame_list(self, original_job_id):
- """
- Returns list of frame ranges from all render job.
-
- Render job might be requeried so job_id in metadata.json is invalid
- GlobalJobPreload injects current ids to RENDER_JOB_IDS.
-
- Args:
- original_job_id (str)
- Returns:
- (list)
- """
- all_frame_lists = []
- render_job_ids = os.environ.get("RENDER_JOB_IDS")
- if render_job_ids:
- render_job_ids = render_job_ids.split(',')
- else: # fallback
- render_job_ids = [original_job_id]
-
- for job_id in render_job_ids:
- job_info = self._get_job_info(job_id)
- frame_list = job_info["Props"]["Frames"]
- if frame_list:
- all_frame_lists.extend(frame_list.split(','))
-
- return all_frame_lists
-
- def _get_real_render_expected(self, file_name_template, frame_placeholder,
- frame_list):
- """
- Calculates list of names of expected rendered files.
-
- Might be different from job expected files if user explicitly and
- manually change frame list on Deadline job.
- """
- real_expected_rendered = set()
- src_padding_exp = "%0{}d".format(len(frame_placeholder))
- for frames in frame_list:
- if '-' not in frames: # single frame
- frames = "{}-{}".format(frames, frames)
-
- start, end = frames.split('-')
- for frame in range(int(start), int(end) + 1):
- ren_name = file_name_template.replace(
- frame_placeholder, src_padding_exp % frame)
- real_expected_rendered.add(ren_name)
-
- return real_expected_rendered
-
- def _get_file_name_template_and_placeholder(self, files):
- """Returns file name with frame replaced with # and this placeholder"""
- sources_and_frames = collect_frames(files)
-
- file_name_template = frame_placeholder = None
- for file_name, frame in sources_and_frames.items():
- frame_placeholder = "#" * len(frame)
- file_name_template = os.path.basename(
- file_name.replace(frame, frame_placeholder))
- break
-
- return file_name_template, frame_placeholder
-
- def _get_job_info(self, job_id):
- """
- Calls DL for actual job info for 'job_id'
-
- Might be different than job info saved in metadata.json if user
- manually changes job pre/during rendering.
- """
- # get default deadline webservice url from deadline module
- deadline_url = self.instance.context.data["defaultDeadline"]
- # if custom one is set in instance, use that
- if self.instance.data.get("deadlineUrl"):
- deadline_url = self.instance.data.get("deadlineUrl")
- assert deadline_url, "Requires Deadline Webservice URL"
-
- url = "{}/api/jobs?JobID={}".format(deadline_url, job_id)
- try:
- response = requests_get(url)
- except requests.exceptions.ConnectionError:
- print("Deadline is not accessible at {}".format(deadline_url))
- # self.log("Deadline is not accessible at {}".format(deadline_url))
- return {}
-
- if not response.ok:
- self.log.error("Submission failed!")
- self.log.error(response.status_code)
- self.log.error(response.content)
- raise RuntimeError(response.text)
-
- json_content = response.json()
- if json_content:
- return json_content.pop()
- return {}
-
- def _parse_metadata_json(self, json_path):
- if not os.path.exists(json_path):
- msg = "Metadata file {} doesn't exist".format(json_path)
- raise RuntimeError(msg)
-
- with open(json_path) as fp:
- try:
- return json.load(fp)
- except Exception as exc:
- self.log.error(
- "Error loading json: "
- "{} - Exception: {}".format(json_path, exc)
- )
-
- def _get_existing_files(self, out_dir):
- """Returns set of existing file names from 'out_dir'"""
- existing_files = set()
- for file_name in os.listdir(out_dir):
- existing_files.add(file_name)
- return existing_files
-
- def _get_expected_files(self, repre):
- """Returns set of file names from metadata.json"""
- expected_files = set()
-
- files = repre["files"]
- if not isinstance(files, list):
- files = [files]
-
- for file_name in files:
- expected_files.add(file_name)
- return expected_files
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_job_killer.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_job_killer.py
deleted file mode 100644
index af24e0280d..0000000000
--- a/openpype/modules/default_modules/ftrack/event_handlers_user/action_job_killer.py
+++ /dev/null
@@ -1,117 +0,0 @@
-import json
-from openpype_modules.ftrack.lib import BaseAction, statics_icon
-
-
-class JobKiller(BaseAction):
- '''Edit meta data action.'''
-
- #: Action identifier.
- identifier = 'job.killer'
- #: Action label.
- label = "OpenPype Admin"
- variant = '- Job Killer'
- #: Action description.
- description = 'Killing selected running jobs'
- #: roles that are allowed to register this action
- icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg")
- settings_key = "job_killer"
-
- def discover(self, session, entities, event):
- ''' Validation '''
- return self.valid_roles(session, entities, event)
-
- def interface(self, session, entities, event):
- if not event['data'].get('values', {}):
- title = 'Select jobs to kill'
-
- jobs = session.query(
- 'select id, status from Job'
- ' where status in ("queued", "running")'
- ).all()
-
- items = []
-
- item_splitter = {'type': 'label', 'value': '---'}
- for job in jobs:
- try:
- data = json.loads(job['data'])
- desctiption = data['description']
- except Exception:
- desctiption = '*No description*'
- user = job['user']['username']
- created = job['created_at'].strftime('%d.%m.%Y %H:%M:%S')
- label = '{} - {} - {}'.format(
- desctiption, created, user
- )
- item_label = {
- 'type': 'label',
- 'value': label
- }
- item = {
- 'name': job['id'],
- 'type': 'boolean',
- 'value': False
- }
- if len(items) > 0:
- items.append(item_splitter)
- items.append(item_label)
- items.append(item)
-
- if len(items) == 0:
- return {
- 'success': False,
- 'message': 'Didn\'t found any running jobs'
- }
- else:
- return {
- 'items': items,
- 'title': title
- }
-
- def launch(self, session, entities, event):
- """ GET JOB """
- if 'values' not in event['data']:
- return
-
- values = event['data']['values']
- if len(values) <= 0:
- return {
- 'success': True,
- 'message': 'No jobs to kill!'
- }
- jobs = []
- job_ids = []
-
- for k, v in values.items():
- if v is True:
- job_ids.append(k)
-
- for id in job_ids:
- query = 'Job where id is "{}"'.format(id)
- jobs.append(session.query(query).one())
- # Update all the queried jobs, setting the status to failed.
- for job in jobs:
- try:
- origin_status = job["status"]
- job['status'] = 'failed'
- session.commit()
- self.log.debug((
- 'Changing Job ({}) status: {} -> failed'
- ).format(job['id'], origin_status))
- except Exception:
- session.rollback()
- self.log.warning((
- 'Changing Job ({}) has failed'
- ).format(job['id']))
-
- self.log.info('All running jobs were killed Successfully!')
- return {
- 'success': True,
- 'message': 'All running jobs were killed Successfully!'
- }
-
-
-def register(session):
- '''Register plugin. Called when used as an plugin.'''
-
- JobKiller(session).register()
diff --git a/openpype/modules/default_modules/ftrack/python2_vendor/arrow b/openpype/modules/default_modules/ftrack/python2_vendor/arrow
deleted file mode 160000
index b746fedf72..0000000000
--- a/openpype/modules/default_modules/ftrack/python2_vendor/arrow
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit b746fedf7286c3755a46f07ab72f4c414cd41fc0
diff --git a/openpype/modules/default_modules/ftrack/python2_vendor/ftrack-python-api b/openpype/modules/default_modules/ftrack/python2_vendor/ftrack-python-api
deleted file mode 160000
index d277f474ab..0000000000
--- a/openpype/modules/default_modules/ftrack/python2_vendor/ftrack-python-api
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit d277f474ab016e7b53479c36af87cb861d0cc53e
diff --git a/openpype/modules/default_modules/ftrack/__init__.py b/openpype/modules/ftrack/__init__.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/__init__.py
rename to openpype/modules/ftrack/__init__.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/action_clone_review_session.py b/openpype/modules/ftrack/event_handlers_server/action_clone_review_session.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_server/action_clone_review_session.py
rename to openpype/modules/ftrack/event_handlers_server/action_clone_review_session.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/action_multiple_notes.py b/openpype/modules/ftrack/event_handlers_server/action_multiple_notes.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_server/action_multiple_notes.py
rename to openpype/modules/ftrack/event_handlers_server/action_multiple_notes.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_server/action_prepare_project.py
rename to openpype/modules/ftrack/event_handlers_server/action_prepare_project.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/action_private_project_detection.py b/openpype/modules/ftrack/event_handlers_server/action_private_project_detection.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_server/action_private_project_detection.py
rename to openpype/modules/ftrack/event_handlers_server/action_private_project_detection.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py b/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py
rename to openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/action_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_server/action_sync_to_avalon.py
rename to openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py b/openpype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py
rename to openpype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/event_first_version_status.py b/openpype/modules/ftrack/event_handlers_server/event_first_version_status.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_server/event_first_version_status.py
rename to openpype/modules/ftrack/event_handlers_server/event_first_version_status.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/event_next_task_update.py b/openpype/modules/ftrack/event_handlers_server/event_next_task_update.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_server/event_next_task_update.py
rename to openpype/modules/ftrack/event_handlers_server/event_next_task_update.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py b/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py
rename to openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/event_radio_buttons.py b/openpype/modules/ftrack/event_handlers_server/event_radio_buttons.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_server/event_radio_buttons.py
rename to openpype/modules/ftrack/event_handlers_server/event_radio_buttons.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/event_sync_links.py b/openpype/modules/ftrack/event_handlers_server/event_sync_links.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_server/event_sync_links.py
rename to openpype/modules/ftrack/event_handlers_server/event_sync_links.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py
similarity index 98%
rename from openpype/modules/default_modules/ftrack/event_handlers_server/event_sync_to_avalon.py
rename to openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py
index 9f85000dbb..eea6436b53 100644
--- a/openpype/modules/default_modules/ftrack/event_handlers_server/event_sync_to_avalon.py
+++ b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py
@@ -20,11 +20,16 @@ from openpype_modules.ftrack.lib import (
query_custom_attributes,
CUST_ATTR_ID_KEY,
CUST_ATTR_AUTO_SYNC,
+ FPS_KEYS,
avalon_sync,
BaseEvent
)
+from openpype_modules.ftrack.lib.avalon_sync import (
+ convert_to_fps,
+ InvalidFpsValue
+)
from openpype.lib import CURRENT_DOC_SCHEMAS
@@ -1149,12 +1154,31 @@ class SyncToAvalonEvent(BaseEvent):
"description": ftrack_ent["description"]
}
}
+ invalid_fps_items = []
cust_attrs = self.get_cust_attr_values(ftrack_ent)
for key, val in cust_attrs.items():
if key.startswith("avalon_"):
continue
+
+ if key in FPS_KEYS:
+ try:
+ val = convert_to_fps(val)
+ except InvalidFpsValue:
+ invalid_fps_items.append((ftrack_ent["id"], val))
+ continue
+
final_entity["data"][key] = val
+ if invalid_fps_items:
+ fps_msg = (
+ "These entities have invalid fps value in custom attributes"
+ )
+ items = []
+ for entity_id, value in invalid_fps_items:
+ ent_path = self.get_ent_path(entity_id)
+ items.append("{} - \"{}\"".format(ent_path, value))
+ self.report_items["error"][fps_msg] = items
+
_mongo_id_str = cust_attrs.get(CUST_ATTR_ID_KEY)
if _mongo_id_str:
try:
@@ -2155,11 +2179,19 @@ class SyncToAvalonEvent(BaseEvent):
)
convert_types_by_id[attr_id] = convert_type
+ default_value = attr["default"]
+ if key in FPS_KEYS:
+ try:
+ default_value = convert_to_fps(default_value)
+ except InvalidFpsValue:
+ pass
+
entities_dict[ftrack_project_id]["hier_attrs"][key] = (
attr["default"]
)
# PREPARE DATA BEFORE THIS
+ invalid_fps_items = []
avalon_hier = []
for item in values:
value = item["value"]
@@ -2173,8 +2205,25 @@ class SyncToAvalonEvent(BaseEvent):
if convert_type:
value = convert_type(value)
+
+ if key in FPS_KEYS:
+ try:
+ value = convert_to_fps(value)
+ except InvalidFpsValue:
+ invalid_fps_items.append((entity_id, value))
+ continue
entities_dict[entity_id]["hier_attrs"][key] = value
+ if invalid_fps_items:
+ fps_msg = (
+ "These entities have invalid fps value in custom attributes"
+ )
+ items = []
+ for entity_id, value in invalid_fps_items:
+ ent_path = self.get_ent_path(entity_id)
+ items.append("{} - \"{}\"".format(ent_path, value))
+ self.report_items["error"][fps_msg] = items
+
# Get dictionary with not None hierarchical values to pull to childs
project_values = {}
for key, value in (
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/event_task_to_parent_status.py b/openpype/modules/ftrack/event_handlers_server/event_task_to_parent_status.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_server/event_task_to_parent_status.py
rename to openpype/modules/ftrack/event_handlers_server/event_task_to_parent_status.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/event_task_to_version_status.py b/openpype/modules/ftrack/event_handlers_server/event_task_to_version_status.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_server/event_task_to_version_status.py
rename to openpype/modules/ftrack/event_handlers_server/event_task_to_version_status.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/event_thumbnail_updates.py b/openpype/modules/ftrack/event_handlers_server/event_thumbnail_updates.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_server/event_thumbnail_updates.py
rename to openpype/modules/ftrack/event_handlers_server/event_thumbnail_updates.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/event_user_assigment.py b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_server/event_user_assigment.py
rename to openpype/modules/ftrack/event_handlers_server/event_user_assigment.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_server/event_version_to_task_statuses.py b/openpype/modules/ftrack/event_handlers_server/event_version_to_task_statuses.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_server/event_version_to_task_statuses.py
rename to openpype/modules/ftrack/event_handlers_server/event_version_to_task_statuses.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_applications.py b/openpype/modules/ftrack/event_handlers_user/action_applications.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_applications.py
rename to openpype/modules/ftrack/event_handlers_user/action_applications.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_batch_task_creation.py b/openpype/modules/ftrack/event_handlers_user/action_batch_task_creation.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_batch_task_creation.py
rename to openpype/modules/ftrack/event_handlers_user/action_batch_task_creation.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py b/openpype/modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py
rename to openpype/modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_client_review_sort.py b/openpype/modules/ftrack/event_handlers_user/action_client_review_sort.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_client_review_sort.py
rename to openpype/modules/ftrack/event_handlers_user/action_client_review_sort.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_component_open.py b/openpype/modules/ftrack/event_handlers_user/action_component_open.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_component_open.py
rename to openpype/modules/ftrack/event_handlers_user/action_component_open.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_create_cust_attrs.py b/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py
similarity index 97%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_create_cust_attrs.py
rename to openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py
index cb5b88ad50..88dc8213bd 100644
--- a/openpype/modules/default_modules/ftrack/event_handlers_user/action_create_cust_attrs.py
+++ b/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py
@@ -11,6 +11,7 @@ from openpype_modules.ftrack.lib import (
CUST_ATTR_TOOLS,
CUST_ATTR_APPLICATIONS,
CUST_ATTR_INTENT,
+ FPS_KEYS,
default_custom_attributes_definition,
app_definitions_from_app_manager,
@@ -519,20 +520,28 @@ class CustomAttributes(BaseAction):
self.show_message(event, msg)
def process_attribute(self, data):
- existing_attrs = self.session.query(
- "CustomAttributeConfiguration"
- ).all()
+ existing_attrs = self.session.query((
+ "select is_hierarchical, key, type, entity_type, object_type_id"
+ " from CustomAttributeConfiguration"
+ )).all()
matching = []
+ is_hierarchical = data.get("is_hierarchical", False)
for attr in existing_attrs:
if (
- attr["key"] != data["key"] or
- attr["type"]["name"] != data["type"]["name"]
+ is_hierarchical != attr["is_hierarchical"]
+ or attr["key"] != data["key"]
):
continue
- if data.get("is_hierarchical") is True:
- if attr["is_hierarchical"] is True:
- matching.append(attr)
+ if attr["type"]["name"] != data["type"]["name"]:
+ if data["key"] in FPS_KEYS and attr["type"]["name"] == "text":
+ self.log.info("Kept 'fps' as text custom attribute.")
+ return
+ continue
+
+ if is_hierarchical:
+ matching.append(attr)
+
elif "object_type_id" in data:
if (
attr["entity_type"] == data["entity_type"] and
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_create_folders.py b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py
similarity index 99%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_create_folders.py
rename to openpype/modules/ftrack/event_handlers_user/action_create_folders.py
index 8bbef9ad73..d15a865124 100644
--- a/openpype/modules/default_modules/ftrack/event_handlers_user/action_create_folders.py
+++ b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py
@@ -97,7 +97,6 @@ class CreateFolders(BaseAction):
all_entities = self.get_notask_children(entity)
anatomy = Anatomy(project_name)
- project_settings = get_project_settings(project_name)
work_keys = ["work", "folder"]
work_template = anatomy.templates
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_create_project_structure.py b/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_create_project_structure.py
rename to openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_delete_asset.py b/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py
similarity index 96%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_delete_asset.py
rename to openpype/modules/ftrack/event_handlers_user/action_delete_asset.py
index 676dd80e93..94385a36c5 100644
--- a/openpype/modules/default_modules/ftrack/event_handlers_user/action_delete_asset.py
+++ b/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py
@@ -3,8 +3,9 @@ import uuid
from datetime import datetime
from bson.objectid import ObjectId
-from openpype_modules.ftrack.lib import BaseAction, statics_icon
from avalon.api import AvalonMongoDB
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib.avalon_sync import create_chunks
class DeleteAssetSubset(BaseAction):
@@ -554,8 +555,8 @@ class DeleteAssetSubset(BaseAction):
ftrack_proc_txt, ", ".join(ftrack_ids_to_delete)
))
- entities_by_link_len = (
- self._filter_entities_to_delete(ftrack_ids_to_delete, session)
+ entities_by_link_len = self._prepare_entities_before_delete(
+ ftrack_ids_to_delete, session
)
for link_len in sorted(entities_by_link_len.keys(), reverse=True):
for entity in entities_by_link_len[link_len]:
@@ -609,7 +610,7 @@ class DeleteAssetSubset(BaseAction):
return self.report_handle(report_messages, project_name, event)
- def _filter_entities_to_delete(self, ftrack_ids_to_delete, session):
+ def _prepare_entities_before_delete(self, ftrack_ids_to_delete, session):
"""Filter children entities to avoid CircularDependencyError."""
joined_ids_to_delete = ", ".join(
["\"{}\"".format(id) for id in ftrack_ids_to_delete]
@@ -638,6 +639,21 @@ class DeleteAssetSubset(BaseAction):
parent_ids_to_delete.append(entity["id"])
to_delete_entities.append(entity)
+ # Unset 'task_id' from AssetVersion entities
+ # - when task is deleted the asset version is not marked for deletion
+ task_ids = set(
+ entity["id"]
+ for entity in to_delete_entities
+ if entity.entity_type.lower() == "task"
+ )
+ for chunk in create_chunks(task_ids):
+ asset_versions = session.query((
+ "select id, task_id from AssetVersion where task_id in ({})"
+ ).format(self.join_query_keys(chunk))).all()
+ for asset_version in asset_versions:
+ asset_version["task_id"] = None
+ session.commit()
+
entities_by_link_len = collections.defaultdict(list)
for entity in to_delete_entities:
entities_by_link_len[len(entity["link"])].append(entity)
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_delete_old_versions.py b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_delete_old_versions.py
rename to openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_delivery.py b/openpype/modules/ftrack/event_handlers_user/action_delivery.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_delivery.py
rename to openpype/modules/ftrack/event_handlers_user/action_delivery.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_djvview.py b/openpype/modules/ftrack/event_handlers_user/action_djvview.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_djvview.py
rename to openpype/modules/ftrack/event_handlers_user/action_djvview.py
diff --git a/openpype/modules/ftrack/event_handlers_user/action_job_killer.py b/openpype/modules/ftrack/event_handlers_user/action_job_killer.py
new file mode 100644
index 0000000000..f489c0c54c
--- /dev/null
+++ b/openpype/modules/ftrack/event_handlers_user/action_job_killer.py
@@ -0,0 +1,134 @@
+import json
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
+
+
+class JobKiller(BaseAction):
+ """Kill jobs that are marked as running."""
+
+ identifier = "job.killer"
+ label = "OpenPype Admin"
+ variant = "- Job Killer"
+ description = "Killing selected running jobs"
+ icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg")
+ settings_key = "job_killer"
+
+ def discover(self, session, entities, event):
+ """Check if action is available for user role."""
+ return self.valid_roles(session, entities, event)
+
+ def interface(self, session, entities, event):
+ if event["data"].get("values"):
+ return
+
+ title = "Select jobs to kill"
+
+ jobs = session.query(
+ "select id, user_id, status, created_at, data from Job"
+ " where status in (\"queued\", \"running\")"
+ ).all()
+ if not jobs:
+ return {
+ "success": True,
+ "message": "Didn't found any running jobs"
+ }
+
+ # Collect user ids from jobs
+ user_ids = set()
+ for job in jobs:
+ user_id = job["user_id"]
+ if user_id:
+ user_ids.add(user_id)
+
+ # Store usernames by their ids
+ usernames_by_id = {}
+ if user_ids:
+ users = session.query(
+ "select id, username from User where id in ({})".format(
+ self.join_query_keys(user_ids)
+ )
+ ).all()
+ for user in users:
+ usernames_by_id[user["id"]] = user["username"]
+
+ items = []
+ for job in jobs:
+            try:
+                data = json.loads(job["data"])
+                description = data["description"]
+            except Exception:
+                description = "*No description*"
+            user_id = job["user_id"]
+            username = usernames_by_id.get(user_id) or "Unknown user"
+            created = job["created_at"].strftime('%d.%m.%Y %H:%M:%S')
+            label = "{} - {} - {}".format(
+                username, description, created
+            )
+ item_label = {
+ "type": "label",
+ "value": label
+ }
+ item = {
+ "name": job["id"],
+ "type": "boolean",
+ "value": False
+ }
+ if len(items) > 0:
+ items.append({"type": "label", "value": "---"})
+ items.append(item_label)
+ items.append(item)
+
+ return {
+ "items": items,
+ "title": title
+ }
+
+ def launch(self, session, entities, event):
+ if "values" not in event["data"]:
+ return
+
+ values = event["data"]["values"]
+ if len(values) < 1:
+ return {
+ "success": True,
+ "message": "No jobs to kill!"
+ }
+
+ job_ids = set()
+ for job_id, kill_job in values.items():
+ if kill_job:
+ job_ids.add(job_id)
+
+ jobs = session.query(
+ "select id, status from Job where id in ({})".format(
+ self.join_query_keys(job_ids)
+ )
+ ).all()
+
+ # Update all the queried jobs, setting the status to failed.
+ for job in jobs:
+ try:
+ origin_status = job["status"]
+ self.log.debug((
+ 'Changing Job ({}) status: {} -> failed'
+ ).format(job["id"], origin_status))
+
+ job["status"] = "failed"
+ session.commit()
+
+ except Exception:
+ session.rollback()
+ self.log.warning((
+ "Changing Job ({}) has failed"
+ ).format(job["id"]))
+
+ self.log.info("All selected jobs were killed Successfully!")
+ return {
+ "success": True,
+ "message": "All selected jobs were killed Successfully!"
+ }
+
+
+def register(session):
+    '''Register plugin. Called when used as a plugin.'''
+
+ JobKiller(session).register()
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_multiple_notes.py b/openpype/modules/ftrack/event_handlers_user/action_multiple_notes.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_multiple_notes.py
rename to openpype/modules/ftrack/event_handlers_user/action_multiple_notes.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_prepare_project.py
rename to openpype/modules/ftrack/event_handlers_user/action_prepare_project.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_rv.py b/openpype/modules/ftrack/event_handlers_user/action_rv.py
similarity index 98%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_rv.py
rename to openpype/modules/ftrack/event_handlers_user/action_rv.py
index 71d790f7e7..bdb0eaf250 100644
--- a/openpype/modules/default_modules/ftrack/event_handlers_user/action_rv.py
+++ b/openpype/modules/ftrack/event_handlers_user/action_rv.py
@@ -3,9 +3,10 @@ import subprocess
import traceback
import json
-from openpype_modules.ftrack.lib import BaseAction, statics_icon
import ftrack_api
from avalon import io, api
+from openpype.pipeline import get_representation_path
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
class RVAction(BaseAction):
@@ -307,7 +308,7 @@ class RVAction(BaseAction):
"name": "preview"
}
)
- paths.append(api.get_representation_path(representation))
+ paths.append(get_representation_path(representation))
return paths
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_seed.py b/openpype/modules/ftrack/event_handlers_user/action_seed.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_seed.py
rename to openpype/modules/ftrack/event_handlers_user/action_seed.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py b/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py
rename to openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_sync_to_avalon.py
rename to openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_test.py b/openpype/modules/ftrack/event_handlers_user/action_test.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_test.py
rename to openpype/modules/ftrack/event_handlers_user/action_test.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_thumbnail_to_childern.py b/openpype/modules/ftrack/event_handlers_user/action_thumbnail_to_childern.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_thumbnail_to_childern.py
rename to openpype/modules/ftrack/event_handlers_user/action_thumbnail_to_childern.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_thumbnail_to_parent.py b/openpype/modules/ftrack/event_handlers_user/action_thumbnail_to_parent.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_thumbnail_to_parent.py
rename to openpype/modules/ftrack/event_handlers_user/action_thumbnail_to_parent.py
diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_where_run_ask.py b/openpype/modules/ftrack/event_handlers_user/action_where_run_ask.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/event_handlers_user/action_where_run_ask.py
rename to openpype/modules/ftrack/event_handlers_user/action_where_run_ask.py
diff --git a/openpype/modules/default_modules/ftrack/ftrack_module.py b/openpype/modules/ftrack/ftrack_module.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/ftrack_module.py
rename to openpype/modules/ftrack/ftrack_module.py
diff --git a/openpype/modules/default_modules/ftrack/ftrack_server/__init__.py b/openpype/modules/ftrack/ftrack_server/__init__.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/ftrack_server/__init__.py
rename to openpype/modules/ftrack/ftrack_server/__init__.py
diff --git a/openpype/modules/default_modules/ftrack/ftrack_server/event_server_cli.py b/openpype/modules/ftrack/ftrack_server/event_server_cli.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/ftrack_server/event_server_cli.py
rename to openpype/modules/ftrack/ftrack_server/event_server_cli.py
diff --git a/openpype/modules/default_modules/ftrack/ftrack_server/ftrack_server.py b/openpype/modules/ftrack/ftrack_server/ftrack_server.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/ftrack_server/ftrack_server.py
rename to openpype/modules/ftrack/ftrack_server/ftrack_server.py
diff --git a/openpype/modules/default_modules/ftrack/ftrack_server/lib.py b/openpype/modules/ftrack/ftrack_server/lib.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/ftrack_server/lib.py
rename to openpype/modules/ftrack/ftrack_server/lib.py
diff --git a/openpype/modules/default_modules/ftrack/ftrack_server/socket_thread.py b/openpype/modules/ftrack/ftrack_server/socket_thread.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/ftrack_server/socket_thread.py
rename to openpype/modules/ftrack/ftrack_server/socket_thread.py
diff --git a/openpype/modules/default_modules/ftrack/launch_hooks/post_ftrack_changes.py b/openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/launch_hooks/post_ftrack_changes.py
rename to openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py
diff --git a/openpype/modules/default_modules/ftrack/launch_hooks/pre_python2_vendor.py b/openpype/modules/ftrack/launch_hooks/pre_python2_vendor.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/launch_hooks/pre_python2_vendor.py
rename to openpype/modules/ftrack/launch_hooks/pre_python2_vendor.py
diff --git a/openpype/modules/default_modules/ftrack/lib/__init__.py b/openpype/modules/ftrack/lib/__init__.py
similarity index 93%
rename from openpype/modules/default_modules/ftrack/lib/__init__.py
rename to openpype/modules/ftrack/lib/__init__.py
index 80b4db9dd6..7fc2bc99eb 100644
--- a/openpype/modules/default_modules/ftrack/lib/__init__.py
+++ b/openpype/modules/ftrack/lib/__init__.py
@@ -4,7 +4,8 @@ from .constants import (
CUST_ATTR_GROUP,
CUST_ATTR_TOOLS,
CUST_ATTR_APPLICATIONS,
- CUST_ATTR_INTENT
+ CUST_ATTR_INTENT,
+ FPS_KEYS
)
from .settings import (
get_ftrack_event_mongo_info
@@ -30,6 +31,8 @@ __all__ = (
"CUST_ATTR_GROUP",
"CUST_ATTR_TOOLS",
"CUST_ATTR_APPLICATIONS",
+ "CUST_ATTR_INTENT",
+ "FPS_KEYS",
"get_ftrack_event_mongo_info",
diff --git a/openpype/modules/default_modules/ftrack/lib/avalon_sync.py b/openpype/modules/ftrack/lib/avalon_sync.py
similarity index 95%
rename from openpype/modules/default_modules/ftrack/lib/avalon_sync.py
rename to openpype/modules/ftrack/lib/avalon_sync.py
index 06e8784287..5301ec568e 100644
--- a/openpype/modules/default_modules/ftrack/lib/avalon_sync.py
+++ b/openpype/modules/ftrack/lib/avalon_sync.py
@@ -2,6 +2,9 @@ import re
import json
import collections
import copy
+import numbers
+
+import six
from avalon.api import AvalonMongoDB
@@ -14,7 +17,7 @@ from openpype.api import (
)
from openpype.lib import ApplicationManager
-from .constants import CUST_ATTR_ID_KEY
+from .constants import CUST_ATTR_ID_KEY, FPS_KEYS
from .custom_attributes import get_openpype_attr, query_custom_attributes
from bson.objectid import ObjectId
@@ -33,6 +36,130 @@ CURRENT_DOC_SCHEMAS = {
}
+class InvalidFpsValue(Exception):
+ pass
+
+
+def is_string_number(value):
+ """Can string value be converted to number (float)."""
+ if not isinstance(value, six.string_types):
+ raise TypeError("Expected {} got {}".format(
+ ", ".join(str(t) for t in six.string_types), str(type(value))
+ ))
+ if value == ".":
+ return False
+
+ if value.startswith("."):
+ value = "0" + value
+ elif value.endswith("."):
+ value = value + "0"
+
+ if re.match(r"^\d+(\.\d+)?$", value) is None:
+ return False
+ return True
+
+
+def convert_to_fps(source_value):
+ """Convert value into fps value.
+
+ Non string values are kept untouched. String is tried to convert.
+ Valid values:
+ "1000"
+ "1000.05"
+ "1000,05"
+ ",05"
+ ".05"
+ "1000,"
+ "1000."
+ "1000/1000"
+ "1000.05/1000"
+ "1000/1000.05"
+ "1000.05/1000.05"
+ "1000,05/1000"
+ "1000/1000,05"
+ "1000,05/1000,05"
+
+ Invalid values:
+ "/"
+ "/1000"
+ "1000/"
+ ","
+ "."
+ ...any other string
+
+ Returns:
+ float: Converted value.
+
+ Raises:
+ InvalidFpsValue: When value can't be converted to float.
+ """
+ if not isinstance(source_value, six.string_types):
+ if isinstance(source_value, numbers.Number):
+ return float(source_value)
+ return source_value
+
+ value = source_value.strip().replace(",", ".")
+ if not value:
+ raise InvalidFpsValue("Got empty value")
+
+ subs = value.split("/")
+ if len(subs) == 1:
+ str_value = subs[0]
+ if not is_string_number(str_value):
+ raise InvalidFpsValue(
+ "Value \"{}\" can't be converted to number.".format(value)
+ )
+ return float(str_value)
+
+ elif len(subs) == 2:
+ divident, divisor = subs
+ if not divident or not is_string_number(divident):
+ raise InvalidFpsValue(
+ "Divident value \"{}\" can't be converted to number".format(
+ divident
+ )
+ )
+
+ if not divisor or not is_string_number(divisor):
+ raise InvalidFpsValue(
+ "Divisor value \"{}\" can't be converted to number".format(
+                    divisor
+ )
+ )
+ divisor_float = float(divisor)
+ if divisor_float == 0.0:
+ raise InvalidFpsValue("Can't divide by zero")
+ return float(divident) / divisor_float
+
+ raise InvalidFpsValue(
+ "Value can't be converted to number \"{}\"".format(source_value)
+ )
+
+
+def create_chunks(iterable, chunk_size=None):
+ """Separate iterable into multiple chunks by size.
+
+ Args:
+ iterable(list|tuple|set): Object that will be separated into chunks.
+ chunk_size(int): Size of one chunk. Default value is 200.
+
+ Returns:
+ list: Chunked items.
+ """
+ chunks = []
+ if not iterable:
+ return chunks
+
+ tupled_iterable = tuple(iterable)
+ iterable_size = len(tupled_iterable)
+ if chunk_size is None:
+ chunk_size = 200
+
+ for idx in range(0, iterable_size, chunk_size):
+ chunks.append(tupled_iterable[idx:idx + chunk_size])
+ return chunks
+
+
def check_regex(name, entity_type, in_schema=None, schema_patterns=None):
schema_name = "asset-3.0"
if in_schema:
@@ -956,6 +1083,7 @@ class SyncEntitiesFactory:
sync_ids
)
+ invalid_fps_items = []
for item in items:
entity_id = item["entity_id"]
attr_id = item["configuration_id"]
@@ -968,8 +1096,24 @@ class SyncEntitiesFactory:
value = item["value"]
if convert_type:
value = convert_type(value)
+
+ if key in FPS_KEYS:
+ try:
+ value = convert_to_fps(value)
+ except InvalidFpsValue:
+ invalid_fps_items.append((entity_id, value))
self.entities_dict[entity_id][store_key][key] = value
+ if invalid_fps_items:
+ fps_msg = (
+ "These entities have invalid fps value in custom attributes"
+ )
+ items = []
+ for entity_id, value in invalid_fps_items:
+ ent_path = self.get_ent_path(entity_id)
+ items.append("{} - \"{}\"".format(ent_path, value))
+ self.report_items["error"][fps_msg] = items
+
# process hierarchical attributes
self.set_hierarchical_attribute(
hier_attrs, sync_ids, cust_attr_type_name_by_id
@@ -1002,8 +1146,15 @@ class SyncEntitiesFactory:
if key.startswith("avalon_"):
store_key = "avalon_attrs"
+ default_value = attr["default"]
+ if key in FPS_KEYS:
+ try:
+ default_value = convert_to_fps(default_value)
+ except InvalidFpsValue:
+ pass
+
self.entities_dict[self.ft_project_id][store_key][key] = (
- attr["default"]
+ default_value
)
# Add attribute ids to entities dictionary
@@ -1045,6 +1196,7 @@ class SyncEntitiesFactory:
True
)
+ invalid_fps_items = []
avalon_hier = []
for item in items:
value = item["value"]
@@ -1064,6 +1216,13 @@ class SyncEntitiesFactory:
entity_id = item["entity_id"]
key = attribute_key_by_id[attr_id]
+ if key in FPS_KEYS:
+ try:
+ value = convert_to_fps(value)
+ except InvalidFpsValue:
+ invalid_fps_items.append((entity_id, value))
+ continue
+
if key.startswith("avalon_"):
store_key = "avalon_attrs"
avalon_hier.append(key)
@@ -1071,6 +1230,16 @@ class SyncEntitiesFactory:
store_key = "hier_attrs"
self.entities_dict[entity_id][store_key][key] = value
+ if invalid_fps_items:
+ fps_msg = (
+ "These entities have invalid fps value in custom attributes"
+ )
+ items = []
+ for entity_id, value in invalid_fps_items:
+ ent_path = self.get_ent_path(entity_id)
+ items.append("{} - \"{}\"".format(ent_path, value))
+ self.report_items["error"][fps_msg] = items
+
# Get dictionary with not None hierarchical values to pull to childs
top_id = self.ft_project_id
project_values = {}
@@ -1147,10 +1316,8 @@ class SyncEntitiesFactory:
ids_len = len(tupled_ids)
chunk_size = int(5000 / ids_len)
all_links = []
- for idx in range(0, ids_len, chunk_size):
- entity_ids_joined = join_query_keys(
- tupled_ids[idx:idx + chunk_size]
- )
+ for chunk in create_chunks(ftrack_ids, chunk_size):
+ entity_ids_joined = join_query_keys(chunk)
all_links.extend(self.session.query((
"select from_id, to_id from"
diff --git a/openpype/modules/default_modules/ftrack/lib/constants.py b/openpype/modules/ftrack/lib/constants.py
similarity index 85%
rename from openpype/modules/default_modules/ftrack/lib/constants.py
rename to openpype/modules/ftrack/lib/constants.py
index e6e2013d2b..636dcfbc3d 100644
--- a/openpype/modules/default_modules/ftrack/lib/constants.py
+++ b/openpype/modules/ftrack/lib/constants.py
@@ -12,3 +12,9 @@ CUST_ATTR_APPLICATIONS = "applications"
CUST_ATTR_TOOLS = "tools_env"
# Intent custom attribute name
CUST_ATTR_INTENT = "intent"
+
+FPS_KEYS = {
+ "fps",
+ # For development purposes
+ "fps_string"
+}
diff --git a/openpype/modules/default_modules/ftrack/lib/credentials.py b/openpype/modules/ftrack/lib/credentials.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/lib/credentials.py
rename to openpype/modules/ftrack/lib/credentials.py
diff --git a/openpype/modules/default_modules/ftrack/lib/custom_attributes.json b/openpype/modules/ftrack/lib/custom_attributes.json
similarity index 100%
rename from openpype/modules/default_modules/ftrack/lib/custom_attributes.json
rename to openpype/modules/ftrack/lib/custom_attributes.json
diff --git a/openpype/modules/default_modules/ftrack/lib/custom_attributes.py b/openpype/modules/ftrack/lib/custom_attributes.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/lib/custom_attributes.py
rename to openpype/modules/ftrack/lib/custom_attributes.py
diff --git a/openpype/modules/default_modules/ftrack/lib/ftrack_action_handler.py b/openpype/modules/ftrack/lib/ftrack_action_handler.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/lib/ftrack_action_handler.py
rename to openpype/modules/ftrack/lib/ftrack_action_handler.py
diff --git a/openpype/modules/default_modules/ftrack/lib/ftrack_base_handler.py b/openpype/modules/ftrack/lib/ftrack_base_handler.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/lib/ftrack_base_handler.py
rename to openpype/modules/ftrack/lib/ftrack_base_handler.py
diff --git a/openpype/modules/default_modules/ftrack/lib/ftrack_event_handler.py b/openpype/modules/ftrack/lib/ftrack_event_handler.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/lib/ftrack_event_handler.py
rename to openpype/modules/ftrack/lib/ftrack_event_handler.py
diff --git a/openpype/modules/default_modules/ftrack/lib/settings.py b/openpype/modules/ftrack/lib/settings.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/lib/settings.py
rename to openpype/modules/ftrack/lib/settings.py
diff --git a/openpype/modules/default_modules/ftrack/plugins/_unused_publish/integrate_ftrack_comments.py b/openpype/modules/ftrack/plugins/_unused_publish/integrate_ftrack_comments.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/plugins/_unused_publish/integrate_ftrack_comments.py
rename to openpype/modules/ftrack/plugins/_unused_publish/integrate_ftrack_comments.py
diff --git a/openpype/modules/default_modules/ftrack/plugins/publish/collect_ftrack_api.py b/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py
similarity index 84%
rename from openpype/modules/default_modules/ftrack/plugins/publish/collect_ftrack_api.py
rename to openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py
index a348617cfc..07af217fb6 100644
--- a/openpype/modules/default_modules/ftrack/plugins/publish/collect_ftrack_api.py
+++ b/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py
@@ -1,4 +1,3 @@
-import os
import logging
import pyblish.api
import avalon.api
@@ -43,37 +42,48 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
).format(project_name))
project_entity = project_entities[0]
+
self.log.debug("Project found: {0}".format(project_entity))
- # Find asset entity
- entity_query = (
- 'TypedContext where project_id is "{0}"'
- ' and name is "{1}"'
- ).format(project_entity["id"], asset_name)
- self.log.debug("Asset entity query: < {0} >".format(entity_query))
- asset_entities = []
- for entity in session.query(entity_query).all():
- # Skip tasks
- if entity.entity_type.lower() != "task":
- asset_entities.append(entity)
+ asset_entity = None
+ if asset_name:
+ # Find asset entity
+ entity_query = (
+ 'TypedContext where project_id is "{0}"'
+ ' and name is "{1}"'
+ ).format(project_entity["id"], asset_name)
+ self.log.debug("Asset entity query: < {0} >".format(entity_query))
+ asset_entities = []
+ for entity in session.query(entity_query).all():
+ # Skip tasks
+ if entity.entity_type.lower() != "task":
+ asset_entities.append(entity)
- if len(asset_entities) == 0:
- raise AssertionError((
- "Entity with name \"{0}\" not found"
- " in Ftrack project \"{1}\"."
- ).format(asset_name, project_name))
+ if len(asset_entities) == 0:
+ raise AssertionError((
+ "Entity with name \"{0}\" not found"
+ " in Ftrack project \"{1}\"."
+ ).format(asset_name, project_name))
- elif len(asset_entities) > 1:
- raise AssertionError((
- "Found more than one entity with name \"{0}\""
- " in Ftrack project \"{1}\"."
- ).format(asset_name, project_name))
+ elif len(asset_entities) > 1:
+ raise AssertionError((
+ "Found more than one entity with name \"{0}\""
+ " in Ftrack project \"{1}\"."
+ ).format(asset_name, project_name))
+
+ asset_entity = asset_entities[0]
- asset_entity = asset_entities[0]
self.log.debug("Asset found: {0}".format(asset_entity))
+ task_entity = None
# Find task entity if task is set
- if task_name:
+ if not asset_entity:
+ self.log.warning(
+ "Asset entity is not set. Skipping query of task entity."
+ )
+ elif not task_name:
+ self.log.warning("Task name is not set.")
+ else:
task_query = (
'Task where name is "{0}" and parent_id is "{1}"'
).format(task_name, asset_entity["id"])
@@ -88,10 +98,6 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
else:
self.log.debug("Task entity found: {0}".format(task_entity))
- else:
- task_entity = None
- self.log.warning("Task name is not set.")
-
context.data["ftrackSession"] = session
context.data["ftrackPythonModule"] = ftrack_api
context.data["ftrackProject"] = project_entity
diff --git a/openpype/modules/default_modules/ftrack/plugins/publish/collect_ftrack_family.py b/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/plugins/publish/collect_ftrack_family.py
rename to openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py
diff --git a/openpype/modules/default_modules/ftrack/plugins/publish/collect_local_ftrack_creds.py b/openpype/modules/ftrack/plugins/publish/collect_local_ftrack_creds.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/plugins/publish/collect_local_ftrack_creds.py
rename to openpype/modules/ftrack/plugins/publish/collect_local_ftrack_creds.py
diff --git a/openpype/modules/default_modules/ftrack/plugins/publish/collect_username.py b/openpype/modules/ftrack/plugins/publish/collect_username.py
similarity index 80%
rename from openpype/modules/default_modules/ftrack/plugins/publish/collect_username.py
rename to openpype/modules/ftrack/plugins/publish/collect_username.py
index 303490189b..a9b746ea51 100644
--- a/openpype/modules/default_modules/ftrack/plugins/publish/collect_username.py
+++ b/openpype/modules/ftrack/plugins/publish/collect_username.py
@@ -23,8 +23,11 @@ class CollectUsername(pyblish.api.ContextPlugin):
Expects "pype.club" user created on Ftrack and FTRACK_BOT_API_KEY env
var set up.
+ Resets `context.data["user"] to correctly populate `version.author` and
+ `representation.context.username`
+
"""
- order = pyblish.api.CollectorOrder - 0.488
+ order = pyblish.api.CollectorOrder + 0.0015
label = "Collect ftrack username"
hosts = ["webpublisher", "photoshop"]
targets = ["remotepublish", "filespublish", "tvpaint_worker"]
@@ -33,7 +36,6 @@ class CollectUsername(pyblish.api.ContextPlugin):
def process(self, context):
self.log.info("CollectUsername")
-
os.environ["FTRACK_API_USER"] = os.environ["FTRACK_BOT_API_USER"]
os.environ["FTRACK_API_KEY"] = os.environ["FTRACK_BOT_API_KEY"]
@@ -57,7 +59,13 @@ class CollectUsername(pyblish.api.ContextPlugin):
if not user:
raise ValueError(
"Couldn't find user with {} email".format(user_email))
-
- username = user[0].get("username")
+ user = user[0]
+ username = user.get("username")
self.log.debug("Resolved ftrack username:: {}".format(username))
os.environ["FTRACK_API_USER"] = username
+
+ burnin_name = username
+ if '@' in burnin_name:
+ burnin_name = burnin_name[:burnin_name.index('@')]
+ os.environ["WEBPUBLISH_OPENPYPE_USERNAME"] = burnin_name
+ context.data["user"] = burnin_name
diff --git a/openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_api.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_api.py
rename to openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py
diff --git a/openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py
rename to openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py
diff --git a/openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_instances.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_instances.py
rename to openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py
diff --git a/openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_note.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_note.py
rename to openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py
diff --git a/openpype/modules/default_modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py b/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py
rename to openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py
diff --git a/openpype/modules/default_modules/ftrack/plugins/publish/validate_custom_ftrack_attributes.py b/openpype/modules/ftrack/plugins/publish/validate_custom_ftrack_attributes.py
similarity index 100%
rename from openpype/modules/default_modules/ftrack/plugins/publish/validate_custom_ftrack_attributes.py
rename to openpype/modules/ftrack/plugins/publish/validate_custom_ftrack_attributes.py
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/.gitignore b/openpype/modules/ftrack/python2_vendor/arrow/.gitignore
new file mode 100644
index 0000000000..0448d0cf0c
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/.gitignore
@@ -0,0 +1,211 @@
+README.rst.new
+
+# Small entry point file for debugging tasks
+test.py
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+local/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# Swap
+[._]*.s[a-v][a-z]
+[._]*.sw[a-p]
+[._]s[a-rt-v][a-z]
+[._]ss[a-gi-z]
+[._]sw[a-p]
+
+# Session
+Session.vim
+Sessionx.vim
+
+# Temporary
+.netrwhist
+*~
+# Auto-generated tag files
+tags
+# Persistent undo
+[._]*.un~
+
+.idea/
+.vscode/
+
+# General
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+*~
+
+# temporary files which can be created if a process still has a handle open of a deleted file
+.fuse_hidden*
+
+# KDE directory preferences
+.directory
+
+# Linux trash folder which might appear on any partition or disk
+.Trash-*
+
+# .nfs files are created when an open file is removed but is still being accessed
+.nfs*
+
+# Windows thumbnail cache files
+Thumbs.db
+Thumbs.db:encryptable
+ehthumbs.db
+ehthumbs_vista.db
+
+# Dump file
+*.stackdump
+
+# Folder config file
+[Dd]esktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msix
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/.pre-commit-config.yaml b/openpype/modules/ftrack/python2_vendor/arrow/.pre-commit-config.yaml
new file mode 100644
index 0000000000..1f5128595b
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/.pre-commit-config.yaml
@@ -0,0 +1,41 @@
+default_language_version:
+ python: python3
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v3.2.0
+ hooks:
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+ - id: fix-encoding-pragma
+ exclude: ^arrow/_version.py
+ - id: requirements-txt-fixer
+ - id: check-ast
+ - id: check-yaml
+ - id: check-case-conflict
+ - id: check-docstring-first
+ - id: check-merge-conflict
+ - id: debug-statements
+ - repo: https://github.com/timothycrosley/isort
+ rev: 5.4.2
+ hooks:
+ - id: isort
+ - repo: https://github.com/asottile/pyupgrade
+ rev: v2.7.2
+ hooks:
+ - id: pyupgrade
+ - repo: https://github.com/pre-commit/pygrep-hooks
+ rev: v1.6.0
+ hooks:
+ - id: python-no-eval
+ - id: python-check-blanket-noqa
+ - id: rst-backticks
+ - repo: https://github.com/psf/black
+ rev: 20.8b1
+ hooks:
+ - id: black
+ args: [--safe, --quiet]
+ - repo: https://gitlab.com/pycqa/flake8
+ rev: 3.8.3
+ hooks:
+ - id: flake8
+ additional_dependencies: [flake8-bugbear]
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/CHANGELOG.rst b/openpype/modules/ftrack/python2_vendor/arrow/CHANGELOG.rst
new file mode 100644
index 0000000000..0b55a4522c
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/CHANGELOG.rst
@@ -0,0 +1,598 @@
+Changelog
+=========
+
+0.17.0 (2020-10-2)
+-------------------
+
+- [WARN] Arrow will **drop support** for Python 2.7 and 3.5 in the upcoming 1.0.0 release. This is the last major release to support Python 2.7 and Python 3.5.
+- [NEW] Arrow now properly handles imaginary datetimes during DST shifts. For example:
+
+.. code-block:: python
+ >>> just_before = arrow.get(2013, 3, 31, 1, 55, tzinfo="Europe/Paris")
+ >>> just_before.shift(minutes=+10)
+
+
+.. code-block:: python
+ >>> before = arrow.get("2018-03-10 23:00:00", "YYYY-MM-DD HH:mm:ss", tzinfo="US/Pacific")
+ >>> after = arrow.get("2018-03-11 04:00:00", "YYYY-MM-DD HH:mm:ss", tzinfo="US/Pacific")
+ >>> result=[(t, t.to("utc")) for t in arrow.Arrow.range("hour", before, after)]
+ >>> for r in result:
+ ... print(r)
+ ...
+ (, )
+ (, )
+ (, )
+ (, )
+ (, )
+
+- [NEW] Added ``humanize`` week granularity translation for Tagalog.
+- [CHANGE] Calls to the ``timestamp`` property now emit a ``DeprecationWarning``. In a future release, ``timestamp`` will be changed to a method to align with Python's datetime module. If you would like to continue using the property, please change your code to use the ``int_timestamp`` or ``float_timestamp`` properties instead.
+- [CHANGE] Expanded and improved Catalan locale.
+- [FIX] Fixed a bug that caused ``Arrow.range()`` to incorrectly cut off ranges in certain scenarios when using month, quarter, or year endings.
+- [FIX] Fixed a bug that caused day of week token parsing to be case sensitive.
+- [INTERNAL] A number of functions were reordered in arrow.py for better organization and grouping of related methods. This change will have no impact on usage.
+- [INTERNAL] A minimum tox version is now enforced for compatibility reasons. Contributors must use tox >3.18.0 going forward.
+
+0.16.0 (2020-08-23)
+-------------------
+
+- [WARN] Arrow will **drop support** for Python 2.7 and 3.5 in the upcoming 1.0.0 release. The 0.16.x and 0.17.x releases are the last to support Python 2.7 and 3.5.
+- [NEW] Implemented `PEP 495 `_ to handle ambiguous datetimes. This is achieved by the addition of the ``fold`` attribute for Arrow objects. For example:
+
+.. code-block:: python
+
+ >>> before = Arrow(2017, 10, 29, 2, 0, tzinfo='Europe/Stockholm')
+
+ >>> before.fold
+ 0
+ >>> before.ambiguous
+ True
+ >>> after = Arrow(2017, 10, 29, 2, 0, tzinfo='Europe/Stockholm', fold=1)
+
+ >>> after = before.replace(fold=1)
+
+
+- [NEW] Added ``normalize_whitespace`` flag to ``arrow.get``. This is useful for parsing log files and/or any files that may contain inconsistent spacing. For example:
+
+.. code-block:: python
+
+ >>> arrow.get("Jun 1 2005 1:33PM", "MMM D YYYY H:mmA", normalize_whitespace=True)
+
+ >>> arrow.get("2013-036 \t 04:05:06Z", normalize_whitespace=True)
+
+
+0.15.8 (2020-07-23)
+-------------------
+
+- [WARN] Arrow will **drop support** for Python 2.7 and 3.5 in the upcoming 1.0.0 release. The 0.15.x, 0.16.x, and 0.17.x releases are the last to support Python 2.7 and 3.5.
+- [NEW] Added ``humanize`` week granularity translation for Czech.
+- [FIX] ``arrow.get`` will now pick sane defaults when weekdays are passed with particular token combinations, see `#446 `_.
+- [INTERNAL] Moved arrow to an organization. The repo can now be found `here `_.
+- [INTERNAL] Started issuing deprecation warnings for Python 2.7 and 3.5.
+- [INTERNAL] Added Python 3.9 to CI pipeline.
+
+0.15.7 (2020-06-19)
+-------------------
+
+- [NEW] Added a number of built-in format strings. See the `docs `_ for a complete list of supported formats. For example:
+
+.. code-block:: python
+
+ >>> arw = arrow.utcnow()
+ >>> arw.format(arrow.FORMAT_COOKIE)
+ 'Wednesday, 27-May-2020 10:30:35 UTC'
+
+- [NEW] Arrow is now fully compatible with Python 3.9 and PyPy3.
+- [NEW] Added Makefile, tox.ini, and requirements.txt files to the distribution bundle.
+- [NEW] Added French Canadian and Swahili locales.
+- [NEW] Added ``humanize`` week granularity translation for Hebrew, Greek, Macedonian, Swedish, Slovak.
+- [FIX] ms and μs timestamps are now normalized in ``arrow.get()``, ``arrow.fromtimestamp()``, and ``arrow.utcfromtimestamp()``. For example:
+
+.. code-block:: python
+
+ >>> ts = 1591161115194556
+ >>> arw = arrow.get(ts)
+
+ >>> arw.timestamp
+ 1591161115
+
+- [FIX] Refactored and updated Macedonian, Hebrew, Korean, and Portuguese locales.
+
+0.15.6 (2020-04-29)
+-------------------
+
+- [NEW] Added support for parsing and formatting `ISO 8601 week dates `_ via a new token ``W``, for example:
+
+.. code-block:: python
+
+ >>> arrow.get("2013-W29-6", "W")
+
+ >>> utc=arrow.utcnow()
+ >>> utc
+
+ >>> utc.format("W")
+ '2020-W04-4'
+
+- [NEW] Formatting with ``x`` token (microseconds) is now possible, for example:
+
+.. code-block:: python
+
+ >>> dt = arrow.utcnow()
+ >>> dt.format("x")
+ '1585669870688329'
+ >>> dt.format("X")
+ '1585669870'
+
+- [NEW] Added ``humanize`` week granularity translation for German, Italian, Polish & Taiwanese locales.
+- [FIX] Consolidated and simplified German locales.
+- [INTERNAL] Moved testing suite from nosetest/Chai to pytest/pytest-mock.
+- [INTERNAL] Converted xunit-style setup and teardown functions in tests to pytest fixtures.
+- [INTERNAL] Setup Github Actions for CI alongside Travis.
+- [INTERNAL] Help support Arrow's future development by donating to the project on `Open Collective `_.
+
+0.15.5 (2020-01-03)
+-------------------
+
+- [WARN] Python 2 reached EOL on 2020-01-01. arrow will **drop support** for Python 2 in a future release to be decided (see `#739 `_).
+- [NEW] Added bounds parameter to ``span_range``, ``interval`` and ``span`` methods. This allows you to include or exclude the start and end values.
+- [NEW] ``arrow.get()`` can now create arrow objects from a timestamp with a timezone, for example:
+
+.. code-block:: python
+
+ >>> arrow.get(1367900664, tzinfo=tz.gettz('US/Pacific'))
+
+
+- [NEW] ``humanize`` can now combine multiple levels of granularity, for example:
+
+.. code-block:: python
+
+ >>> later140 = arrow.utcnow().shift(seconds=+8400)
+ >>> later140.humanize(granularity="minute")
+ 'in 139 minutes'
+ >>> later140.humanize(granularity=["hour", "minute"])
+ 'in 2 hours and 19 minutes'
+
+- [NEW] Added Hong Kong locale (``zh_hk``).
+- [NEW] Added ``humanize`` week granularity translation for Dutch.
+- [NEW] Numbers are now displayed when using the seconds granularity in ``humanize``.
+- [CHANGE] ``range`` now supports both the singular and plural forms of the ``frames`` argument (e.g. day and days).
+- [FIX] Improved parsing of strings that contain punctuation.
+- [FIX] Improved behaviour of ``humanize`` when singular seconds are involved.
+
+0.15.4 (2019-11-02)
+-------------------
+
+- [FIX] Fixed an issue that caused package installs to fail on Conda Forge.
+
+0.15.3 (2019-11-02)
+-------------------
+
+- [NEW] ``factory.get()`` can now create arrow objects from a ISO calendar tuple, for example:
+
+.. code-block:: python
+
+ >>> arrow.get((2013, 18, 7))
+
+
+- [NEW] Added a new token ``x`` to allow parsing of integer timestamps with milliseconds and microseconds.
+- [NEW] Formatting now supports escaping of characters using the same syntax as parsing, for example:
+
+.. code-block:: python
+
+ >>> arw = arrow.now()
+ >>> fmt = "YYYY-MM-DD h [h] m"
+ >>> arw.format(fmt)
+ '2019-11-02 3 h 32'
+
+- [NEW] Added ``humanize`` week granularity translations for Chinese, Spanish and Vietnamese.
+- [CHANGE] Added ``ParserError`` to module exports.
+- [FIX] Added support for midnight at end of day. See `#703 `_ for details.
+- [INTERNAL] Created Travis build for macOS.
+- [INTERNAL] Test parsing and formatting against full timezone database.
+
+0.15.2 (2019-09-14)
+-------------------
+
+- [NEW] Added ``humanize`` week granularity translations for Portuguese and Brazilian Portuguese.
+- [NEW] Embedded changelog within docs and added release dates to versions.
+- [FIX] Fixed a bug that caused test failures on Windows only, see `#668 `_ for details.
+
+0.15.1 (2019-09-10)
+-------------------
+
+- [NEW] Added ``humanize`` week granularity translations for Japanese.
+- [FIX] Fixed a bug that caused Arrow to fail when passed a negative timestamp string.
+- [FIX] Fixed a bug that caused Arrow to fail when passed a datetime object with ``tzinfo`` of type ``StaticTzInfo``.
+
+0.15.0 (2019-09-08)
+-------------------
+
+- [NEW] Added support for DDD and DDDD ordinal date tokens. The following functionality is now possible: ``arrow.get("1998-045")``, ``arrow.get("1998-45", "YYYY-DDD")``, ``arrow.get("1998-045", "YYYY-DDDD")``.
+- [NEW] ISO 8601 basic format for dates and times is now supported (e.g. ``YYYYMMDDTHHmmssZ``).
+- [NEW] Added ``humanize`` week granularity translations for French, Russian and Swiss German locales.
+- [CHANGE] Timestamps of type ``str`` are no longer supported **without a format string** in the ``arrow.get()`` method. This change was made to support the ISO 8601 basic format and to address bugs such as `#447 `_.
+
+The following will NOT work in v0.15.0:
+
+.. code-block:: python
+
+ >>> arrow.get("1565358758")
+ >>> arrow.get("1565358758.123413")
+
+The following will work in v0.15.0:
+
+.. code-block:: python
+
+ >>> arrow.get("1565358758", "X")
+ >>> arrow.get("1565358758.123413", "X")
+ >>> arrow.get(1565358758)
+ >>> arrow.get(1565358758.123413)
+
+- [CHANGE] When a meridian token (a|A) is passed and no meridians are available for the specified locale (e.g. unsupported or untranslated) a ``ParserError`` is raised.
+- [CHANGE] The timestamp token (``X``) will now match float timestamps of type ``str``: ``arrow.get(“1565358758.123415”, “X”)``.
+- [CHANGE] Strings with leading and/or trailing whitespace will no longer be parsed without a format string. Please see `the docs `_ for ways to handle this.
+- [FIX] The timestamp token (``X``) will now only match on strings that **strictly contain integers and floats**, preventing incorrect matches.
+- [FIX] Most instances of ``arrow.get()`` returning an incorrect ``Arrow`` object from a partial parsing match have been eliminated. The following issue have been addressed: `#91 `_, `#196 `_, `#396 `_, `#434 `_, `#447 `_, `#456 `_, `#519 `_, `#538 `_, `#560 `_.
+
+0.14.7 (2019-09-04)
+-------------------
+
+- [CHANGE] ``ArrowParseWarning`` will no longer be printed on every call to ``arrow.get()`` with a datetime string. The purpose of the warning was to start a conversation about the upcoming 0.15.0 changes and we appreciate all the feedback that the community has given us!
+
+0.14.6 (2019-08-28)
+-------------------
+
+- [NEW] Added support for ``week`` granularity in ``Arrow.humanize()``. For example, ``arrow.utcnow().shift(weeks=-1).humanize(granularity="week")`` outputs "a week ago". This change introduced two new untranslated words, ``week`` and ``weeks``, to all locale dictionaries, so locale contributions are welcome!
+- [NEW] Fully translated the Brazilian Portuguese locale.
+- [CHANGE] Updated the Macedonian locale to inherit from a Slavic base.
+- [FIX] Fixed a bug that caused ``arrow.get()`` to ignore tzinfo arguments of type string (e.g. ``arrow.get(tzinfo="Europe/Paris")``).
+- [FIX] Fixed a bug that occurred when ``arrow.Arrow()`` was instantiated with a ``pytz`` tzinfo object.
+- [FIX] Fixed a bug that caused Arrow to fail when passed a sub-second token, that when rounded, had a value greater than 999999 (e.g. ``arrow.get("2015-01-12T01:13:15.9999995")``). Arrow should now accurately propagate the rounding for large sub-second tokens.
+
+0.14.5 (2019-08-09)
+-------------------
+
+- [NEW] Added Afrikaans locale.
+- [CHANGE] Removed deprecated ``replace`` shift functionality. Users looking to pass plural properties to the ``replace`` function to shift values should use ``shift`` instead.
+- [FIX] Fixed bug that occurred when ``factory.get()`` was passed a locale kwarg.
+
+0.14.4 (2019-07-30)
+-------------------
+
+- [FIX] Fixed a regression in 0.14.3 that prevented a tzinfo argument of type string to be passed to the ``get()`` function. Functionality such as ``arrow.get("2019072807", "YYYYMMDDHH", tzinfo="UTC")`` should work as normal again.
+- [CHANGE] Moved ``backports.functools_lru_cache`` dependency from ``extra_requires`` to ``install_requires`` for ``Python 2.7`` installs to fix `#495 `_.
+
+0.14.3 (2019-07-28)
+-------------------
+
+- [NEW] Added full support for Python 3.8.
+- [CHANGE] Added warnings for upcoming factory.get() parsing changes in 0.15.0. Please see `#612 `_ for full details.
+- [FIX] Extensive refactor and update of documentation.
+- [FIX] factory.get() can now construct from kwargs.
+- [FIX] Added meridians to Spanish Locale.
+
+0.14.2 (2019-06-06)
+-------------------
+
+- [CHANGE] Travis CI builds now use tox to lint and run tests.
+- [FIX] Fixed UnicodeDecodeError on certain locales (#600).
+
+0.14.1 (2019-06-06)
+-------------------
+
+- [FIX] Fixed ``ImportError: No module named 'dateutil'`` (#598).
+
+0.14.0 (2019-06-06)
+-------------------
+
+- [NEW] Added provisional support for Python 3.8.
+- [CHANGE] Removed support for EOL Python 3.4.
+- [FIX] Updated setup.py with modern Python standards.
+- [FIX] Upgraded dependencies to latest versions.
+- [FIX] Enabled flake8 and black on travis builds.
+- [FIX] Formatted code using black and isort.
+
+0.13.2 (2019-05-30)
+-------------------
+
+- [NEW] Add is_between method.
+- [FIX] Improved humanize behaviour for near zero durations (#416).
+- [FIX] Correct humanize behaviour with future days (#541).
+- [FIX] Documentation updates.
+- [FIX] Improvements to German Locale.
+
+0.13.1 (2019-02-17)
+-------------------
+
+- [NEW] Add support for Python 3.7.
+- [CHANGE] Remove deprecation decorators for Arrow.range(), Arrow.span_range() and Arrow.interval(), all now return generators, wrap with list() to get old behavior.
+- [FIX] Documentation and docstring updates.
+
+0.13.0 (2019-01-09)
+-------------------
+
+- [NEW] Added support for Python 3.6.
+- [CHANGE] Drop support for Python 2.6/3.3.
+- [CHANGE] Return generator instead of list for Arrow.range(), Arrow.span_range() and Arrow.interval().
+- [FIX] Make arrow.get() work with str & tzinfo combo.
+- [FIX] Make sure special RegEx characters are escaped in format string.
+- [NEW] Added support for ZZZ when formatting.
+- [FIX] Stop using datetime.utcnow() in internals, use datetime.now(UTC) instead.
+- [FIX] Return NotImplemented instead of TypeError in arrow math internals.
+- [NEW] Added Estonian Locale.
+- [FIX] Small fixes to Greek locale.
+- [FIX] TagalogLocale improvements.
+- [FIX] Added test requirements to setup.
+- [FIX] Improve docs for get, now and utcnow methods.
+- [FIX] Correct typo in deprecation warning.
+
+0.12.1
+------
+
+- [FIX] Allow universal wheels to be generated and reliably installed.
+- [FIX] Make humanize respect only_distance when granularity argument is also given.
+
+0.12.0
+------
+
+- [FIX] Compatibility fix for Python 2.x
+
+0.11.0
+------
+
+- [FIX] Fix grammar of ArabicLocale
+- [NEW] Add Nepali Locale
+- [FIX] Fix month name + rename AustriaLocale -> AustrianLocale
+- [FIX] Fix typo in Basque Locale
+- [FIX] Fix grammar in PortugueseBrazilian locale
+- [FIX] Remove pip --user-mirrors flag
+- [NEW] Add Indonesian Locale
+
+0.10.0
+------
+
+- [FIX] Fix getattr off by one for quarter
+- [FIX] Fix negative offset for UTC
+- [FIX] Update arrow.py
+
+0.9.0
+-----
+
+- [NEW] Remove duplicate code
+- [NEW] Support gnu date iso 8601
+- [NEW] Add support for universal wheels
+- [NEW] Slovenian locale
+- [NEW] Slovak locale
+- [NEW] Romanian locale
+- [FIX] respect limit even if end is defined range
+- [FIX] Separate replace & shift functions
+- [NEW] Added tox
+- [FIX] Fix supported Python versions in documentation
+- [NEW] Azerbaijani locale added, locale issue fixed in Turkish.
+- [FIX] Format ParserError's raise message
+
+0.8.0
+-----
+
+- []
+
+0.7.1
+-----
+
+- [NEW] Esperanto locale (batisteo)
+
+0.7.0
+-----
+
+- [FIX] Parse localized strings #228 (swistakm)
+- [FIX] Modify tzinfo parameter in ``get`` api #221 (bottleimp)
+- [FIX] Fix Czech locale (PrehistoricTeam)
+- [FIX] Raise TypeError when adding/subtracting non-dates (itsmeolivia)
+- [FIX] Fix pytz conversion error (Kudo)
+- [FIX] Fix overzealous time truncation in span_range (kdeldycke)
+- [NEW] Humanize for time duration #232 (ybrs)
+- [NEW] Add Thai locale (sipp11)
+- [NEW] Adding Belarusian (be) locale (oire)
+- [NEW] Search date in strings (beenje)
+- [NEW] Note that arrow's tokens differ from strptime's. (offby1)
+
+0.6.0
+-----
+
+- [FIX] Added support for Python 3
+- [FIX] Avoid truncating oversized epoch timestamps. Fixes #216.
+- [FIX] Fixed month abbreviations for Ukrainian
+- [FIX] Fix typo timezone
+- [FIX] A couple of dialect fixes and two new languages
+- [FIX] Spanish locale: ``Miercoles`` should have acute accent
+- [FIX] Fix Finnish grammar
+- [FIX] Fix typo in 'Arrow.floor' docstring
+- [FIX] Use read() utility to open README
+- [FIX] span_range for week frame
+- [NEW] Add minimal support for fractional seconds longer than six digits.
+- [NEW] Adding locale support for Marathi (mr)
+- [NEW] Add count argument to span method
+- [NEW] Improved docs
+
+0.5.1 - 0.5.4
+-------------
+
+- [FIX] test the behavior of simplejson instead of calling for_json directly (tonyseek)
+- [FIX] Add Hebrew Locale (doodyparizada)
+- [FIX] Update documentation location (andrewelkins)
+- [FIX] Update setup.py Development Status level (andrewelkins)
+- [FIX] Case insensitive month match (cshowe)
+
+0.5.0
+-----
+
+- [NEW] struct_time addition. (mhworth)
+- [NEW] Version grep (eirnym)
+- [NEW] Default to ISO 8601 format (emonty)
+- [NEW] Raise TypeError on comparison (sniekamp)
+- [NEW] Adding Macedonian(mk) locale (krisfremen)
+- [FIX] Fix for ISO seconds and fractional seconds (sdispater) (andrewelkins)
+- [FIX] Use correct Dutch wording for "hours" (wbolster)
+- [FIX] Complete the list of english locales (indorilftw)
+- [FIX] Change README to reStructuredText (nyuszika7h)
+- [FIX] Parse lower-cased 'h' (tamentis)
+- [FIX] Slight modifications to Dutch locale (nvie)
+
+0.4.4
+-----
+
+- [NEW] Include the docs in the released tarball
+- [NEW] Czech localization Czech localization for Arrow
+- [NEW] Add fa_ir to locales
+- [FIX] Fixes parsing of time strings with a final Z
+- [FIX] Fixes ISO parsing and formatting for fractional seconds
+- [FIX] test_fromtimestamp sp
+- [FIX] some typos fixed
+- [FIX] removed an unused import statement
+- [FIX] docs table fix
+- [FIX] Issue with specify 'X' template and no template at all to arrow.get
+- [FIX] Fix "import" typo in docs/index.rst
+- [FIX] Fix unit tests for zero passed
+- [FIX] Update layout.html
+- [FIX] In Norwegian and new Norwegian months and weekdays should not be capitalized
+- [FIX] Fixed discrepancy between specifying 'X' to arrow.get and specifying no template
+
+0.4.3
+-----
+
+- [NEW] Turkish locale (Emre)
+- [NEW] Arabic locale (Mosab Ahmad)
+- [NEW] Danish locale (Holmars)
+- [NEW] Icelandic locale (Holmars)
+- [NEW] Hindi locale (Atmb4u)
+- [NEW] Malayalam locale (Atmb4u)
+- [NEW] Finnish locale (Stormpat)
+- [NEW] Portuguese locale (Danielcorreia)
+- [NEW] ``h`` and ``hh`` strings are now supported (Averyonghub)
+- [FIX] An incorrect inflection in the Polish locale has been fixed (Avalanchy)
+- [FIX] ``arrow.get`` now properly handles ``Date`` (Jaapz)
+- [FIX] Tests are now declared in ``setup.py`` and the manifest (Pypingou)
+- [FIX] ``__version__`` has been added to ``__init__.py`` (Sametmax)
+- [FIX] ISO 8601 strings can be parsed without a separator (Ivandiguisto / Root)
+- [FIX] Documentation is now more clear regarding some inputs on ``arrow.get`` (Eriktaubeneck)
+- [FIX] Some documentation links have been fixed (Vrutsky)
+- [FIX] Error messages for parse errors are now more descriptive (Maciej Albin)
+- [FIX] The parser now correctly checks for separators in strings (Mschwager)
+
+0.4.2
+-----
+
+- [NEW] Factory ``get`` method now accepts a single ``Arrow`` argument.
+- [NEW] Tokens SSSS, SSSSS and SSSSSS are supported in parsing.
+- [NEW] ``Arrow`` objects have a ``float_timestamp`` property.
+- [NEW] Vietnamese locale (Iu1nguoi)
+- [NEW] Factory ``get`` method now accepts a list of format strings (Dgilland)
+- [NEW] A MANIFEST.in file has been added (Pypingou)
+- [NEW] Tests can be run directly from ``setup.py`` (Pypingou)
+- [FIX] Arrow docs now list 'day of week' format tokens correctly (Rudolphfroger)
+- [FIX] Several issues with the Korean locale have been resolved (Yoloseem)
+- [FIX] ``humanize`` now correctly returns unicode (Shvechikov)
+- [FIX] ``Arrow`` objects now pickle / unpickle correctly (Yoloseem)
+
+0.4.1
+-----
+
+- [NEW] Table / explanation of formatting & parsing tokens in docs
+- [NEW] Brazilian locale (Augusto2112)
+- [NEW] Dutch locale (OrangeTux)
+- [NEW] Italian locale (Pertux)
+- [NEW] Austrian locale (LeChewbacca)
+- [NEW] Tagalog locale (Marksteve)
+- [FIX] Corrected spelling and day numbers in German locale (LeChewbacca)
+- [FIX] Factory ``get`` method should now handle unicode strings correctly (Bwells)
+- [FIX] Midnight and noon should now parse and format correctly (Bwells)
+
+0.4.0
+-----
+
+- [NEW] Format-free ISO 8601 parsing in factory ``get`` method
+- [NEW] Support for 'week' / 'weeks' in ``span``, ``range``, ``span_range``, ``floor`` and ``ceil``
+- [NEW] Support for 'weeks' in ``replace``
+- [NEW] Norwegian locale (Martinp)
+- [NEW] Japanese locale (CortYuming)
+- [FIX] Timezones no longer show the wrong sign when formatted (Bean)
+- [FIX] Microseconds are parsed correctly from strings (Bsidhom)
+- [FIX] Locale day-of-week is no longer off by one (Cynddl)
+- [FIX] Corrected plurals of Ukrainian and Russian nouns (Catchagain)
+- [CHANGE] Old 0.1 ``arrow`` module method removed
+- [CHANGE] Dropped timestamp support in ``range`` and ``span_range`` (never worked correctly)
+- [CHANGE] Dropped parsing of single string as tz string in factory ``get`` method (replaced by ISO 8601)
+
+0.3.5
+-----
+
+- [NEW] French locale (Cynddl)
+- [NEW] Spanish locale (Slapresta)
+- [FIX] Ranges handle multiple timezones correctly (Ftobia)
+
+0.3.4
+-----
+
+- [FIX] Humanize no longer sometimes returns the wrong month delta
+- [FIX] ``__format__`` works correctly with no format string
+
+0.3.3
+-----
+
+- [NEW] Python 2.6 support
+- [NEW] Initial support for locale-based parsing and formatting
+- [NEW] ArrowFactory class, now proxied as the module API
+- [NEW] ``factory`` api method to obtain a factory for a custom type
+- [FIX] Python 3 support and tests completely ironed out
+
+0.3.2
+-----
+
+- [NEW] Python 3+ support
+
+0.3.1
+-----
+
+- [FIX] The old ``arrow`` module function handles timestamps correctly as it used to
+
+0.3.0
+-----
+
+- [NEW] ``Arrow.replace`` method
+- [NEW] Accept timestamps, datetimes and Arrows for datetime inputs, where reasonable
+- [FIX] ``range`` and ``span_range`` respect end and limit parameters correctly
+- [CHANGE] Arrow objects are no longer mutable
+- [CHANGE] Plural attribute name semantics altered: single -> absolute, plural -> relative
+- [CHANGE] Plural names no longer supported as properties (e.g. ``arrow.utcnow().years``)
+
+0.2.1
+-----
+
+- [NEW] Support for localized humanization
+- [NEW] English, Russian, Greek, Korean, Chinese locales
+
+0.2.0
+-----
+
+- **REWRITE**
+- [NEW] Date parsing
+- [NEW] Date formatting
+- [NEW] ``floor``, ``ceil`` and ``span`` methods
+- [NEW] ``datetime`` interface implementation
+- [NEW] ``clone`` method
+- [NEW] ``get``, ``now`` and ``utcnow`` API methods
+
+0.1.6
+-----
+
+- [NEW] Humanized time deltas
+- [NEW] ``__eq__`` implemented
+- [FIX] Issues with conversions related to daylight savings time resolved
+- [CHANGE] ``__str__`` uses ISO formatting
+
+0.1.5
+-----
+
+- **Started tracking changes**
+- [NEW] Parsing of ISO-formatted time zone offsets (e.g. '+02:30', '-05:00')
+- [NEW] Resolved some issues with timestamps and delta / Olson time zones
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/LICENSE b/openpype/modules/ftrack/python2_vendor/arrow/LICENSE
new file mode 100644
index 0000000000..2bef500de7
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2019 Chris Smith
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/MANIFEST.in b/openpype/modules/ftrack/python2_vendor/arrow/MANIFEST.in
new file mode 100644
index 0000000000..d9955ed96a
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/MANIFEST.in
@@ -0,0 +1,3 @@
+include LICENSE CHANGELOG.rst README.rst Makefile requirements.txt tox.ini
+recursive-include tests *.py
+recursive-include docs *.py *.rst *.bat Makefile
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/Makefile b/openpype/modules/ftrack/python2_vendor/arrow/Makefile
new file mode 100644
index 0000000000..f294985dc6
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/Makefile
@@ -0,0 +1,44 @@
+.PHONY: auto test docs clean
+
+auto: build38
+
+build27: PYTHON_VER = python2.7
+build35: PYTHON_VER = python3.5
+build36: PYTHON_VER = python3.6
+build37: PYTHON_VER = python3.7
+build38: PYTHON_VER = python3.8
+build39: PYTHON_VER = python3.9
+
+build27 build35 build36 build37 build38 build39: clean
+ virtualenv venv --python=$(PYTHON_VER)
+ . venv/bin/activate; \
+ pip install -r requirements.txt; \
+ pre-commit install
+
+test:
+ rm -f .coverage coverage.xml
+ . venv/bin/activate; pytest
+
+lint:
+ . venv/bin/activate; pre-commit run --all-files --show-diff-on-failure
+
+docs:
+ rm -rf docs/_build
+ . venv/bin/activate; cd docs; make html
+
+clean: clean-dist
+ rm -rf venv .pytest_cache ./**/__pycache__
+ rm -f .coverage coverage.xml ./**/*.pyc
+
+clean-dist:
+ rm -rf dist build .egg .eggs arrow.egg-info
+
+build-dist:
+ . venv/bin/activate; \
+ pip install -U setuptools twine wheel; \
+ python setup.py sdist bdist_wheel
+
+upload-dist:
+ . venv/bin/activate; twine upload dist/*
+
+publish: test clean-dist build-dist upload-dist clean-dist
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/README.rst b/openpype/modules/ftrack/python2_vendor/arrow/README.rst
new file mode 100644
index 0000000000..69f6c50d81
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/README.rst
@@ -0,0 +1,133 @@
+Arrow: Better dates & times for Python
+======================================
+
+.. start-inclusion-marker-do-not-remove
+
+.. image:: https://github.com/arrow-py/arrow/workflows/tests/badge.svg?branch=master
+ :alt: Build Status
+ :target: https://github.com/arrow-py/arrow/actions?query=workflow%3Atests+branch%3Amaster
+
+.. image:: https://codecov.io/gh/arrow-py/arrow/branch/master/graph/badge.svg
+ :alt: Coverage
+ :target: https://codecov.io/gh/arrow-py/arrow
+
+.. image:: https://img.shields.io/pypi/v/arrow.svg
+ :alt: PyPI Version
+ :target: https://pypi.python.org/pypi/arrow
+
+.. image:: https://img.shields.io/pypi/pyversions/arrow.svg
+ :alt: Supported Python Versions
+ :target: https://pypi.python.org/pypi/arrow
+
+.. image:: https://img.shields.io/pypi/l/arrow.svg
+ :alt: License
+ :target: https://pypi.python.org/pypi/arrow
+
+.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
+ :alt: Code Style: Black
+ :target: https://github.com/psf/black
+
+
+**Arrow** is a Python library that offers a sensible and human-friendly approach to creating, manipulating, formatting and converting dates, times and timestamps. It implements and updates the datetime type, plugging gaps in functionality and providing an intelligent module API that supports many common creation scenarios. Simply put, it helps you work with dates and times with fewer imports and a lot less code.
+
+Arrow is named after the `arrow of time <https://en.wikipedia.org/wiki/Arrow_of_time>`_ and is heavily inspired by `moment.js <https://github.com/moment/moment>`_ and `requests <https://github.com/psf/requests>`_.
+
+Why use Arrow over built-in modules?
+------------------------------------
+
+Python's standard library and some other low-level modules have near-complete date, time and timezone functionality, but don't work very well from a usability perspective:
+
+- Too many modules: datetime, time, calendar, dateutil, pytz and more
+- Too many types: date, time, datetime, tzinfo, timedelta, relativedelta, etc.
+- Timezones and timestamp conversions are verbose and unpleasant
+- Timezone naivety is the norm
+- Gaps in functionality: ISO 8601 parsing, timespans, humanization
+
+Features
+--------
+
+- Fully-implemented, drop-in replacement for datetime
+- Supports Python 2.7, 3.5, 3.6, 3.7, 3.8 and 3.9
+- Timezone-aware and UTC by default
+- Provides super-simple creation options for many common input scenarios
+- :code:`shift` method with support for relative offsets, including weeks
+- Formats and parses strings automatically
+- Wide support for ISO 8601
+- Timezone conversion
+- Timestamp available as a property
+- Generates time spans, ranges, floors and ceilings for time frames ranging from microsecond to year
+- Humanizes and supports a growing list of contributed locales
+- Extensible for your own Arrow-derived types
+
+Quick Start
+-----------
+
+Installation
+~~~~~~~~~~~~
+
+To install Arrow, use `pip <https://pip.pypa.io/en/stable/quickstart/>`_ or `pipenv <https://docs.pipenv.org>`_:
+
+.. code-block:: console
+
+ $ pip install -U arrow
+
+Example Usage
+~~~~~~~~~~~~~
+
+.. code-block:: python
+
+ >>> import arrow
+ >>> arrow.get('2013-05-11T21:23:58.970460+07:00')
+    <Arrow [2013-05-11T21:23:58.970460+07:00]>
+
+ >>> utc = arrow.utcnow()
+ >>> utc
+    <Arrow [2013-05-11T21:23:58.970460+00:00]>
+
+ >>> utc = utc.shift(hours=-1)
+ >>> utc
+    <Arrow [2013-05-11T20:23:58.970460+00:00]>
+
+ >>> local = utc.to('US/Pacific')
+ >>> local
+    <Arrow [2013-05-11T13:23:58.970460-07:00]>
+
+ >>> local.timestamp
+ 1368303838
+
+ >>> local.format()
+ '2013-05-11 13:23:58 -07:00'
+
+ >>> local.format('YYYY-MM-DD HH:mm:ss ZZ')
+ '2013-05-11 13:23:58 -07:00'
+
+ >>> local.humanize()
+ 'an hour ago'
+
+ >>> local.humanize(locale='ko_kr')
+ '1시간 전'
+
+.. end-inclusion-marker-do-not-remove
+
+Documentation
+-------------
+
+For full documentation, please visit `arrow.readthedocs.io <https://arrow.readthedocs.io>`_.
+
+Contributing
+------------
+
+Contributions are welcome for both code and localizations (adding and updating locales). Begin by gaining familiarity with the Arrow library and its features. Then, jump into contributing:
+
+#. Find an issue or feature to tackle on the `issue tracker <https://github.com/arrow-py/arrow/issues>`_. Issues marked with the `"good first issue" label <https://github.com/arrow-py/arrow/labels/good%20first%20issue>`_ may be a great place to start!
+#. Fork `this repository <https://github.com/arrow-py/arrow>`_ on GitHub and begin making changes in a branch.
+#. Add a few tests to ensure that the bug was fixed or the feature works as expected.
+#. Run the entire test suite and linting checks by running one of the following commands: :code:`tox` (if you have `tox <https://tox.readthedocs.io/en/latest/>`_ installed) **OR** :code:`make build38 && make test && make lint` (if you do not have Python 3.8 installed, replace :code:`build38` with the latest Python version on your system).
+#. Submit a pull request and await feedback 😃.
+
+If you have any questions along the way, feel free to ask them `here `_.
+
+Support Arrow
+-------------
+
+`Open Collective <https://opencollective.com/>`_ is an online funding platform that provides tools to raise money and share your finances with full transparency. It is the platform of choice for individuals and companies to make one-time or recurring donations directly to the project. If you are interested in making a financial contribution, please visit the `Arrow collective <https://opencollective.com/arrow>`_.
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/arrow/__init__.py b/openpype/modules/ftrack/python2_vendor/arrow/arrow/__init__.py
new file mode 100644
index 0000000000..2883527be8
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/arrow/__init__.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+from ._version import __version__
+from .api import get, now, utcnow
+from .arrow import Arrow
+from .factory import ArrowFactory
+from .formatter import (
+ FORMAT_ATOM,
+ FORMAT_COOKIE,
+ FORMAT_RFC822,
+ FORMAT_RFC850,
+ FORMAT_RFC1036,
+ FORMAT_RFC1123,
+ FORMAT_RFC2822,
+ FORMAT_RFC3339,
+ FORMAT_RSS,
+ FORMAT_W3C,
+)
+from .parser import ParserError
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/arrow/_version.py b/openpype/modules/ftrack/python2_vendor/arrow/arrow/_version.py
new file mode 100644
index 0000000000..fd86b3ee91
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/arrow/_version.py
@@ -0,0 +1 @@
+__version__ = "0.17.0"
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/arrow/api.py b/openpype/modules/ftrack/python2_vendor/arrow/arrow/api.py
new file mode 100644
index 0000000000..a6b7be3de2
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/arrow/api.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+"""
+Provides the default implementation of :class:`ArrowFactory <arrow.factory.ArrowFactory>`
+methods for use as a module API.
+
+"""
+
+from __future__ import absolute_import
+
+from arrow.factory import ArrowFactory
+
+# internal default factory.
+_factory = ArrowFactory()
+
+
+def get(*args, **kwargs):
+    """Calls the default :class:`ArrowFactory <arrow.factory.ArrowFactory>` ``get`` method."""
+
+ return _factory.get(*args, **kwargs)
+
+
+get.__doc__ = _factory.get.__doc__
+
+
+def utcnow():
+    """Calls the default :class:`ArrowFactory <arrow.factory.ArrowFactory>` ``utcnow`` method."""
+
+ return _factory.utcnow()
+
+
+utcnow.__doc__ = _factory.utcnow.__doc__
+
+
+def now(tz=None):
+    """Calls the default :class:`ArrowFactory <arrow.factory.ArrowFactory>` ``now`` method."""
+
+ return _factory.now(tz)
+
+
+now.__doc__ = _factory.now.__doc__
+
+
+def factory(type):
+    """Returns an :class:`.ArrowFactory` for the specified :class:`Arrow <arrow.arrow.Arrow>`
+    or derived type.
+
+    :param type: the type, :class:`Arrow <arrow.arrow.Arrow>` or derived.
+
+    """
+
+ return ArrowFactory(type)
+
+
+__all__ = ["get", "utcnow", "now", "factory"]
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/arrow/arrow.py b/openpype/modules/ftrack/python2_vendor/arrow/arrow/arrow.py
new file mode 100644
index 0000000000..4fe9541789
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/arrow/arrow.py
@@ -0,0 +1,1584 @@
+# -*- coding: utf-8 -*-
+"""
+Provides the :class:`Arrow ` class, an enhanced ``datetime``
+replacement.
+
+"""
+
+from __future__ import absolute_import
+
+import calendar
+import sys
+import warnings
+from datetime import datetime, timedelta
+from datetime import tzinfo as dt_tzinfo
+from math import trunc
+
+from dateutil import tz as dateutil_tz
+from dateutil.relativedelta import relativedelta
+
+from arrow import formatter, locales, parser, util
+
+if sys.version_info[:2] < (3, 6): # pragma: no cover
+ with warnings.catch_warnings():
+ warnings.simplefilter("default", DeprecationWarning)
+ warnings.warn(
+ "Arrow will drop support for Python 2.7 and 3.5 in the upcoming v1.0.0 release. Please upgrade to "
+ "Python 3.6+ to continue receiving updates for Arrow.",
+ DeprecationWarning,
+ )
+
+
+class Arrow(object):
+ """An :class:`Arrow ` object.
+
+ Implements the ``datetime`` interface, behaving as an aware ``datetime`` while implementing
+ additional functionality.
+
+ :param year: the calendar year.
+ :param month: the calendar month.
+ :param day: the calendar day.
+ :param hour: (optional) the hour. Defaults to 0.
+ :param minute: (optional) the minute, Defaults to 0.
+ :param second: (optional) the second, Defaults to 0.
+ :param microsecond: (optional) the microsecond. Defaults to 0.
+ :param tzinfo: (optional) A timezone expression. Defaults to UTC.
+ :param fold: (optional) 0 or 1, used to disambiguate repeated times. Defaults to 0.
+
+ .. _tz-expr:
+
+ Recognized timezone expressions:
+
+ - A ``tzinfo`` object.
+ - A ``str`` describing a timezone, similar to 'US/Pacific', or 'Europe/Berlin'.
+ - A ``str`` in ISO 8601 style, as in '+07:00'.
+ - A ``str``, one of the following: 'local', 'utc', 'UTC'.
+
+ Usage::
+
+ >>> import arrow
+ >>> arrow.Arrow(2013, 5, 5, 12, 30, 45)
+        <Arrow [2013-05-05T12:30:45+00:00]>
+
+ """
+
+ resolution = datetime.resolution
+
+ _ATTRS = ["year", "month", "day", "hour", "minute", "second", "microsecond"]
+ _ATTRS_PLURAL = ["{}s".format(a) for a in _ATTRS]
+ _MONTHS_PER_QUARTER = 3
+ _SECS_PER_MINUTE = float(60)
+ _SECS_PER_HOUR = float(60 * 60)
+ _SECS_PER_DAY = float(60 * 60 * 24)
+ _SECS_PER_WEEK = float(60 * 60 * 24 * 7)
+ _SECS_PER_MONTH = float(60 * 60 * 24 * 30.5)
+ _SECS_PER_YEAR = float(60 * 60 * 24 * 365.25)
+
+ def __init__(
+ self,
+ year,
+ month,
+ day,
+ hour=0,
+ minute=0,
+ second=0,
+ microsecond=0,
+ tzinfo=None,
+ **kwargs
+ ):
+ if tzinfo is None:
+ tzinfo = dateutil_tz.tzutc()
+ # detect that tzinfo is a pytz object (issue #626)
+ elif (
+ isinstance(tzinfo, dt_tzinfo)
+ and hasattr(tzinfo, "localize")
+ and hasattr(tzinfo, "zone")
+ and tzinfo.zone
+ ):
+ tzinfo = parser.TzinfoParser.parse(tzinfo.zone)
+ elif util.isstr(tzinfo):
+ tzinfo = parser.TzinfoParser.parse(tzinfo)
+
+ fold = kwargs.get("fold", 0)
+
+ # use enfold here to cover direct arrow.Arrow init on 2.7/3.5
+ self._datetime = dateutil_tz.enfold(
+ datetime(year, month, day, hour, minute, second, microsecond, tzinfo),
+ fold=fold,
+ )
+
+ # factories: single object, both original and from datetime.
+
+ @classmethod
+ def now(cls, tzinfo=None):
+ """Constructs an :class:`Arrow ` object, representing "now" in the given
+ timezone.
+
+ :param tzinfo: (optional) a ``tzinfo`` object. Defaults to local time.
+
+ Usage::
+
+ >>> arrow.now('Asia/Baku')
+            <Arrow [2019-01-24T20:26:31.146412+04:00]>
+
+ """
+
+ if tzinfo is None:
+ tzinfo = dateutil_tz.tzlocal()
+
+ dt = datetime.now(tzinfo)
+
+ return cls(
+ dt.year,
+ dt.month,
+ dt.day,
+ dt.hour,
+ dt.minute,
+ dt.second,
+ dt.microsecond,
+ dt.tzinfo,
+ fold=getattr(dt, "fold", 0),
+ )
+
+ @classmethod
+ def utcnow(cls):
+ """Constructs an :class:`Arrow ` object, representing "now" in UTC
+ time.
+
+ Usage::
+
+ >>> arrow.utcnow()
+            <Arrow [2019-01-24T16:28:43.952435+00:00]>
+
+ """
+
+ dt = datetime.now(dateutil_tz.tzutc())
+
+ return cls(
+ dt.year,
+ dt.month,
+ dt.day,
+ dt.hour,
+ dt.minute,
+ dt.second,
+ dt.microsecond,
+ dt.tzinfo,
+ fold=getattr(dt, "fold", 0),
+ )
+
+ @classmethod
+ def fromtimestamp(cls, timestamp, tzinfo=None):
+ """Constructs an :class:`Arrow ` object from a timestamp, converted to
+ the given timezone.
+
+ :param timestamp: an ``int`` or ``float`` timestamp, or a ``str`` that converts to either.
+ :param tzinfo: (optional) a ``tzinfo`` object. Defaults to local time.
+ """
+
+ if tzinfo is None:
+ tzinfo = dateutil_tz.tzlocal()
+ elif util.isstr(tzinfo):
+ tzinfo = parser.TzinfoParser.parse(tzinfo)
+
+ if not util.is_timestamp(timestamp):
+ raise ValueError(
+ "The provided timestamp '{}' is invalid.".format(timestamp)
+ )
+
+ timestamp = util.normalize_timestamp(float(timestamp))
+ dt = datetime.fromtimestamp(timestamp, tzinfo)
+
+ return cls(
+ dt.year,
+ dt.month,
+ dt.day,
+ dt.hour,
+ dt.minute,
+ dt.second,
+ dt.microsecond,
+ dt.tzinfo,
+ fold=getattr(dt, "fold", 0),
+ )
+
+ @classmethod
+ def utcfromtimestamp(cls, timestamp):
+ """Constructs an :class:`Arrow ` object from a timestamp, in UTC time.
+
+ :param timestamp: an ``int`` or ``float`` timestamp, or a ``str`` that converts to either.
+
+ """
+
+ if not util.is_timestamp(timestamp):
+ raise ValueError(
+ "The provided timestamp '{}' is invalid.".format(timestamp)
+ )
+
+ timestamp = util.normalize_timestamp(float(timestamp))
+ dt = datetime.utcfromtimestamp(timestamp)
+
+ return cls(
+ dt.year,
+ dt.month,
+ dt.day,
+ dt.hour,
+ dt.minute,
+ dt.second,
+ dt.microsecond,
+ dateutil_tz.tzutc(),
+ fold=getattr(dt, "fold", 0),
+ )
+
+ @classmethod
+ def fromdatetime(cls, dt, tzinfo=None):
+ """Constructs an :class:`Arrow ` object from a ``datetime`` and
+ optional replacement timezone.
+
+ :param dt: the ``datetime``
+ :param tzinfo: (optional) A :ref:`timezone expression `. Defaults to ``dt``'s
+ timezone, or UTC if naive.
+
+ If you only want to replace the timezone of naive datetimes::
+
+ >>> dt
+ datetime.datetime(2013, 5, 5, 0, 0, tzinfo=tzutc())
+ >>> arrow.Arrow.fromdatetime(dt, dt.tzinfo or 'US/Pacific')
+
+
+ """
+
+ if tzinfo is None:
+ if dt.tzinfo is None:
+ tzinfo = dateutil_tz.tzutc()
+ else:
+ tzinfo = dt.tzinfo
+
+ return cls(
+ dt.year,
+ dt.month,
+ dt.day,
+ dt.hour,
+ dt.minute,
+ dt.second,
+ dt.microsecond,
+ tzinfo,
+ fold=getattr(dt, "fold", 0),
+ )
+
+ @classmethod
+ def fromdate(cls, date, tzinfo=None):
+ """Constructs an :class:`Arrow ` object from a ``date`` and optional
+ replacement timezone. Time values are set to 0.
+
+ :param date: the ``date``
+ :param tzinfo: (optional) A :ref:`timezone expression `. Defaults to UTC.
+ """
+
+ if tzinfo is None:
+ tzinfo = dateutil_tz.tzutc()
+
+ return cls(date.year, date.month, date.day, tzinfo=tzinfo)
+
+ @classmethod
+ def strptime(cls, date_str, fmt, tzinfo=None):
+ """Constructs an :class:`Arrow ` object from a date string and format,
+ in the style of ``datetime.strptime``. Optionally replaces the parsed timezone.
+
+ :param date_str: the date string.
+ :param fmt: the format string.
+ :param tzinfo: (optional) A :ref:`timezone expression `. Defaults to the parsed
+ timezone if ``fmt`` contains a timezone directive, otherwise UTC.
+
+ Usage::
+
+ >>> arrow.Arrow.strptime('20-01-2019 15:49:10', '%d-%m-%Y %H:%M:%S')
+
+
+ """
+
+ dt = datetime.strptime(date_str, fmt)
+ if tzinfo is None:
+ tzinfo = dt.tzinfo
+
+ return cls(
+ dt.year,
+ dt.month,
+ dt.day,
+ dt.hour,
+ dt.minute,
+ dt.second,
+ dt.microsecond,
+ tzinfo,
+ fold=getattr(dt, "fold", 0),
+ )
+
+ # factories: ranges and spans
+
+ @classmethod
+ def range(cls, frame, start, end=None, tz=None, limit=None):
+ """Returns an iterator of :class:`Arrow ` objects, representing
+ points in time between two inputs.
+
+ :param frame: The timeframe. Can be any ``datetime`` property (day, hour, minute...).
+ :param start: A datetime expression, the start of the range.
+ :param end: (optional) A datetime expression, the end of the range.
+ :param tz: (optional) A :ref:`timezone expression `. Defaults to
+ ``start``'s timezone, or UTC if ``start`` is naive.
+ :param limit: (optional) A maximum number of tuples to return.
+
+ **NOTE**: The ``end`` or ``limit`` must be provided. Call with ``end`` alone to
+ return the entire range. Call with ``limit`` alone to return a maximum # of results from
+ the start. Call with both to cap a range at a maximum # of results.
+
+ **NOTE**: ``tz`` internally **replaces** the timezones of both ``start`` and ``end`` before
+ iterating. As such, either call with naive objects and ``tz``, or aware objects from the
+ same timezone and no ``tz``.
+
+ Supported frame values: year, quarter, month, week, day, hour, minute, second.
+
+ Recognized datetime expressions:
+
+ - An :class:`Arrow ` object.
+ - A ``datetime`` object.
+
+ Usage::
+
+ >>> start = datetime(2013, 5, 5, 12, 30)
+ >>> end = datetime(2013, 5, 5, 17, 15)
+ >>> for r in arrow.Arrow.range('hour', start, end):
+ ... print(repr(r))
+ ...
+
+
+
+
+
+
+ **NOTE**: Unlike Python's ``range``, ``end`` *may* be included in the returned iterator::
+
+ >>> start = datetime(2013, 5, 5, 12, 30)
+ >>> end = datetime(2013, 5, 5, 13, 30)
+ >>> for r in arrow.Arrow.range('hour', start, end):
+ ... print(repr(r))
+ ...
+
+
+
+ """
+
+ _, frame_relative, relative_steps = cls._get_frames(frame)
+
+ tzinfo = cls._get_tzinfo(start.tzinfo if tz is None else tz)
+
+ start = cls._get_datetime(start).replace(tzinfo=tzinfo)
+ end, limit = cls._get_iteration_params(end, limit)
+ end = cls._get_datetime(end).replace(tzinfo=tzinfo)
+
+ current = cls.fromdatetime(start)
+ original_day = start.day
+ day_is_clipped = False
+ i = 0
+
+ while current <= end and i < limit:
+ i += 1
+ yield current
+
+ values = [getattr(current, f) for f in cls._ATTRS]
+ current = cls(*values, tzinfo=tzinfo).shift(
+ **{frame_relative: relative_steps}
+ )
+
+ if frame in ["month", "quarter", "year"] and current.day < original_day:
+ day_is_clipped = True
+
+ if day_is_clipped and not cls._is_last_day_of_month(current):
+ current = current.replace(day=original_day)
+
+ def span(self, frame, count=1, bounds="[)"):
+ """Returns two new :class:`Arrow ` objects, representing the timespan
+ of the :class:`Arrow ` object in a given timeframe.
+
+ :param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...).
+ :param count: (optional) the number of frames to span.
+ :param bounds: (optional) a ``str`` of either '()', '(]', '[)', or '[]' that specifies
+ whether to include or exclude the start and end values in the span. '(' excludes
+ the start, '[' includes the start, ')' excludes the end, and ']' includes the end.
+ If the bounds are not specified, the default bound '[)' is used.
+
+ Supported frame values: year, quarter, month, week, day, hour, minute, second.
+
+ Usage::
+
+ >>> arrow.utcnow()
+
+
+ >>> arrow.utcnow().span('hour')
+ (, )
+
+ >>> arrow.utcnow().span('day')
+ (, )
+
+ >>> arrow.utcnow().span('day', count=2)
+ (, )
+
+ >>> arrow.utcnow().span('day', bounds='[]')
+ (, )
+
+ """
+
+ util.validate_bounds(bounds)
+
+ frame_absolute, frame_relative, relative_steps = self._get_frames(frame)
+
+ if frame_absolute == "week":
+ attr = "day"
+ elif frame_absolute == "quarter":
+ attr = "month"
+ else:
+ attr = frame_absolute
+
+ index = self._ATTRS.index(attr)
+ frames = self._ATTRS[: index + 1]
+
+ values = [getattr(self, f) for f in frames]
+
+ for _ in range(3 - len(values)):
+ values.append(1)
+
+ floor = self.__class__(*values, tzinfo=self.tzinfo)
+
+ if frame_absolute == "week":
+ floor = floor.shift(days=-(self.isoweekday() - 1))
+ elif frame_absolute == "quarter":
+ floor = floor.shift(months=-((self.month - 1) % 3))
+
+ ceil = floor.shift(**{frame_relative: count * relative_steps})
+
+ if bounds[0] == "(":
+ floor = floor.shift(microseconds=+1)
+
+ if bounds[1] == ")":
+ ceil = ceil.shift(microseconds=-1)
+
+ return floor, ceil
+
+ def floor(self, frame):
+ """Returns a new :class:`Arrow ` object, representing the "floor"
+ of the timespan of the :class:`Arrow ` object in a given timeframe.
+ Equivalent to the first element in the 2-tuple returned by
+ :func:`span `.
+
+ :param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...).
+
+ Usage::
+
+ >>> arrow.utcnow().floor('hour')
+
+ """
+
+ return self.span(frame)[0]
+
+ def ceil(self, frame):
+ """Returns a new :class:`Arrow ` object, representing the "ceiling"
+ of the timespan of the :class:`Arrow ` object in a given timeframe.
+ Equivalent to the second element in the 2-tuple returned by
+ :func:`span `.
+
+ :param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...).
+
+ Usage::
+
+ >>> arrow.utcnow().ceil('hour')
+
+ """
+
+ return self.span(frame)[1]
+
+ @classmethod
+ def span_range(cls, frame, start, end, tz=None, limit=None, bounds="[)"):
+ """Returns an iterator of tuples, each :class:`Arrow ` objects,
+ representing a series of timespans between two inputs.
+
+ :param frame: The timeframe. Can be any ``datetime`` property (day, hour, minute...).
+ :param start: A datetime expression, the start of the range.
+ :param end: (optional) A datetime expression, the end of the range.
+ :param tz: (optional) A :ref:`timezone expression `. Defaults to
+ ``start``'s timezone, or UTC if ``start`` is naive.
+ :param limit: (optional) A maximum number of tuples to return.
+ :param bounds: (optional) a ``str`` of either '()', '(]', '[)', or '[]' that specifies
+ whether to include or exclude the start and end values in each span in the range. '(' excludes
+ the start, '[' includes the start, ')' excludes the end, and ']' includes the end.
+ If the bounds are not specified, the default bound '[)' is used.
+
+ **NOTE**: The ``end`` or ``limit`` must be provided. Call with ``end`` alone to
+ return the entire range. Call with ``limit`` alone to return a maximum # of results from
+ the start. Call with both to cap a range at a maximum # of results.
+
+ **NOTE**: ``tz`` internally **replaces** the timezones of both ``start`` and ``end`` before
+ iterating. As such, either call with naive objects and ``tz``, or aware objects from the
+ same timezone and no ``tz``.
+
+ Supported frame values: year, quarter, month, week, day, hour, minute, second.
+
+ Recognized datetime expressions:
+
+ - An :class:`Arrow ` object.
+ - A ``datetime`` object.
+
+ **NOTE**: Unlike Python's ``range``, ``end`` will *always* be included in the returned
+ iterator of timespans.
+
+ Usage:
+
+ >>> start = datetime(2013, 5, 5, 12, 30)
+ >>> end = datetime(2013, 5, 5, 17, 15)
+ >>> for r in arrow.Arrow.span_range('hour', start, end):
+ ... print(r)
+ ...
+ (, )
+ (, )
+ (, )
+ (, )
+ (, )
+ (, )
+
+ """
+
+ tzinfo = cls._get_tzinfo(start.tzinfo if tz is None else tz)
+ start = cls.fromdatetime(start, tzinfo).span(frame)[0]
+ _range = cls.range(frame, start, end, tz, limit)
+ return (r.span(frame, bounds=bounds) for r in _range)
+
+ @classmethod
+ def interval(cls, frame, start, end, interval=1, tz=None, bounds="[)"):
+ """Returns an iterator of tuples, each :class:`Arrow ` objects,
+ representing a series of intervals between two inputs.
+
+ :param frame: The timeframe. Can be any ``datetime`` property (day, hour, minute...).
+ :param start: A datetime expression, the start of the range.
+ :param end: (optional) A datetime expression, the end of the range.
+ :param interval: (optional) Time interval for the given time frame.
+ :param tz: (optional) A timezone expression. Defaults to UTC.
+ :param bounds: (optional) a ``str`` of either '()', '(]', '[)', or '[]' that specifies
+ whether to include or exclude the start and end values in the intervals. '(' excludes
+ the start, '[' includes the start, ')' excludes the end, and ']' includes the end.
+ If the bounds are not specified, the default bound '[)' is used.
+
+ Supported frame values: year, quarter, month, week, day, hour, minute, second
+
+ Recognized datetime expressions:
+
+ - An :class:`Arrow ` object.
+ - A ``datetime`` object.
+
+ Recognized timezone expressions:
+
+ - A ``tzinfo`` object.
+ - A ``str`` describing a timezone, similar to 'US/Pacific', or 'Europe/Berlin'.
+ - A ``str`` in ISO 8601 style, as in '+07:00'.
+ - A ``str``, one of the following: 'local', 'utc', 'UTC'.
+
+ Usage:
+
+ >>> start = datetime(2013, 5, 5, 12, 30)
+ >>> end = datetime(2013, 5, 5, 17, 15)
+ >>> for r in arrow.Arrow.interval('hour', start, end, 2):
+ ... print r
+ ...
+ (, )
+ (, )
+ (, )
+ """
+ if interval < 1:
+ raise ValueError("interval has to be a positive integer")
+
+ spanRange = iter(cls.span_range(frame, start, end, tz, bounds=bounds))
+ while True:
+ try:
+ intvlStart, intvlEnd = next(spanRange)
+ for _ in range(interval - 1):
+ _, intvlEnd = next(spanRange)
+ yield intvlStart, intvlEnd
+ except StopIteration:
+ return
+
+ # representations
+
+ def __repr__(self):
+ return "<{} [{}]>".format(self.__class__.__name__, self.__str__())
+
+ def __str__(self):
+ return self._datetime.isoformat()
+
+ def __format__(self, formatstr):
+
+ if len(formatstr) > 0:
+ return self.format(formatstr)
+
+ return str(self)
+
+ def __hash__(self):
+ return self._datetime.__hash__()
+
+ # attributes and properties
+
+ def __getattr__(self, name):
+
+ if name == "week":
+ return self.isocalendar()[1]
+
+ if name == "quarter":
+ return int((self.month - 1) / self._MONTHS_PER_QUARTER) + 1
+
+ if not name.startswith("_"):
+ value = getattr(self._datetime, name, None)
+
+ if value is not None:
+ return value
+
+ return object.__getattribute__(self, name)
+
+ @property
+ def tzinfo(self):
+ """Gets the ``tzinfo`` of the :class:`Arrow ` object.
+
+ Usage::
+
+ >>> arw=arrow.utcnow()
+ >>> arw.tzinfo
+ tzutc()
+
+ """
+
+ return self._datetime.tzinfo
+
+ @tzinfo.setter
+ def tzinfo(self, tzinfo):
+ """ Sets the ``tzinfo`` of the :class:`Arrow ` object. """
+
+ self._datetime = self._datetime.replace(tzinfo=tzinfo)
+
+ @property
+ def datetime(self):
+ """Returns a datetime representation of the :class:`Arrow ` object.
+
+ Usage::
+
+ >>> arw=arrow.utcnow()
+ >>> arw.datetime
+ datetime.datetime(2019, 1, 24, 16, 35, 27, 276649, tzinfo=tzutc())
+
+ """
+
+ return self._datetime
+
+ @property
+ def naive(self):
+ """Returns a naive datetime representation of the :class:`Arrow `
+ object.
+
+ Usage::
+
+ >>> nairobi = arrow.now('Africa/Nairobi')
+ >>> nairobi
+
+ >>> nairobi.naive
+ datetime.datetime(2019, 1, 23, 19, 27, 12, 297999)
+
+ """
+
+ return self._datetime.replace(tzinfo=None)
+
+ @property
+ def timestamp(self):
+ """Returns a timestamp representation of the :class:`Arrow ` object, in
+ UTC time.
+
+ Usage::
+
+ >>> arrow.utcnow().timestamp
+ 1548260567
+
+ """
+
+ warnings.warn(
+ "For compatibility with the datetime.timestamp() method this property will be replaced with a method in "
+ "the 1.0.0 release, please switch to the .int_timestamp property for identical behaviour as soon as "
+ "possible.",
+ DeprecationWarning,
+ )
+ return calendar.timegm(self._datetime.utctimetuple())
+
+ @property
+ def int_timestamp(self):
+ """Returns a timestamp representation of the :class:`Arrow ` object, in
+ UTC time.
+
+ Usage::
+
+ >>> arrow.utcnow().int_timestamp
+ 1548260567
+
+ """
+
+ return calendar.timegm(self._datetime.utctimetuple())
+
+ @property
+ def float_timestamp(self):
+ """Returns a floating-point representation of the :class:`Arrow `
+ object, in UTC time.
+
+ Usage::
+
+ >>> arrow.utcnow().float_timestamp
+ 1548260516.830896
+
+ """
+
+ # IDEA get rid of this in 1.0.0 and wrap datetime.timestamp()
+ # Or for compatibility retain this but make it call the timestamp method
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", DeprecationWarning)
+ return self.timestamp + float(self.microsecond) / 1000000
+
+ @property
+ def fold(self):
+ """ Returns the ``fold`` value of the :class:`Arrow ` object. """
+
+ # in python < 3.6 _datetime will be a _DatetimeWithFold if fold=1 and a datetime with no fold attribute
+ # otherwise, so we need to return zero to cover the latter case
+ return getattr(self._datetime, "fold", 0)
+
+ @property
+ def ambiguous(self):
+ """ Returns a boolean indicating whether the :class:`Arrow ` object is ambiguous."""
+
+ return dateutil_tz.datetime_ambiguous(self._datetime)
+
+ @property
+ def imaginary(self):
+ """Indicates whether the :class: `Arrow ` object exists in the current timezone."""
+
+ return not dateutil_tz.datetime_exists(self._datetime)
+
+ # mutation and duplication.
+
+ def clone(self):
+ """Returns a new :class:`Arrow ` object, cloned from the current one.
+
+ Usage:
+
+ >>> arw = arrow.utcnow()
+ >>> cloned = arw.clone()
+
+ """
+
+ return self.fromdatetime(self._datetime)
+
+ def replace(self, **kwargs):
+ """Returns a new :class:`Arrow ` object with attributes updated
+ according to inputs.
+
+ Use property names to set their value absolutely::
+
+ >>> import arrow
+ >>> arw = arrow.utcnow()
+ >>> arw
+
+ >>> arw.replace(year=2014, month=6)
+
+
+ You can also replace the timezone without conversion, using a
+ :ref:`timezone expression `::
+
+ >>> arw.replace(tzinfo=tz.tzlocal())
+
+
+ """
+
+ absolute_kwargs = {}
+
+ for key, value in kwargs.items():
+
+ if key in self._ATTRS:
+ absolute_kwargs[key] = value
+ elif key in ["week", "quarter"]:
+ raise AttributeError("setting absolute {} is not supported".format(key))
+ elif key not in ["tzinfo", "fold"]:
+ raise AttributeError('unknown attribute: "{}"'.format(key))
+
+ current = self._datetime.replace(**absolute_kwargs)
+
+ tzinfo = kwargs.get("tzinfo")
+
+ if tzinfo is not None:
+ tzinfo = self._get_tzinfo(tzinfo)
+ current = current.replace(tzinfo=tzinfo)
+
+ fold = kwargs.get("fold")
+
+ # TODO revisit this once we drop support for 2.7/3.5
+ if fold is not None:
+ current = dateutil_tz.enfold(current, fold=fold)
+
+ return self.fromdatetime(current)
+
+ def shift(self, **kwargs):
+ """Returns a new :class:`Arrow ` object with attributes updated
+ according to inputs.
+
+ Use pluralized property names to relatively shift their current value:
+
+ >>> import arrow
+ >>> arw = arrow.utcnow()
+ >>> arw
+
+ >>> arw.shift(years=1, months=-1)
+
+
+ Day-of-the-week relative shifting can use either Python's weekday numbers
+ (Monday = 0, Tuesday = 1 .. Sunday = 6) or using dateutil.relativedelta's
+ day instances (MO, TU .. SU). When using weekday numbers, the returned
+ date will always be greater than or equal to the starting date.
+
+ Using the above code (which is a Saturday) and asking it to shift to Saturday:
+
+ >>> arw.shift(weekday=5)
+
+
+ While asking for a Monday:
+
+ >>> arw.shift(weekday=0)
+
+
+ """
+
+ relative_kwargs = {}
+ additional_attrs = ["weeks", "quarters", "weekday"]
+
+ for key, value in kwargs.items():
+
+ if key in self._ATTRS_PLURAL or key in additional_attrs:
+ relative_kwargs[key] = value
+ else:
+ raise AttributeError(
+ "Invalid shift time frame. Please select one of the following: {}.".format(
+ ", ".join(self._ATTRS_PLURAL + additional_attrs)
+ )
+ )
+
+ # core datetime does not support quarters, translate to months.
+ relative_kwargs.setdefault("months", 0)
+ relative_kwargs["months"] += (
+ relative_kwargs.pop("quarters", 0) * self._MONTHS_PER_QUARTER
+ )
+
+ current = self._datetime + relativedelta(**relative_kwargs)
+
+ if not dateutil_tz.datetime_exists(current):
+ current = dateutil_tz.resolve_imaginary(current)
+
+ return self.fromdatetime(current)
+
+ def to(self, tz):
+ """Returns a new :class:`Arrow ` object, converted
+ to the target timezone.
+
+ :param tz: A :ref:`timezone expression `.
+
+ Usage::
+
+ >>> utc = arrow.utcnow()
+ >>> utc
+
+
+ >>> utc.to('US/Pacific')
+
+
+ >>> utc.to(tz.tzlocal())
+
+
+ >>> utc.to('-07:00')
+
+
+ >>> utc.to('local')
+
+
+ >>> utc.to('local').to('utc')
+
+
+ """
+
+ if not isinstance(tz, dt_tzinfo):
+ tz = parser.TzinfoParser.parse(tz)
+
+ dt = self._datetime.astimezone(tz)
+
+ return self.__class__(
+ dt.year,
+ dt.month,
+ dt.day,
+ dt.hour,
+ dt.minute,
+ dt.second,
+ dt.microsecond,
+ dt.tzinfo,
+ fold=getattr(dt, "fold", 0),
+ )
+
+ # string output and formatting
+
+ def format(self, fmt="YYYY-MM-DD HH:mm:ssZZ", locale="en_us"):
+ """Returns a string representation of the :class:`Arrow ` object,
+ formatted according to a format string.
+
+ :param fmt: the format string.
+
+ Usage::
+
+ >>> arrow.utcnow().format('YYYY-MM-DD HH:mm:ss ZZ')
+ '2013-05-09 03:56:47 -00:00'
+
+ >>> arrow.utcnow().format('X')
+ '1368071882'
+
+ >>> arrow.utcnow().format('MMMM DD, YYYY')
+ 'May 09, 2013'
+
+ >>> arrow.utcnow().format()
+ '2013-05-09 03:56:47 -00:00'
+
+ """
+
+ return formatter.DateTimeFormatter(locale).format(self._datetime, fmt)
+
+ def humanize(
+ self, other=None, locale="en_us", only_distance=False, granularity="auto"
+ ):
+ """Returns a localized, humanized representation of a relative difference in time.
+
+ :param other: (optional) an :class:`Arrow ` or ``datetime`` object.
+ Defaults to now in the current :class:`Arrow ` object's timezone.
+ :param locale: (optional) a ``str`` specifying a locale. Defaults to 'en_us'.
+ :param only_distance: (optional) returns only time difference eg: "11 seconds" without "in" or "ago" part.
+ :param granularity: (optional) defines the precision of the output. Set it to strings 'second', 'minute',
+ 'hour', 'day', 'week', 'month' or 'year' or a list of any combination of these strings
+
+ Usage::
+
+ >>> earlier = arrow.utcnow().shift(hours=-2)
+ >>> earlier.humanize()
+ '2 hours ago'
+
+ >>> later = earlier.shift(hours=4)
+ >>> later.humanize(earlier)
+ 'in 4 hours'
+
+ """
+
+ locale_name = locale
+ locale = locales.get_locale(locale)
+
+ if other is None:
+ utc = datetime.utcnow().replace(tzinfo=dateutil_tz.tzutc())
+ dt = utc.astimezone(self._datetime.tzinfo)
+
+ elif isinstance(other, Arrow):
+ dt = other._datetime
+
+ elif isinstance(other, datetime):
+ if other.tzinfo is None:
+ dt = other.replace(tzinfo=self._datetime.tzinfo)
+ else:
+ dt = other.astimezone(self._datetime.tzinfo)
+
+ else:
+ raise TypeError(
+ "Invalid 'other' argument of type '{}'. "
+ "Argument must be of type None, Arrow, or datetime.".format(
+ type(other).__name__
+ )
+ )
+
+ if isinstance(granularity, list) and len(granularity) == 1:
+ granularity = granularity[0]
+
+ delta = int(round(util.total_seconds(self._datetime - dt)))
+ sign = -1 if delta < 0 else 1
+ diff = abs(delta)
+ delta = diff
+
+ try:
+ if granularity == "auto":
+ if diff < 10:
+ return locale.describe("now", only_distance=only_distance)
+
+ if diff < 45:
+ seconds = sign * delta
+ return locale.describe(
+ "seconds", seconds, only_distance=only_distance
+ )
+
+ elif diff < 90:
+ return locale.describe("minute", sign, only_distance=only_distance)
+ elif diff < 2700:
+ minutes = sign * int(max(delta / 60, 2))
+ return locale.describe(
+ "minutes", minutes, only_distance=only_distance
+ )
+
+ elif diff < 5400:
+ return locale.describe("hour", sign, only_distance=only_distance)
+ elif diff < 79200:
+ hours = sign * int(max(delta / 3600, 2))
+ return locale.describe("hours", hours, only_distance=only_distance)
+
+ # anything less than 48 hours should be 1 day
+ elif diff < 172800:
+ return locale.describe("day", sign, only_distance=only_distance)
+ elif diff < 554400:
+ days = sign * int(max(delta / 86400, 2))
+ return locale.describe("days", days, only_distance=only_distance)
+
+ elif diff < 907200:
+ return locale.describe("week", sign, only_distance=only_distance)
+ elif diff < 2419200:
+ weeks = sign * int(max(delta / 604800, 2))
+ return locale.describe("weeks", weeks, only_distance=only_distance)
+
+ elif diff < 3888000:
+ return locale.describe("month", sign, only_distance=only_distance)
+ elif diff < 29808000:
+ self_months = self._datetime.year * 12 + self._datetime.month
+ other_months = dt.year * 12 + dt.month
+
+ months = sign * int(max(abs(other_months - self_months), 2))
+
+ return locale.describe(
+ "months", months, only_distance=only_distance
+ )
+
+ elif diff < 47260800:
+ return locale.describe("year", sign, only_distance=only_distance)
+ else:
+ years = sign * int(max(delta / 31536000, 2))
+ return locale.describe("years", years, only_distance=only_distance)
+
+ elif util.isstr(granularity):
+ if granularity == "second":
+ delta = sign * delta
+ if abs(delta) < 2:
+ return locale.describe("now", only_distance=only_distance)
+ elif granularity == "minute":
+ delta = sign * delta / self._SECS_PER_MINUTE
+ elif granularity == "hour":
+ delta = sign * delta / self._SECS_PER_HOUR
+ elif granularity == "day":
+ delta = sign * delta / self._SECS_PER_DAY
+ elif granularity == "week":
+ delta = sign * delta / self._SECS_PER_WEEK
+ elif granularity == "month":
+ delta = sign * delta / self._SECS_PER_MONTH
+ elif granularity == "year":
+ delta = sign * delta / self._SECS_PER_YEAR
+ else:
+ raise AttributeError(
+ "Invalid level of granularity. Please select between 'second', 'minute', 'hour', 'day', 'week', 'month' or 'year'"
+ )
+
+ if trunc(abs(delta)) != 1:
+ granularity += "s"
+ return locale.describe(granularity, delta, only_distance=only_distance)
+
+ else:
+ timeframes = []
+ if "year" in granularity:
+ years = sign * delta / self._SECS_PER_YEAR
+ delta %= self._SECS_PER_YEAR
+ timeframes.append(["year", years])
+
+ if "month" in granularity:
+ months = sign * delta / self._SECS_PER_MONTH
+ delta %= self._SECS_PER_MONTH
+ timeframes.append(["month", months])
+
+ if "week" in granularity:
+ weeks = sign * delta / self._SECS_PER_WEEK
+ delta %= self._SECS_PER_WEEK
+ timeframes.append(["week", weeks])
+
+ if "day" in granularity:
+ days = sign * delta / self._SECS_PER_DAY
+ delta %= self._SECS_PER_DAY
+ timeframes.append(["day", days])
+
+ if "hour" in granularity:
+ hours = sign * delta / self._SECS_PER_HOUR
+ delta %= self._SECS_PER_HOUR
+ timeframes.append(["hour", hours])
+
+ if "minute" in granularity:
+ minutes = sign * delta / self._SECS_PER_MINUTE
+ delta %= self._SECS_PER_MINUTE
+ timeframes.append(["minute", minutes])
+
+ if "second" in granularity:
+ seconds = sign * delta
+ timeframes.append(["second", seconds])
+
+ if len(timeframes) < len(granularity):
+ raise AttributeError(
+ "Invalid level of granularity. "
+ "Please select between 'second', 'minute', 'hour', 'day', 'week', 'month' or 'year'."
+ )
+
+ for tf in timeframes:
+ # Make granularity plural if the delta is not equal to 1
+ if trunc(abs(tf[1])) != 1:
+ tf[0] += "s"
+ return locale.describe_multi(timeframes, only_distance=only_distance)
+
+ except KeyError as e:
+ raise ValueError(
+ "Humanization of the {} granularity is not currently translated in the '{}' locale. "
+ "Please consider making a contribution to this locale.".format(
+ e, locale_name
+ )
+ )
+
+ # query functions
+
+ def is_between(self, start, end, bounds="()"):
+ """Returns a boolean denoting whether the specified date and time is between
+ the start and end dates and times.
+
+ :param start: an :class:`Arrow ` object.
+ :param end: an :class:`Arrow ` object.
+ :param bounds: (optional) a ``str`` of either '()', '(]', '[)', or '[]' that specifies
+ whether to include or exclude the start and end values in the range. '(' excludes
+ the start, '[' includes the start, ')' excludes the end, and ']' includes the end.
+ If the bounds are not specified, the default bound '()' is used.
+
+ Usage::
+
+ >>> start = arrow.get(datetime(2013, 5, 5, 12, 30, 10))
+ >>> end = arrow.get(datetime(2013, 5, 5, 12, 30, 36))
+ >>> arrow.get(datetime(2013, 5, 5, 12, 30, 27)).is_between(start, end)
+ True
+
+ >>> start = arrow.get(datetime(2013, 5, 5))
+ >>> end = arrow.get(datetime(2013, 5, 8))
+ >>> arrow.get(datetime(2013, 5, 8)).is_between(start, end, '[]')
+ True
+
+ >>> start = arrow.get(datetime(2013, 5, 5))
+ >>> end = arrow.get(datetime(2013, 5, 8))
+ >>> arrow.get(datetime(2013, 5, 8)).is_between(start, end, '[)')
+ False
+
+ """
+
+ util.validate_bounds(bounds)
+
+ if not isinstance(start, Arrow):
+ raise TypeError(
+ "Can't parse start date argument type of '{}'".format(type(start))
+ )
+
+ if not isinstance(end, Arrow):
+ raise TypeError(
+ "Can't parse end date argument type of '{}'".format(type(end))
+ )
+
+ include_start = bounds[0] == "["
+ include_end = bounds[1] == "]"
+
+ target_timestamp = self.float_timestamp
+ start_timestamp = start.float_timestamp
+ end_timestamp = end.float_timestamp
+
+ if include_start and include_end:
+ return (
+ target_timestamp >= start_timestamp
+ and target_timestamp <= end_timestamp
+ )
+ elif include_start and not include_end:
+ return (
+ target_timestamp >= start_timestamp and target_timestamp < end_timestamp
+ )
+ elif not include_start and include_end:
+ return (
+ target_timestamp > start_timestamp and target_timestamp <= end_timestamp
+ )
+ else:
+ return (
+ target_timestamp > start_timestamp and target_timestamp < end_timestamp
+ )
+
+ # datetime methods
+
+ def date(self):
+ """Returns a ``date`` object with the same year, month and day.
+
+ Usage::
+
+ >>> arrow.utcnow().date()
+ datetime.date(2019, 1, 23)
+
+ """
+
+ return self._datetime.date()
+
+ def time(self):
+ """Returns a ``time`` object with the same hour, minute, second, microsecond.
+
+ Usage::
+
+ >>> arrow.utcnow().time()
+ datetime.time(12, 15, 34, 68352)
+
+ """
+
+ return self._datetime.time()
+
+ def timetz(self):
+ """Returns a ``time`` object with the same hour, minute, second, microsecond and
+ tzinfo.
+
+ Usage::
+
+ >>> arrow.utcnow().timetz()
+ datetime.time(12, 5, 18, 298893, tzinfo=tzutc())
+
+ """
+
+ return self._datetime.timetz()
+
+ def astimezone(self, tz):
+ """Returns a ``datetime`` object, converted to the specified timezone.
+
+ :param tz: a ``tzinfo`` object.
+
+ Usage::
+
+ >>> pacific=arrow.now('US/Pacific')
+ >>> nyc=arrow.now('America/New_York').tzinfo
+ >>> pacific.astimezone(nyc)
+ datetime.datetime(2019, 1, 20, 10, 24, 22, 328172, tzinfo=tzfile('/usr/share/zoneinfo/America/New_York'))
+
+ """
+
+ return self._datetime.astimezone(tz)
+
+ def utcoffset(self):
+ """Returns a ``timedelta`` object representing the whole number of minutes difference from
+ UTC time.
+
+ Usage::
+
+ >>> arrow.now('US/Pacific').utcoffset()
+ datetime.timedelta(-1, 57600)
+
+ """
+
+ return self._datetime.utcoffset()
+
+ def dst(self):
+ """Returns the daylight savings time adjustment.
+
+ Usage::
+
+ >>> arrow.utcnow().dst()
+ datetime.timedelta(0)
+
+ """
+
+ return self._datetime.dst()
+
+ def timetuple(self):
+ """Returns a ``time.struct_time``, in the current timezone.
+
+ Usage::
+
+ >>> arrow.utcnow().timetuple()
+ time.struct_time(tm_year=2019, tm_mon=1, tm_mday=20, tm_hour=15, tm_min=17, tm_sec=8, tm_wday=6, tm_yday=20, tm_isdst=0)
+
+ """
+
+ return self._datetime.timetuple()
+
+ def utctimetuple(self):
+ """Returns a ``time.struct_time``, in UTC time.
+
+ Usage::
+
+ >>> arrow.utcnow().utctimetuple()
+ time.struct_time(tm_year=2019, tm_mon=1, tm_mday=19, tm_hour=21, tm_min=41, tm_sec=7, tm_wday=5, tm_yday=19, tm_isdst=0)
+
+ """
+
+ return self._datetime.utctimetuple()
+
+ def toordinal(self):
+ """Returns the proleptic Gregorian ordinal of the date.
+
+ Usage::
+
+ >>> arrow.utcnow().toordinal()
+ 737078
+
+ """
+
+ return self._datetime.toordinal()
+
+ def weekday(self):
+ """Returns the day of the week as an integer (0-6).
+
+ Usage::
+
+ >>> arrow.utcnow().weekday()
+ 5
+
+ """
+
+ return self._datetime.weekday()
+
+ def isoweekday(self):
+ """Returns the ISO day of the week as an integer (1-7).
+
+ Usage::
+
+ >>> arrow.utcnow().isoweekday()
+ 6
+
+ """
+
+ return self._datetime.isoweekday()
+
+ def isocalendar(self):
+ """Returns a 3-tuple, (ISO year, ISO week number, ISO weekday).
+
+ Usage::
+
+ >>> arrow.utcnow().isocalendar()
+ (2019, 3, 6)
+
+ """
+
+ return self._datetime.isocalendar()
+
+ def isoformat(self, sep="T"):
+ """Returns an ISO 8601 formatted representation of the date and time.
+
+ Usage::
+
+ >>> arrow.utcnow().isoformat()
+ '2019-01-19T18:30:52.442118+00:00'
+
+ """
+
+ return self._datetime.isoformat(sep)
+
+ def ctime(self):
+ """Returns a ctime formatted representation of the date and time.
+
+ Usage::
+
+ >>> arrow.utcnow().ctime()
+ 'Sat Jan 19 18:26:50 2019'
+
+ """
+
+ return self._datetime.ctime()
+
+ def strftime(self, format):
+ """Formats in the style of ``datetime.strftime``.
+
+ :param format: the format string.
+
+ Usage::
+
+ >>> arrow.utcnow().strftime('%d-%m-%Y %H:%M:%S')
+ '23-01-2019 12:28:17'
+
+ """
+
+ return self._datetime.strftime(format)
+
+ def for_json(self):
+ """Serializes for the ``for_json`` protocol of simplejson.
+
+ Usage::
+
+ >>> arrow.utcnow().for_json()
+ '2019-01-19T18:25:36.760079+00:00'
+
+ """
+
+ return self.isoformat()
+
+ # math
+
+ def __add__(self, other):
+
+ if isinstance(other, (timedelta, relativedelta)):
+ return self.fromdatetime(self._datetime + other, self._datetime.tzinfo)
+
+ return NotImplemented
+
+ def __radd__(self, other):
+ return self.__add__(other)
+
+ def __sub__(self, other):
+
+ if isinstance(other, (timedelta, relativedelta)):
+ return self.fromdatetime(self._datetime - other, self._datetime.tzinfo)
+
+ elif isinstance(other, datetime):
+ return self._datetime - other
+
+ elif isinstance(other, Arrow):
+ return self._datetime - other._datetime
+
+ return NotImplemented
+
+ def __rsub__(self, other):
+
+ if isinstance(other, datetime):
+ return other - self._datetime
+
+ return NotImplemented
+
+ # comparisons
+
+ def __eq__(self, other):
+
+ if not isinstance(other, (Arrow, datetime)):
+ return False
+
+ return self._datetime == self._get_datetime(other)
+
+ def __ne__(self, other):
+
+ if not isinstance(other, (Arrow, datetime)):
+ return True
+
+ return not self.__eq__(other)
+
+ def __gt__(self, other):
+
+ if not isinstance(other, (Arrow, datetime)):
+ return NotImplemented
+
+ return self._datetime > self._get_datetime(other)
+
+ def __ge__(self, other):
+
+ if not isinstance(other, (Arrow, datetime)):
+ return NotImplemented
+
+ return self._datetime >= self._get_datetime(other)
+
+ def __lt__(self, other):
+
+ if not isinstance(other, (Arrow, datetime)):
+ return NotImplemented
+
+ return self._datetime < self._get_datetime(other)
+
+ def __le__(self, other):
+
+ if not isinstance(other, (Arrow, datetime)):
+ return NotImplemented
+
+ return self._datetime <= self._get_datetime(other)
+
+ def __cmp__(self, other):
+ if sys.version_info[0] < 3: # pragma: no cover
+ if not isinstance(other, (Arrow, datetime)):
+ raise TypeError(
+ "can't compare '{}' to '{}'".format(type(self), type(other))
+ )
+
+ # internal methods
+
+ @staticmethod
+ def _get_tzinfo(tz_expr):
+
+ if tz_expr is None:
+ return dateutil_tz.tzutc()
+ if isinstance(tz_expr, dt_tzinfo):
+ return tz_expr
+ else:
+ try:
+ return parser.TzinfoParser.parse(tz_expr)
+ except parser.ParserError:
+ raise ValueError("'{}' not recognized as a timezone".format(tz_expr))
+
+ @classmethod
+ def _get_datetime(cls, expr):
+ """Get datetime object for a specified expression."""
+ if isinstance(expr, Arrow):
+ return expr.datetime
+ elif isinstance(expr, datetime):
+ return expr
+ elif util.is_timestamp(expr):
+ timestamp = float(expr)
+ return cls.utcfromtimestamp(timestamp).datetime
+ else:
+ raise ValueError(
+ "'{}' not recognized as a datetime or timestamp.".format(expr)
+ )
+
+ @classmethod
+ def _get_frames(cls, name):
+
+ if name in cls._ATTRS:
+ return name, "{}s".format(name), 1
+ elif name[-1] == "s" and name[:-1] in cls._ATTRS:
+ return name[:-1], name, 1
+ elif name in ["week", "weeks"]:
+ return "week", "weeks", 1
+ elif name in ["quarter", "quarters"]:
+ return "quarter", "months", 3
+
+ supported = ", ".join(
+ [
+ "year(s)",
+ "month(s)",
+ "day(s)",
+ "hour(s)",
+ "minute(s)",
+ "second(s)",
+ "microsecond(s)",
+ "week(s)",
+ "quarter(s)",
+ ]
+ )
+ raise AttributeError(
+ "range/span over frame {} not supported. Supported frames: {}".format(
+ name, supported
+ )
+ )
+
+ @classmethod
+ def _get_iteration_params(cls, end, limit):
+
+ if end is None:
+
+ if limit is None:
+ raise ValueError("one of 'end' or 'limit' is required")
+
+ return cls.max, limit
+
+ else:
+ if limit is None:
+ return end, sys.maxsize
+ return end, limit
+
+ @staticmethod
+ def _is_last_day_of_month(date):
+ return date.day == calendar.monthrange(date.year, date.month)[1]
+
+
+Arrow.min = Arrow.fromdatetime(datetime.min)
+Arrow.max = Arrow.fromdatetime(datetime.max)
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/arrow/constants.py b/openpype/modules/ftrack/python2_vendor/arrow/arrow/constants.py
new file mode 100644
index 0000000000..81e37b26de
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/arrow/constants.py
@@ -0,0 +1,9 @@
+# -*- coding: utf-8 -*-
+
+# Output of time.mktime(datetime.max.timetuple()) on macOS
+# This value must be hardcoded for compatibility with Windows
+# Platform-independent max timestamps are hard to form
+# https://stackoverflow.com/q/46133223
+MAX_TIMESTAMP = 253402318799.0
+MAX_TIMESTAMP_MS = MAX_TIMESTAMP * 1000
+MAX_TIMESTAMP_US = MAX_TIMESTAMP * 1000000
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/arrow/factory.py b/openpype/modules/ftrack/python2_vendor/arrow/arrow/factory.py
new file mode 100644
index 0000000000..05933e8151
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/arrow/factory.py
@@ -0,0 +1,301 @@
+# -*- coding: utf-8 -*-
+"""
+Implements the :class:`ArrowFactory ` class,
+providing factory methods for common :class:`Arrow `
+construction scenarios.
+
+"""
+
+from __future__ import absolute_import
+
+import calendar
+from datetime import date, datetime
+from datetime import tzinfo as dt_tzinfo
+from time import struct_time
+
+from dateutil import tz as dateutil_tz
+
+from arrow import parser
+from arrow.arrow import Arrow
+from arrow.util import is_timestamp, iso_to_gregorian, isstr
+
+
+class ArrowFactory(object):
+ """A factory for generating :class:`Arrow ` objects.
+
+ :param type: (optional) the :class:`Arrow `-based class to construct from.
+ Defaults to :class:`Arrow `.
+
+ """
+
+ def __init__(self, type=Arrow):
+ self.type = type
+
+ def get(self, *args, **kwargs):
+ """Returns an :class:`Arrow ` object based on flexible inputs.
+
+ :param locale: (optional) a ``str`` specifying a locale for the parser. Defaults to 'en_us'.
+ :param tzinfo: (optional) a :ref:`timezone expression ` or tzinfo object.
+ Replaces the timezone unless using an input form that is explicitly UTC or specifies
+ the timezone in a positional argument. Defaults to UTC.
+ :param normalize_whitespace: (optional) a ``bool`` specifying whether or not to normalize
+ redundant whitespace (spaces, tabs, and newlines) in a datetime string before parsing.
+ Defaults to false.
+
+ Usage::
+
+ >>> import arrow
+
+ **No inputs** to get current UTC time::
+
+ >>> arrow.get()
+
+
+ **None** to also get current UTC time::
+
+ >>> arrow.get(None)
+
+
+ **One** :class:`Arrow ` object, to get a copy.
+
+ >>> arw = arrow.utcnow()
+ >>> arrow.get(arw)
+
+
+ **One** ``float`` or ``int``, convertible to a floating-point timestamp, to get
+ that timestamp in UTC::
+
+ >>> arrow.get(1367992474.293378)
+
+
+ >>> arrow.get(1367992474)
+
+
+ **One** ISO 8601-formatted ``str``, to parse it::
+
+ >>> arrow.get('2013-09-29T01:26:43.830580')
+
+
+ **One** ISO 8601-formatted ``str``, in basic format, to parse it::
+
+ >>> arrow.get('20160413T133656.456289')
+
+
+ **One** ``tzinfo``, to get the current time **converted** to that timezone::
+
+ >>> arrow.get(tz.tzlocal())
+
+
+ **One** naive ``datetime``, to get that datetime in UTC::
+
+ >>> arrow.get(datetime(2013, 5, 5))
+
+
+ **One** aware ``datetime``, to get that datetime::
+
+ >>> arrow.get(datetime(2013, 5, 5, tzinfo=tz.tzlocal()))
+
+
+ **One** naive ``date``, to get that date in UTC::
+
+ >>> arrow.get(date(2013, 5, 5))
+
+
+ **One** time.struct_time::
+
+ >>> arrow.get(gmtime(0))
+
+
+ **One** iso calendar ``tuple``, to get that week date in UTC::
+
+ >>> arrow.get((2013, 18, 7))
+
+
+ **Two** arguments, a naive or aware ``datetime``, and a replacement
+ :ref:`timezone expression `::
+
+ >>> arrow.get(datetime(2013, 5, 5), 'US/Pacific')
+
+
+ **Two** arguments, a naive ``date``, and a replacement
+ :ref:`timezone expression `::
+
+ >>> arrow.get(date(2013, 5, 5), 'US/Pacific')
+
+
+ **Two** arguments, both ``str``, to parse the first according to the format of the second::
+
+ >>> arrow.get('2013-05-05 12:30:45 America/Chicago', 'YYYY-MM-DD HH:mm:ss ZZZ')
+
+
+ **Two** arguments, first a ``str`` to parse and second a ``list`` of formats to try::
+
+ >>> arrow.get('2013-05-05 12:30:45', ['MM/DD/YYYY', 'YYYY-MM-DD HH:mm:ss'])
+
+
+ **Three or more** arguments, as for the constructor of a ``datetime``::
+
+ >>> arrow.get(2013, 5, 5, 12, 30, 45)
+
+
+ """
+
+ arg_count = len(args)
+ locale = kwargs.pop("locale", "en_us")
+ tz = kwargs.get("tzinfo", None)
+ normalize_whitespace = kwargs.pop("normalize_whitespace", False)
+
+ # if kwargs given, send to constructor unless only tzinfo provided
+ if len(kwargs) > 1:
+ arg_count = 3
+
+ # tzinfo kwarg is not provided
+ if len(kwargs) == 1 and tz is None:
+ arg_count = 3
+
+ # () -> now, @ utc.
+ if arg_count == 0:
+ if isstr(tz):
+ tz = parser.TzinfoParser.parse(tz)
+ return self.type.now(tz)
+
+ if isinstance(tz, dt_tzinfo):
+ return self.type.now(tz)
+
+ return self.type.utcnow()
+
+ if arg_count == 1:
+ arg = args[0]
+
+ # (None) -> now, @ utc.
+ if arg is None:
+ return self.type.utcnow()
+
+ # try (int, float) -> from timestamp with tz
+ elif not isstr(arg) and is_timestamp(arg):
+ if tz is None:
+ # set to UTC by default
+ tz = dateutil_tz.tzutc()
+ return self.type.fromtimestamp(arg, tzinfo=tz)
+
+ # (Arrow) -> from the object's datetime.
+ elif isinstance(arg, Arrow):
+ return self.type.fromdatetime(arg.datetime)
+
+ # (datetime) -> from datetime.
+ elif isinstance(arg, datetime):
+ return self.type.fromdatetime(arg)
+
+ # (date) -> from date.
+ elif isinstance(arg, date):
+ return self.type.fromdate(arg)
+
+ # (tzinfo) -> now, @ tzinfo.
+ elif isinstance(arg, dt_tzinfo):
+ return self.type.now(arg)
+
+ # (str) -> parse.
+ elif isstr(arg):
+ dt = parser.DateTimeParser(locale).parse_iso(arg, normalize_whitespace)
+ return self.type.fromdatetime(dt, tz)
+
+ # (struct_time) -> from struct_time
+ elif isinstance(arg, struct_time):
+ return self.type.utcfromtimestamp(calendar.timegm(arg))
+
+ # (iso calendar) -> convert then from date
+ elif isinstance(arg, tuple) and len(arg) == 3:
+ dt = iso_to_gregorian(*arg)
+ return self.type.fromdate(dt)
+
+ else:
+ raise TypeError(
+ "Can't parse single argument of type '{}'".format(type(arg))
+ )
+
+ elif arg_count == 2:
+
+ arg_1, arg_2 = args[0], args[1]
+
+ if isinstance(arg_1, datetime):
+
+ # (datetime, tzinfo/str) -> fromdatetime replace tzinfo.
+ if isinstance(arg_2, dt_tzinfo) or isstr(arg_2):
+ return self.type.fromdatetime(arg_1, arg_2)
+ else:
+ raise TypeError(
+ "Can't parse two arguments of types 'datetime', '{}'".format(
+ type(arg_2)
+ )
+ )
+
+ elif isinstance(arg_1, date):
+
+ # (date, tzinfo/str) -> fromdate replace tzinfo.
+ if isinstance(arg_2, dt_tzinfo) or isstr(arg_2):
+ return self.type.fromdate(arg_1, tzinfo=arg_2)
+ else:
+ raise TypeError(
+ "Can't parse two arguments of types 'date', '{}'".format(
+ type(arg_2)
+ )
+ )
+
+ # (str, format) -> parse.
+ elif isstr(arg_1) and (isstr(arg_2) or isinstance(arg_2, list)):
+ dt = parser.DateTimeParser(locale).parse(
+ args[0], args[1], normalize_whitespace
+ )
+ return self.type.fromdatetime(dt, tzinfo=tz)
+
+ else:
+ raise TypeError(
+ "Can't parse two arguments of types '{}' and '{}'".format(
+ type(arg_1), type(arg_2)
+ )
+ )
+
+ # 3+ args -> datetime-like via constructor.
+ else:
+ return self.type(*args, **kwargs)
+
+ def utcnow(self):
+ """Returns an :class:`Arrow ` object, representing "now" in UTC time.
+
+ Usage::
+
+ >>> import arrow
+ >>> arrow.utcnow()
+
+ """
+
+ return self.type.utcnow()
+
+ def now(self, tz=None):
+ """Returns an :class:`Arrow ` object, representing "now" in the given
+ timezone.
+
+ :param tz: (optional) A :ref:`timezone expression `. Defaults to local time.
+
+ Usage::
+
+ >>> import arrow
+ >>> arrow.now()
+
+
+ >>> arrow.now('US/Pacific')
+
+
+ >>> arrow.now('+02:00')
+
+
+ >>> arrow.now('local')
+
+ """
+
+ if tz is None:
+ tz = dateutil_tz.tzlocal()
+ elif not isinstance(tz, dt_tzinfo):
+ tz = parser.TzinfoParser.parse(tz)
+
+ return self.type.now(tz)
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/arrow/formatter.py b/openpype/modules/ftrack/python2_vendor/arrow/arrow/formatter.py
new file mode 100644
index 0000000000..9f9d7a44da
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/arrow/formatter.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import, division
+
+import calendar
+import re
+
+from dateutil import tz as dateutil_tz
+
+from arrow import locales, util
+
+FORMAT_ATOM = "YYYY-MM-DD HH:mm:ssZZ"
+FORMAT_COOKIE = "dddd, DD-MMM-YYYY HH:mm:ss ZZZ"
+FORMAT_RFC822 = "ddd, DD MMM YY HH:mm:ss Z"
+FORMAT_RFC850 = "dddd, DD-MMM-YY HH:mm:ss ZZZ"
+FORMAT_RFC1036 = "ddd, DD MMM YY HH:mm:ss Z"
+FORMAT_RFC1123 = "ddd, DD MMM YYYY HH:mm:ss Z"
+FORMAT_RFC2822 = "ddd, DD MMM YYYY HH:mm:ss Z"
+FORMAT_RFC3339 = "YYYY-MM-DD HH:mm:ssZZ"
+FORMAT_RSS = "ddd, DD MMM YYYY HH:mm:ss Z"
+FORMAT_W3C = "YYYY-MM-DD HH:mm:ssZZ"
+
+
+class DateTimeFormatter(object):
+
+    # Characters enclosed in square brackets are matched as an atomic group.
+    # For more info on atomic groups and how they are emulated in Python's
+    # re library, see https://stackoverflow.com/a/13577411/2701578
+
+ _FORMAT_RE = re.compile(
+ r"(\[(?:(?=(?P[^]]))(?P=literal))*\]|YYY?Y?|MM?M?M?|Do|DD?D?D?|d?dd?d?|HH?|hh?|mm?|ss?|SS?S?S?S?S?|ZZ?Z?|a|A|X|x|W)"
+ )
+
+ def __init__(self, locale="en_us"):
+
+ self.locale = locales.get_locale(locale)
+
+ def format(cls, dt, fmt):
+
+ return cls._FORMAT_RE.sub(lambda m: cls._format_token(dt, m.group(0)), fmt)
+
+ def _format_token(self, dt, token):
+
+ if token and token.startswith("[") and token.endswith("]"):
+ return token[1:-1]
+
+ if token == "YYYY":
+ return self.locale.year_full(dt.year)
+ if token == "YY":
+ return self.locale.year_abbreviation(dt.year)
+
+ if token == "MMMM":
+ return self.locale.month_name(dt.month)
+ if token == "MMM":
+ return self.locale.month_abbreviation(dt.month)
+ if token == "MM":
+ return "{:02d}".format(dt.month)
+ if token == "M":
+ return str(dt.month)
+
+ if token == "DDDD":
+ return "{:03d}".format(dt.timetuple().tm_yday)
+ if token == "DDD":
+ return str(dt.timetuple().tm_yday)
+ if token == "DD":
+ return "{:02d}".format(dt.day)
+ if token == "D":
+ return str(dt.day)
+
+ if token == "Do":
+ return self.locale.ordinal_number(dt.day)
+
+ if token == "dddd":
+ return self.locale.day_name(dt.isoweekday())
+ if token == "ddd":
+ return self.locale.day_abbreviation(dt.isoweekday())
+ if token == "d":
+ return str(dt.isoweekday())
+
+ if token == "HH":
+ return "{:02d}".format(dt.hour)
+ if token == "H":
+ return str(dt.hour)
+ if token == "hh":
+ return "{:02d}".format(dt.hour if 0 < dt.hour < 13 else abs(dt.hour - 12))
+ if token == "h":
+ return str(dt.hour if 0 < dt.hour < 13 else abs(dt.hour - 12))
+
+ if token == "mm":
+ return "{:02d}".format(dt.minute)
+ if token == "m":
+ return str(dt.minute)
+
+ if token == "ss":
+ return "{:02d}".format(dt.second)
+ if token == "s":
+ return str(dt.second)
+
+ if token == "SSSSSS":
+ return str("{:06d}".format(int(dt.microsecond)))
+ if token == "SSSSS":
+ return str("{:05d}".format(int(dt.microsecond / 10)))
+ if token == "SSSS":
+ return str("{:04d}".format(int(dt.microsecond / 100)))
+ if token == "SSS":
+ return str("{:03d}".format(int(dt.microsecond / 1000)))
+ if token == "SS":
+ return str("{:02d}".format(int(dt.microsecond / 10000)))
+ if token == "S":
+ return str(int(dt.microsecond / 100000))
+
+ if token == "X":
+ # TODO: replace with a call to dt.timestamp() when we drop Python 2.7
+ return str(calendar.timegm(dt.utctimetuple()))
+
+ if token == "x":
+ # TODO: replace with a call to dt.timestamp() when we drop Python 2.7
+ ts = calendar.timegm(dt.utctimetuple()) + (dt.microsecond / 1000000)
+ return str(int(ts * 1000000))
+
+ if token == "ZZZ":
+ return dt.tzname()
+
+ if token in ["ZZ", "Z"]:
+ separator = ":" if token == "ZZ" else ""
+ tz = dateutil_tz.tzutc() if dt.tzinfo is None else dt.tzinfo
+ total_minutes = int(util.total_seconds(tz.utcoffset(dt)) / 60)
+
+ sign = "+" if total_minutes >= 0 else "-"
+ total_minutes = abs(total_minutes)
+ hour, minute = divmod(total_minutes, 60)
+
+ return "{}{:02d}{}{:02d}".format(sign, hour, separator, minute)
+
+ if token in ("a", "A"):
+ return self.locale.meridian(dt.hour, token)
+
+ if token == "W":
+ year, week, day = dt.isocalendar()
+ return "{}-W{:02d}-{}".format(year, week, day)
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/arrow/locales.py b/openpype/modules/ftrack/python2_vendor/arrow/arrow/locales.py
new file mode 100644
index 0000000000..6833da5a78
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/arrow/locales.py
@@ -0,0 +1,4267 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import, unicode_literals
+
+import inspect
+import sys
+from math import trunc
+
+
+def get_locale(name):
+ """Returns an appropriate :class:`Locale `
+    corresponding to an input locale name.
+
+ :param name: the name of the locale.
+
+ """
+
+ locale_cls = _locales.get(name.lower())
+
+ if locale_cls is None:
+ raise ValueError("Unsupported locale '{}'".format(name))
+
+ return locale_cls()
+
+
+def get_locale_by_class_name(name):
+ """Returns an appropriate :class:`Locale `
+    corresponding to a locale class name.
+
+ :param name: the name of the locale class.
+
+ """
+ locale_cls = globals().get(name)
+
+ if locale_cls is None:
+ raise ValueError("Unsupported locale '{}'".format(name))
+
+ return locale_cls()
+
+
+# base locale type.
+
+
+class Locale(object):
+ """ Represents locale-specific data and functionality. """
+
+ names = []
+
+ timeframes = {
+ "now": "",
+ "second": "",
+ "seconds": "",
+ "minute": "",
+ "minutes": "",
+ "hour": "",
+ "hours": "",
+ "day": "",
+ "days": "",
+ "week": "",
+ "weeks": "",
+ "month": "",
+ "months": "",
+ "year": "",
+ "years": "",
+ }
+
+ meridians = {"am": "", "pm": "", "AM": "", "PM": ""}
+
+ past = None
+ future = None
+ and_word = None
+
+ month_names = []
+ month_abbreviations = []
+
+ day_names = []
+ day_abbreviations = []
+
+ ordinal_day_re = r"(\d+)"
+
+ def __init__(self):
+
+ self._month_name_to_ordinal = None
+
+ def describe(self, timeframe, delta=0, only_distance=False):
+ """Describes a delta within a timeframe in plain language.
+
+ :param timeframe: a string representing a timeframe.
+ :param delta: a quantity representing a delta in a timeframe.
+ :param only_distance: return only distance eg: "11 seconds" without "in" or "ago" keywords
+ """
+
+ humanized = self._format_timeframe(timeframe, delta)
+ if not only_distance:
+ humanized = self._format_relative(humanized, timeframe, delta)
+
+ return humanized
+
+ def describe_multi(self, timeframes, only_distance=False):
+ """Describes a delta within multiple timeframes in plain language.
+
+ :param timeframes: a list of string, quantity pairs each representing a timeframe and delta.
+ :param only_distance: return only distance eg: "2 hours and 11 seconds" without "in" or "ago" keywords
+ """
+
+ humanized = ""
+ for index, (timeframe, delta) in enumerate(timeframes):
+ humanized += self._format_timeframe(timeframe, delta)
+ if index == len(timeframes) - 2 and self.and_word:
+ humanized += " " + self.and_word + " "
+ elif index < len(timeframes) - 1:
+ humanized += " "
+
+ if not only_distance:
+ humanized = self._format_relative(humanized, timeframe, delta)
+
+ return humanized
+
+ def day_name(self, day):
+ """Returns the day name for a specified day of the week.
+
+ :param day: the ``int`` day of the week (1-7).
+
+ """
+
+ return self.day_names[day]
+
+ def day_abbreviation(self, day):
+ """Returns the day abbreviation for a specified day of the week.
+
+ :param day: the ``int`` day of the week (1-7).
+
+ """
+
+ return self.day_abbreviations[day]
+
+ def month_name(self, month):
+ """Returns the month name for a specified month of the year.
+
+ :param month: the ``int`` month of the year (1-12).
+
+ """
+
+ return self.month_names[month]
+
+ def month_abbreviation(self, month):
+ """Returns the month abbreviation for a specified month of the year.
+
+ :param month: the ``int`` month of the year (1-12).
+
+ """
+
+ return self.month_abbreviations[month]
+
+ def month_number(self, name):
+ """Returns the month number for a month specified by name or abbreviation.
+
+ :param name: the month name or abbreviation.
+
+ """
+
+ if self._month_name_to_ordinal is None:
+ self._month_name_to_ordinal = self._name_to_ordinal(self.month_names)
+ self._month_name_to_ordinal.update(
+ self._name_to_ordinal(self.month_abbreviations)
+ )
+
+ return self._month_name_to_ordinal.get(name)
+
+ def year_full(self, year):
+ """Returns the year for specific locale if available
+
+ :param name: the ``int`` year (4-digit)
+ """
+ return "{:04d}".format(year)
+
+ def year_abbreviation(self, year):
+ """Returns the year for specific locale if available
+
+ :param name: the ``int`` year (4-digit)
+ """
+ return "{:04d}".format(year)[2:]
+
+ def meridian(self, hour, token):
+ """Returns the meridian indicator for a specified hour and format token.
+
+ :param hour: the ``int`` hour of the day.
+ :param token: the format token.
+ """
+
+ if token == "a":
+ return self.meridians["am"] if hour < 12 else self.meridians["pm"]
+ if token == "A":
+ return self.meridians["AM"] if hour < 12 else self.meridians["PM"]
+
+ def ordinal_number(self, n):
+ """Returns the ordinal format of a given integer
+
+ :param n: an integer
+ """
+ return self._ordinal_number(n)
+
+ def _ordinal_number(self, n):
+ return "{}".format(n)
+
+ def _name_to_ordinal(self, lst):
+ return dict(map(lambda i: (i[1].lower(), i[0] + 1), enumerate(lst[1:])))
+
+ def _format_timeframe(self, timeframe, delta):
+ return self.timeframes[timeframe].format(trunc(abs(delta)))
+
+ def _format_relative(self, humanized, timeframe, delta):
+
+ if timeframe == "now":
+ return humanized
+
+ direction = self.past if delta < 0 else self.future
+
+ return direction.format(humanized)
+
+
+# base locale type implementations.
+
+
+class EnglishLocale(Locale):
+
+ names = [
+ "en",
+ "en_us",
+ "en_gb",
+ "en_au",
+ "en_be",
+ "en_jp",
+ "en_za",
+ "en_ca",
+ "en_ph",
+ ]
+
+ past = "{0} ago"
+ future = "in {0}"
+ and_word = "and"
+
+ timeframes = {
+ "now": "just now",
+ "second": "a second",
+ "seconds": "{0} seconds",
+ "minute": "a minute",
+ "minutes": "{0} minutes",
+ "hour": "an hour",
+ "hours": "{0} hours",
+ "day": "a day",
+ "days": "{0} days",
+ "week": "a week",
+ "weeks": "{0} weeks",
+ "month": "a month",
+ "months": "{0} months",
+ "year": "a year",
+ "years": "{0} years",
+ }
+
+ meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"}
+
+ month_names = [
+ "",
+ "January",
+ "February",
+ "March",
+ "April",
+ "May",
+ "June",
+ "July",
+ "August",
+ "September",
+ "October",
+ "November",
+ "December",
+ ]
+ month_abbreviations = [
+ "",
+ "Jan",
+ "Feb",
+ "Mar",
+ "Apr",
+ "May",
+ "Jun",
+ "Jul",
+ "Aug",
+ "Sep",
+ "Oct",
+ "Nov",
+ "Dec",
+ ]
+
+ day_names = [
+ "",
+ "Monday",
+ "Tuesday",
+ "Wednesday",
+ "Thursday",
+ "Friday",
+ "Saturday",
+ "Sunday",
+ ]
+ day_abbreviations = ["", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
+
+ ordinal_day_re = r"((?P[2-3]?1(?=st)|[2-3]?2(?=nd)|[2-3]?3(?=rd)|[1-3]?[04-9](?=th)|1[1-3](?=th))(st|nd|rd|th))"
+
+ def _ordinal_number(self, n):
+ if n % 100 not in (11, 12, 13):
+ remainder = abs(n) % 10
+ if remainder == 1:
+ return "{}st".format(n)
+ elif remainder == 2:
+ return "{}nd".format(n)
+ elif remainder == 3:
+ return "{}rd".format(n)
+ return "{}th".format(n)
+
+ def describe(self, timeframe, delta=0, only_distance=False):
+ """Describes a delta within a timeframe in plain language.
+
+ :param timeframe: a string representing a timeframe.
+ :param delta: a quantity representing a delta in a timeframe.
+ :param only_distance: return only distance eg: "11 seconds" without "in" or "ago" keywords
+ """
+
+ humanized = super(EnglishLocale, self).describe(timeframe, delta, only_distance)
+ if only_distance and timeframe == "now":
+ humanized = "instantly"
+
+ return humanized
+
+
+class ItalianLocale(Locale):
+ names = ["it", "it_it"]
+ past = "{0} fa"
+ future = "tra {0}"
+ and_word = "e"
+
+ timeframes = {
+ "now": "adesso",
+ "second": "un secondo",
+ "seconds": "{0} qualche secondo",
+ "minute": "un minuto",
+ "minutes": "{0} minuti",
+ "hour": "un'ora",
+ "hours": "{0} ore",
+ "day": "un giorno",
+ "days": "{0} giorni",
+ "week": "una settimana,",
+ "weeks": "{0} settimane",
+ "month": "un mese",
+ "months": "{0} mesi",
+ "year": "un anno",
+ "years": "{0} anni",
+ }
+
+ month_names = [
+ "",
+ "gennaio",
+ "febbraio",
+ "marzo",
+ "aprile",
+ "maggio",
+ "giugno",
+ "luglio",
+ "agosto",
+ "settembre",
+ "ottobre",
+ "novembre",
+ "dicembre",
+ ]
+ month_abbreviations = [
+ "",
+ "gen",
+ "feb",
+ "mar",
+ "apr",
+ "mag",
+ "giu",
+ "lug",
+ "ago",
+ "set",
+ "ott",
+ "nov",
+ "dic",
+ ]
+
+ day_names = [
+ "",
+ "lunedì",
+ "martedì",
+ "mercoledì",
+ "giovedì",
+ "venerdì",
+ "sabato",
+ "domenica",
+ ]
+ day_abbreviations = ["", "lun", "mar", "mer", "gio", "ven", "sab", "dom"]
+
+ ordinal_day_re = r"((?P[1-3]?[0-9](?=[ºª]))[ºª])"
+
+ def _ordinal_number(self, n):
+ return "{}º".format(n)
+
+
+class SpanishLocale(Locale):
+ names = ["es", "es_es"]
+ past = "hace {0}"
+ future = "en {0}"
+ and_word = "y"
+
+ timeframes = {
+ "now": "ahora",
+ "second": "un segundo",
+ "seconds": "{0} segundos",
+ "minute": "un minuto",
+ "minutes": "{0} minutos",
+ "hour": "una hora",
+ "hours": "{0} horas",
+ "day": "un día",
+ "days": "{0} días",
+ "week": "una semana",
+ "weeks": "{0} semanas",
+ "month": "un mes",
+ "months": "{0} meses",
+ "year": "un año",
+ "years": "{0} años",
+ }
+
+ meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"}
+
+ month_names = [
+ "",
+ "enero",
+ "febrero",
+ "marzo",
+ "abril",
+ "mayo",
+ "junio",
+ "julio",
+ "agosto",
+ "septiembre",
+ "octubre",
+ "noviembre",
+ "diciembre",
+ ]
+ month_abbreviations = [
+ "",
+ "ene",
+ "feb",
+ "mar",
+ "abr",
+ "may",
+ "jun",
+ "jul",
+ "ago",
+ "sep",
+ "oct",
+ "nov",
+ "dic",
+ ]
+
+ day_names = [
+ "",
+ "lunes",
+ "martes",
+ "miércoles",
+ "jueves",
+ "viernes",
+ "sábado",
+ "domingo",
+ ]
+ day_abbreviations = ["", "lun", "mar", "mie", "jue", "vie", "sab", "dom"]
+
+ ordinal_day_re = r"((?P[1-3]?[0-9](?=[ºª]))[ºª])"
+
+ def _ordinal_number(self, n):
+ return "{}º".format(n)
+
+
+class FrenchBaseLocale(Locale):
+
+ past = "il y a {0}"
+ future = "dans {0}"
+ and_word = "et"
+
+ timeframes = {
+ "now": "maintenant",
+ "second": "une seconde",
+ "seconds": "{0} quelques secondes",
+ "minute": "une minute",
+ "minutes": "{0} minutes",
+ "hour": "une heure",
+ "hours": "{0} heures",
+ "day": "un jour",
+ "days": "{0} jours",
+ "week": "une semaine",
+ "weeks": "{0} semaines",
+ "month": "un mois",
+ "months": "{0} mois",
+ "year": "un an",
+ "years": "{0} ans",
+ }
+
+ month_names = [
+ "",
+ "janvier",
+ "février",
+ "mars",
+ "avril",
+ "mai",
+ "juin",
+ "juillet",
+ "août",
+ "septembre",
+ "octobre",
+ "novembre",
+ "décembre",
+ ]
+
+ day_names = [
+ "",
+ "lundi",
+ "mardi",
+ "mercredi",
+ "jeudi",
+ "vendredi",
+ "samedi",
+ "dimanche",
+ ]
+ day_abbreviations = ["", "lun", "mar", "mer", "jeu", "ven", "sam", "dim"]
+
+ ordinal_day_re = (
+ r"((?P\b1(?=er\b)|[1-3]?[02-9](?=e\b)|[1-3]1(?=e\b))(er|e)\b)"
+ )
+
+ def _ordinal_number(self, n):
+ if abs(n) == 1:
+ return "{}er".format(n)
+ return "{}e".format(n)
+
+
+class FrenchLocale(FrenchBaseLocale, Locale):
+
+ names = ["fr", "fr_fr"]
+
+ month_abbreviations = [
+ "",
+ "janv",
+ "févr",
+ "mars",
+ "avr",
+ "mai",
+ "juin",
+ "juil",
+ "août",
+ "sept",
+ "oct",
+ "nov",
+ "déc",
+ ]
+
+
+class FrenchCanadianLocale(FrenchBaseLocale, Locale):
+
+ names = ["fr_ca"]
+
+ month_abbreviations = [
+ "",
+ "janv",
+ "févr",
+ "mars",
+ "avr",
+ "mai",
+ "juin",
+ "juill",
+ "août",
+ "sept",
+ "oct",
+ "nov",
+ "déc",
+ ]
+
+
+class GreekLocale(Locale):
+
+ names = ["el", "el_gr"]
+
+ past = "{0} πριν"
+ future = "σε {0}"
+ and_word = "και"
+
+ timeframes = {
+ "now": "τώρα",
+ "second": "ένα δεύτερο",
+ "seconds": "{0} δευτερόλεπτα",
+ "minute": "ένα λεπτό",
+ "minutes": "{0} λεπτά",
+ "hour": "μία ώρα",
+ "hours": "{0} ώρες",
+ "day": "μία μέρα",
+ "days": "{0} μέρες",
+ "month": "ένα μήνα",
+ "months": "{0} μήνες",
+ "year": "ένα χρόνο",
+ "years": "{0} χρόνια",
+ }
+
+ month_names = [
+ "",
+ "Ιανουαρίου",
+ "Φεβρουαρίου",
+ "Μαρτίου",
+ "Απριλίου",
+ "Μαΐου",
+ "Ιουνίου",
+ "Ιουλίου",
+ "Αυγούστου",
+ "Σεπτεμβρίου",
+ "Οκτωβρίου",
+ "Νοεμβρίου",
+ "Δεκεμβρίου",
+ ]
+ month_abbreviations = [
+ "",
+ "Ιαν",
+ "Φεβ",
+ "Μαρ",
+ "Απρ",
+ "Μαϊ",
+ "Ιον",
+ "Ιολ",
+ "Αυγ",
+ "Σεπ",
+ "Οκτ",
+ "Νοε",
+ "Δεκ",
+ ]
+
+ day_names = [
+ "",
+ "Δευτέρα",
+ "Τρίτη",
+ "Τετάρτη",
+ "Πέμπτη",
+ "Παρασκευή",
+ "Σάββατο",
+ "Κυριακή",
+ ]
+ day_abbreviations = ["", "Δευ", "Τρι", "Τετ", "Πεμ", "Παρ", "Σαβ", "Κυρ"]
+
+
+class JapaneseLocale(Locale):
+
+ names = ["ja", "ja_jp"]
+
+ past = "{0}前"
+ future = "{0}後"
+
+ timeframes = {
+ "now": "現在",
+ "second": "二番目の",
+ "seconds": "{0}数秒",
+ "minute": "1分",
+ "minutes": "{0}分",
+ "hour": "1時間",
+ "hours": "{0}時間",
+ "day": "1日",
+ "days": "{0}日",
+ "week": "1週間",
+ "weeks": "{0}週間",
+ "month": "1ヶ月",
+ "months": "{0}ヶ月",
+ "year": "1年",
+ "years": "{0}年",
+ }
+
+ month_names = [
+ "",
+ "1月",
+ "2月",
+ "3月",
+ "4月",
+ "5月",
+ "6月",
+ "7月",
+ "8月",
+ "9月",
+ "10月",
+ "11月",
+ "12月",
+ ]
+ month_abbreviations = [
+ "",
+ " 1",
+ " 2",
+ " 3",
+ " 4",
+ " 5",
+ " 6",
+ " 7",
+ " 8",
+ " 9",
+ "10",
+ "11",
+ "12",
+ ]
+
+ day_names = ["", "月曜日", "火曜日", "水曜日", "木曜日", "金曜日", "土曜日", "日曜日"]
+ day_abbreviations = ["", "月", "火", "水", "木", "金", "土", "日"]
+
+
+class SwedishLocale(Locale):
+
+ names = ["sv", "sv_se"]
+
+ past = "för {0} sen"
+ future = "om {0}"
+ and_word = "och"
+
+ timeframes = {
+ "now": "just nu",
+ "second": "en sekund",
+ "seconds": "{0} några sekunder",
+ "minute": "en minut",
+ "minutes": "{0} minuter",
+ "hour": "en timme",
+ "hours": "{0} timmar",
+ "day": "en dag",
+ "days": "{0} dagar",
+ "week": "en vecka",
+ "weeks": "{0} veckor",
+ "month": "en månad",
+ "months": "{0} månader",
+ "year": "ett år",
+ "years": "{0} år",
+ }
+
+ month_names = [
+ "",
+ "januari",
+ "februari",
+ "mars",
+ "april",
+ "maj",
+ "juni",
+ "juli",
+ "augusti",
+ "september",
+ "oktober",
+ "november",
+ "december",
+ ]
+ month_abbreviations = [
+ "",
+ "jan",
+ "feb",
+ "mar",
+ "apr",
+ "maj",
+ "jun",
+ "jul",
+ "aug",
+ "sep",
+ "okt",
+ "nov",
+ "dec",
+ ]
+
+ day_names = [
+ "",
+ "måndag",
+ "tisdag",
+ "onsdag",
+ "torsdag",
+ "fredag",
+ "lördag",
+ "söndag",
+ ]
+ day_abbreviations = ["", "mån", "tis", "ons", "tor", "fre", "lör", "sön"]
+
+
+class FinnishLocale(Locale):
+
+ names = ["fi", "fi_fi"]
+
+    # The Finnish grammar is very complex, and it's hard to convert
+ # 1-to-1 to something like English.
+
+ past = "{0} sitten"
+ future = "{0} kuluttua"
+
+ timeframes = {
+ "now": ["juuri nyt", "juuri nyt"],
+ "second": ["sekunti", "sekunti"],
+ "seconds": ["{0} muutama sekunti", "{0} muutaman sekunnin"],
+ "minute": ["minuutti", "minuutin"],
+ "minutes": ["{0} minuuttia", "{0} minuutin"],
+ "hour": ["tunti", "tunnin"],
+ "hours": ["{0} tuntia", "{0} tunnin"],
+ "day": ["päivä", "päivä"],
+ "days": ["{0} päivää", "{0} päivän"],
+ "month": ["kuukausi", "kuukauden"],
+ "months": ["{0} kuukautta", "{0} kuukauden"],
+ "year": ["vuosi", "vuoden"],
+ "years": ["{0} vuotta", "{0} vuoden"],
+ }
+
+ # Months and days are lowercase in Finnish
+ month_names = [
+ "",
+ "tammikuu",
+ "helmikuu",
+ "maaliskuu",
+ "huhtikuu",
+ "toukokuu",
+ "kesäkuu",
+ "heinäkuu",
+ "elokuu",
+ "syyskuu",
+ "lokakuu",
+ "marraskuu",
+ "joulukuu",
+ ]
+
+ month_abbreviations = [
+ "",
+ "tammi",
+ "helmi",
+ "maalis",
+ "huhti",
+ "touko",
+ "kesä",
+ "heinä",
+ "elo",
+ "syys",
+ "loka",
+ "marras",
+ "joulu",
+ ]
+
+ day_names = [
+ "",
+ "maanantai",
+ "tiistai",
+ "keskiviikko",
+ "torstai",
+ "perjantai",
+ "lauantai",
+ "sunnuntai",
+ ]
+
+ day_abbreviations = ["", "ma", "ti", "ke", "to", "pe", "la", "su"]
+
+ def _format_timeframe(self, timeframe, delta):
+ return (
+ self.timeframes[timeframe][0].format(abs(delta)),
+ self.timeframes[timeframe][1].format(abs(delta)),
+ )
+
+ def _format_relative(self, humanized, timeframe, delta):
+ if timeframe == "now":
+ return humanized[0]
+
+ direction = self.past if delta < 0 else self.future
+ which = 0 if delta < 0 else 1
+
+ return direction.format(humanized[which])
+
+ def _ordinal_number(self, n):
+ return "{}.".format(n)
+
+
+class ChineseCNLocale(Locale):
+
+ names = ["zh", "zh_cn"]
+
+ past = "{0}前"
+ future = "{0}后"
+
+ timeframes = {
+ "now": "刚才",
+ "second": "一秒",
+ "seconds": "{0}秒",
+ "minute": "1分钟",
+ "minutes": "{0}分钟",
+ "hour": "1小时",
+ "hours": "{0}小时",
+ "day": "1天",
+ "days": "{0}天",
+ "week": "一周",
+ "weeks": "{0}周",
+ "month": "1个月",
+ "months": "{0}个月",
+ "year": "1年",
+ "years": "{0}年",
+ }
+
+ month_names = [
+ "",
+ "一月",
+ "二月",
+ "三月",
+ "四月",
+ "五月",
+ "六月",
+ "七月",
+ "八月",
+ "九月",
+ "十月",
+ "十一月",
+ "十二月",
+ ]
+ month_abbreviations = [
+ "",
+ " 1",
+ " 2",
+ " 3",
+ " 4",
+ " 5",
+ " 6",
+ " 7",
+ " 8",
+ " 9",
+ "10",
+ "11",
+ "12",
+ ]
+
+ day_names = ["", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六", "星期日"]
+ day_abbreviations = ["", "一", "二", "三", "四", "五", "六", "日"]
+
+
+class ChineseTWLocale(Locale):
+
+ names = ["zh_tw"]
+
+ past = "{0}前"
+ future = "{0}後"
+ and_word = "和"
+
+ timeframes = {
+ "now": "剛才",
+ "second": "1秒",
+ "seconds": "{0}秒",
+ "minute": "1分鐘",
+ "minutes": "{0}分鐘",
+ "hour": "1小時",
+ "hours": "{0}小時",
+ "day": "1天",
+ "days": "{0}天",
+ "week": "1週",
+ "weeks": "{0}週",
+ "month": "1個月",
+ "months": "{0}個月",
+ "year": "1年",
+ "years": "{0}年",
+ }
+
+ month_names = [
+ "",
+ "1月",
+ "2月",
+ "3月",
+ "4月",
+ "5月",
+ "6月",
+ "7月",
+ "8月",
+ "9月",
+ "10月",
+ "11月",
+ "12月",
+ ]
+ month_abbreviations = [
+ "",
+ " 1",
+ " 2",
+ " 3",
+ " 4",
+ " 5",
+ " 6",
+ " 7",
+ " 8",
+ " 9",
+ "10",
+ "11",
+ "12",
+ ]
+
+ day_names = ["", "週一", "週二", "週三", "週四", "週五", "週六", "週日"]
+ day_abbreviations = ["", "一", "二", "三", "四", "五", "六", "日"]
+
+
+class HongKongLocale(Locale):
+
+ names = ["zh_hk"]
+
+ past = "{0}前"
+ future = "{0}後"
+
+ timeframes = {
+ "now": "剛才",
+ "second": "1秒",
+ "seconds": "{0}秒",
+ "minute": "1分鐘",
+ "minutes": "{0}分鐘",
+ "hour": "1小時",
+ "hours": "{0}小時",
+ "day": "1天",
+ "days": "{0}天",
+ "week": "1星期",
+ "weeks": "{0}星期",
+ "month": "1個月",
+ "months": "{0}個月",
+ "year": "1年",
+ "years": "{0}年",
+ }
+
+ month_names = [
+ "",
+ "1月",
+ "2月",
+ "3月",
+ "4月",
+ "5月",
+ "6月",
+ "7月",
+ "8月",
+ "9月",
+ "10月",
+ "11月",
+ "12月",
+ ]
+ month_abbreviations = [
+ "",
+ " 1",
+ " 2",
+ " 3",
+ " 4",
+ " 5",
+ " 6",
+ " 7",
+ " 8",
+ " 9",
+ "10",
+ "11",
+ "12",
+ ]
+
+ day_names = ["", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六", "星期日"]
+ day_abbreviations = ["", "一", "二", "三", "四", "五", "六", "日"]
+
+
+class KoreanLocale(Locale):
+
+ names = ["ko", "ko_kr"]
+
+ past = "{0} 전"
+ future = "{0} 후"
+
+ timeframes = {
+ "now": "지금",
+ "second": "1초",
+ "seconds": "{0}초",
+ "minute": "1분",
+ "minutes": "{0}분",
+ "hour": "한시간",
+ "hours": "{0}시간",
+ "day": "하루",
+ "days": "{0}일",
+ "week": "1주",
+ "weeks": "{0}주",
+ "month": "한달",
+ "months": "{0}개월",
+ "year": "1년",
+ "years": "{0}년",
+ }
+
+ special_dayframes = {
+ -3: "그끄제",
+ -2: "그제",
+ -1: "어제",
+ 1: "내일",
+ 2: "모레",
+ 3: "글피",
+ 4: "그글피",
+ }
+
+ special_yearframes = {-2: "제작년", -1: "작년", 1: "내년", 2: "내후년"}
+
+ month_names = [
+ "",
+ "1월",
+ "2월",
+ "3월",
+ "4월",
+ "5월",
+ "6월",
+ "7월",
+ "8월",
+ "9월",
+ "10월",
+ "11월",
+ "12월",
+ ]
+ month_abbreviations = [
+ "",
+ " 1",
+ " 2",
+ " 3",
+ " 4",
+ " 5",
+ " 6",
+ " 7",
+ " 8",
+ " 9",
+ "10",
+ "11",
+ "12",
+ ]
+
+ day_names = ["", "월요일", "화요일", "수요일", "목요일", "금요일", "토요일", "일요일"]
+ day_abbreviations = ["", "월", "화", "수", "목", "금", "토", "일"]
+
+ def _ordinal_number(self, n):
+ ordinals = ["0", "첫", "두", "세", "네", "다섯", "여섯", "일곱", "여덟", "아홉", "열"]
+ if n < len(ordinals):
+ return "{}번째".format(ordinals[n])
+ return "{}번째".format(n)
+
+ def _format_relative(self, humanized, timeframe, delta):
+ if timeframe in ("day", "days"):
+ special = self.special_dayframes.get(delta)
+ if special:
+ return special
+ elif timeframe in ("year", "years"):
+ special = self.special_yearframes.get(delta)
+ if special:
+ return special
+
+ return super(KoreanLocale, self)._format_relative(humanized, timeframe, delta)
+
+
+# derived locale types & implementations.
+class DutchLocale(Locale):
+
+ names = ["nl", "nl_nl"]
+
+ past = "{0} geleden"
+ future = "over {0}"
+
+ timeframes = {
+ "now": "nu",
+ "second": "een seconde",
+ "seconds": "{0} seconden",
+ "minute": "een minuut",
+ "minutes": "{0} minuten",
+ "hour": "een uur",
+ "hours": "{0} uur",
+ "day": "een dag",
+ "days": "{0} dagen",
+ "week": "een week",
+ "weeks": "{0} weken",
+ "month": "een maand",
+ "months": "{0} maanden",
+ "year": "een jaar",
+ "years": "{0} jaar",
+ }
+
+ # In Dutch names of months and days are not starting with a capital letter
+ # like in the English language.
+ month_names = [
+ "",
+ "januari",
+ "februari",
+ "maart",
+ "april",
+ "mei",
+ "juni",
+ "juli",
+ "augustus",
+ "september",
+ "oktober",
+ "november",
+ "december",
+ ]
+ month_abbreviations = [
+ "",
+ "jan",
+ "feb",
+ "mrt",
+ "apr",
+ "mei",
+ "jun",
+ "jul",
+ "aug",
+ "sep",
+ "okt",
+ "nov",
+ "dec",
+ ]
+
+ day_names = [
+ "",
+ "maandag",
+ "dinsdag",
+ "woensdag",
+ "donderdag",
+ "vrijdag",
+ "zaterdag",
+ "zondag",
+ ]
+ day_abbreviations = ["", "ma", "di", "wo", "do", "vr", "za", "zo"]
+
+
+class SlavicBaseLocale(Locale):
+ def _format_timeframe(self, timeframe, delta):
+
+ form = self.timeframes[timeframe]
+ delta = abs(delta)
+
+ if isinstance(form, list):
+
+ if delta % 10 == 1 and delta % 100 != 11:
+ form = form[0]
+ elif 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20):
+ form = form[1]
+ else:
+ form = form[2]
+
+ return form.format(delta)
+
+
+class BelarusianLocale(SlavicBaseLocale):
+
+ names = ["be", "be_by"]
+
+ past = "{0} таму"
+ future = "праз {0}"
+
+ timeframes = {
+ "now": "зараз",
+ "second": "секунду",
+ "seconds": "{0} некалькі секунд",
+ "minute": "хвіліну",
+ "minutes": ["{0} хвіліну", "{0} хвіліны", "{0} хвілін"],
+ "hour": "гадзіну",
+ "hours": ["{0} гадзіну", "{0} гадзіны", "{0} гадзін"],
+ "day": "дзень",
+ "days": ["{0} дзень", "{0} дні", "{0} дзён"],
+ "month": "месяц",
+ "months": ["{0} месяц", "{0} месяцы", "{0} месяцаў"],
+ "year": "год",
+ "years": ["{0} год", "{0} гады", "{0} гадоў"],
+ }
+
+ month_names = [
+ "",
+ "студзеня",
+ "лютага",
+ "сакавіка",
+ "красавіка",
+ "траўня",
+ "чэрвеня",
+ "ліпеня",
+ "жніўня",
+ "верасня",
+ "кастрычніка",
+ "лістапада",
+ "снежня",
+ ]
+ month_abbreviations = [
+ "",
+ "студ",
+ "лют",
+ "сак",
+ "крас",
+ "трав",
+ "чэрв",
+ "ліп",
+ "жнів",
+ "вер",
+ "каст",
+ "ліст",
+ "снеж",
+ ]
+
+ day_names = [
+ "",
+ "панядзелак",
+ "аўторак",
+ "серада",
+ "чацвер",
+ "пятніца",
+ "субота",
+ "нядзеля",
+ ]
+ day_abbreviations = ["", "пн", "ат", "ср", "чц", "пт", "сб", "нд"]
+
+
+class PolishLocale(SlavicBaseLocale):
+
+ names = ["pl", "pl_pl"]
+
+ past = "{0} temu"
+ future = "za {0}"
+
+ # The nouns should be in genitive case (Polish: "dopełniacz")
+ # in order to correctly form `past` & `future` expressions.
+ timeframes = {
+ "now": "teraz",
+ "second": "sekundę",
+ "seconds": ["{0} sekund", "{0} sekundy", "{0} sekund"],
+ "minute": "minutę",
+ "minutes": ["{0} minut", "{0} minuty", "{0} minut"],
+ "hour": "godzinę",
+ "hours": ["{0} godzin", "{0} godziny", "{0} godzin"],
+ "day": "dzień",
+ "days": "{0} dni",
+ "week": "tydzień",
+ "weeks": ["{0} tygodni", "{0} tygodnie", "{0} tygodni"],
+ "month": "miesiąc",
+ "months": ["{0} miesięcy", "{0} miesiące", "{0} miesięcy"],
+ "year": "rok",
+ "years": ["{0} lat", "{0} lata", "{0} lat"],
+ }
+
+ month_names = [
+ "",
+ "styczeń",
+ "luty",
+ "marzec",
+ "kwiecień",
+ "maj",
+ "czerwiec",
+ "lipiec",
+ "sierpień",
+ "wrzesień",
+ "październik",
+ "listopad",
+ "grudzień",
+ ]
+ month_abbreviations = [
+ "",
+ "sty",
+ "lut",
+ "mar",
+ "kwi",
+ "maj",
+ "cze",
+ "lip",
+ "sie",
+ "wrz",
+ "paź",
+ "lis",
+ "gru",
+ ]
+
+ day_names = [
+ "",
+ "poniedziałek",
+ "wtorek",
+ "środa",
+ "czwartek",
+ "piątek",
+ "sobota",
+ "niedziela",
+ ]
+ day_abbreviations = ["", "Pn", "Wt", "Śr", "Czw", "Pt", "So", "Nd"]
+
+
+class RussianLocale(SlavicBaseLocale):
+
+ names = ["ru", "ru_ru"]
+
+ past = "{0} назад"
+ future = "через {0}"
+
+ timeframes = {
+ "now": "сейчас",
+ "second": "Второй",
+ "seconds": "{0} несколько секунд",
+ "minute": "минуту",
+ "minutes": ["{0} минуту", "{0} минуты", "{0} минут"],
+ "hour": "час",
+ "hours": ["{0} час", "{0} часа", "{0} часов"],
+ "day": "день",
+ "days": ["{0} день", "{0} дня", "{0} дней"],
+ "week": "неделю",
+ "weeks": ["{0} неделю", "{0} недели", "{0} недель"],
+ "month": "месяц",
+ "months": ["{0} месяц", "{0} месяца", "{0} месяцев"],
+ "year": "год",
+ "years": ["{0} год", "{0} года", "{0} лет"],
+ }
+
+ month_names = [
+ "",
+ "января",
+ "февраля",
+ "марта",
+ "апреля",
+ "мая",
+ "июня",
+ "июля",
+ "августа",
+ "сентября",
+ "октября",
+ "ноября",
+ "декабря",
+ ]
+ month_abbreviations = [
+ "",
+ "янв",
+ "фев",
+ "мар",
+ "апр",
+ "май",
+ "июн",
+ "июл",
+ "авг",
+ "сен",
+ "окт",
+ "ноя",
+ "дек",
+ ]
+
+ day_names = [
+ "",
+ "понедельник",
+ "вторник",
+ "среда",
+ "четверг",
+ "пятница",
+ "суббота",
+ "воскресенье",
+ ]
+ day_abbreviations = ["", "пн", "вт", "ср", "чт", "пт", "сб", "вс"]
+
+
+class AfrikaansLocale(Locale):
+
+ names = ["af", "af_nl"]
+
+ past = "{0} gelede"
+ future = "in {0}"
+
+ timeframes = {
+ "now": "nou",
+ "second": "n sekonde",
+ "seconds": "{0} sekondes",
+ "minute": "minuut",
+ "minutes": "{0} minute",
+ "hour": "uur",
+ "hours": "{0} ure",
+ "day": "een dag",
+ "days": "{0} dae",
+ "month": "een maand",
+ "months": "{0} maande",
+ "year": "een jaar",
+ "years": "{0} jaar",
+ }
+
+ month_names = [
+ "",
+ "Januarie",
+ "Februarie",
+ "Maart",
+ "April",
+ "Mei",
+ "Junie",
+ "Julie",
+ "Augustus",
+ "September",
+ "Oktober",
+ "November",
+ "Desember",
+ ]
+ month_abbreviations = [
+ "",
+ "Jan",
+ "Feb",
+ "Mrt",
+ "Apr",
+ "Mei",
+ "Jun",
+ "Jul",
+ "Aug",
+ "Sep",
+ "Okt",
+ "Nov",
+ "Des",
+ ]
+
+ day_names = [
+ "",
+ "Maandag",
+ "Dinsdag",
+ "Woensdag",
+ "Donderdag",
+ "Vrydag",
+ "Saterdag",
+ "Sondag",
+ ]
+ day_abbreviations = ["", "Ma", "Di", "Wo", "Do", "Vr", "Za", "So"]
+
+
+class BulgarianLocale(SlavicBaseLocale):
+
+ names = ["bg", "bg_BG"]
+
+ past = "{0} назад"
+ future = "напред {0}"
+
+ timeframes = {
+ "now": "сега",
+ "second": "секунда",
+ "seconds": "{0} няколко секунди",
+ "minute": "минута",
+ "minutes": ["{0} минута", "{0} минути", "{0} минути"],
+ "hour": "час",
+ "hours": ["{0} час", "{0} часа", "{0} часа"],
+ "day": "ден",
+ "days": ["{0} ден", "{0} дни", "{0} дни"],
+ "month": "месец",
+ "months": ["{0} месец", "{0} месеца", "{0} месеца"],
+ "year": "година",
+ "years": ["{0} година", "{0} години", "{0} години"],
+ }
+
+ month_names = [
+ "",
+ "януари",
+ "февруари",
+ "март",
+ "април",
+ "май",
+ "юни",
+ "юли",
+ "август",
+ "септември",
+ "октомври",
+ "ноември",
+ "декември",
+ ]
+ month_abbreviations = [
+ "",
+ "ян",
+ "февр",
+ "март",
+ "апр",
+ "май",
+ "юни",
+ "юли",
+ "авг",
+ "септ",
+ "окт",
+ "ноем",
+ "дек",
+ ]
+
+ day_names = [
+ "",
+ "понеделник",
+ "вторник",
+ "сряда",
+ "четвъртък",
+ "петък",
+ "събота",
+ "неделя",
+ ]
+ day_abbreviations = ["", "пон", "вт", "ср", "четв", "пет", "съб", "нед"]
+
+
+class UkrainianLocale(SlavicBaseLocale):
+
+ names = ["ua", "uk_ua"]
+
+ past = "{0} тому"
+ future = "за {0}"
+
+ timeframes = {
+ "now": "зараз",
+ "second": "секунда",
+ "seconds": "{0} кілька секунд",
+ "minute": "хвилину",
+ "minutes": ["{0} хвилину", "{0} хвилини", "{0} хвилин"],
+ "hour": "годину",
+ "hours": ["{0} годину", "{0} години", "{0} годин"],
+ "day": "день",
+ "days": ["{0} день", "{0} дні", "{0} днів"],
+ "month": "місяць",
+ "months": ["{0} місяць", "{0} місяці", "{0} місяців"],
+ "year": "рік",
+ "years": ["{0} рік", "{0} роки", "{0} років"],
+ }
+
+ month_names = [
+ "",
+ "січня",
+ "лютого",
+ "березня",
+ "квітня",
+ "травня",
+ "червня",
+ "липня",
+ "серпня",
+ "вересня",
+ "жовтня",
+ "листопада",
+ "грудня",
+ ]
+ month_abbreviations = [
+ "",
+ "січ",
+ "лют",
+ "бер",
+ "квіт",
+ "трав",
+ "черв",
+ "лип",
+ "серп",
+ "вер",
+ "жовт",
+ "лист",
+ "груд",
+ ]
+
+ day_names = [
+ "",
+ "понеділок",
+ "вівторок",
+ "середа",
+ "четвер",
+ "п’ятниця",
+ "субота",
+ "неділя",
+ ]
+ day_abbreviations = ["", "пн", "вт", "ср", "чт", "пт", "сб", "нд"]
+
+
+class MacedonianLocale(SlavicBaseLocale):
+ names = ["mk", "mk_mk"]
+
+ past = "пред {0}"
+ future = "за {0}"
+
+ timeframes = {
+ "now": "сега",
+ "second": "една секунда",
+ "seconds": ["{0} секунда", "{0} секунди", "{0} секунди"],
+ "minute": "една минута",
+ "minutes": ["{0} минута", "{0} минути", "{0} минути"],
+ "hour": "еден саат",
+ "hours": ["{0} саат", "{0} саати", "{0} саати"],
+ "day": "еден ден",
+ "days": ["{0} ден", "{0} дена", "{0} дена"],
+ "week": "една недела",
+ "weeks": ["{0} недела", "{0} недели", "{0} недели"],
+ "month": "еден месец",
+ "months": ["{0} месец", "{0} месеци", "{0} месеци"],
+ "year": "една година",
+ "years": ["{0} година", "{0} години", "{0} години"],
+ }
+
+ meridians = {"am": "дп", "pm": "пп", "AM": "претпладне", "PM": "попладне"}
+
+ month_names = [
+ "",
+ "Јануари",
+ "Февруари",
+ "Март",
+ "Април",
+ "Мај",
+ "Јуни",
+ "Јули",
+ "Август",
+ "Септември",
+ "Октомври",
+ "Ноември",
+ "Декември",
+ ]
+ month_abbreviations = [
+ "",
+ "Јан",
+ "Фев",
+ "Мар",
+ "Апр",
+ "Мај",
+ "Јун",
+ "Јул",
+ "Авг",
+ "Септ",
+ "Окт",
+ "Ноем",
+ "Декем",
+ ]
+
+ day_names = [
+ "",
+ "Понеделник",
+ "Вторник",
+ "Среда",
+ "Четврток",
+ "Петок",
+ "Сабота",
+ "Недела",
+ ]
+ day_abbreviations = [
+ "",
+ "Пон",
+ "Вт",
+ "Сре",
+ "Чет",
+ "Пет",
+ "Саб",
+ "Нед",
+ ]
+
+
+class GermanBaseLocale(Locale):
+    # Shared base for the German-language locales (de, de_ch, de_at).
+
+    past = "vor {0}"
+    future = "in {0}"
+    and_word = "und"
+
+    # These singular forms are in the dative case, which is what the
+    # prepositions "vor"/"in" in ``past``/``future`` require.
+    timeframes = {
+        "now": "gerade eben",
+        "second": "eine Sekunde",
+        "seconds": "{0} Sekunden",
+        "minute": "einer Minute",
+        "minutes": "{0} Minuten",
+        "hour": "einer Stunde",
+        "hours": "{0} Stunden",
+        "day": "einem Tag",
+        "days": "{0} Tagen",
+        "week": "einer Woche",
+        "weeks": "{0} Wochen",
+        "month": "einem Monat",
+        "months": "{0} Monaten",
+        "year": "einem Jahr",
+        "years": "{0} Jahren",
+    }
+
+    # Without "vor"/"in" the nominative case is needed instead of the
+    # dative, so keep a second table with the singular forms replaced.
+    timeframes_only_distance = timeframes.copy()
+    timeframes_only_distance["minute"] = "eine Minute"
+    timeframes_only_distance["hour"] = "eine Stunde"
+    timeframes_only_distance["day"] = "ein Tag"
+    timeframes_only_distance["week"] = "eine Woche"
+    timeframes_only_distance["month"] = "ein Monat"
+    timeframes_only_distance["year"] = "ein Jahr"
+
+    month_names = [
+        "",
+        "Januar",
+        "Februar",
+        "März",
+        "April",
+        "Mai",
+        "Juni",
+        "Juli",
+        "August",
+        "September",
+        "Oktober",
+        "November",
+        "Dezember",
+    ]
+
+    month_abbreviations = [
+        "",
+        "Jan",
+        "Feb",
+        "Mär",
+        "Apr",
+        "Mai",
+        "Jun",
+        "Jul",
+        "Aug",
+        "Sep",
+        "Okt",
+        "Nov",
+        "Dez",
+    ]
+
+    day_names = [
+        "",
+        "Montag",
+        "Dienstag",
+        "Mittwoch",
+        "Donnerstag",
+        "Freitag",
+        "Samstag",
+        "Sonntag",
+    ]
+
+    day_abbreviations = ["", "Mo", "Di", "Mi", "Do", "Fr", "Sa", "So"]
+
+    def _ordinal_number(self, n):
+        # German ordinals are written as the number followed by a period.
+        return "{}.".format(n)
+
+    def describe(self, timeframe, delta=0, only_distance=False):
+        """Describes a delta within a timeframe in plain language.
+
+        :param timeframe: a string representing a timeframe.
+        :param delta: a quantity representing a delta in a timeframe.
+        :param only_distance: return only distance eg: "11 seconds" without "in" or "ago" keywords
+        """
+
+        if not only_distance:
+            return super(GermanBaseLocale, self).describe(
+                timeframe, delta, only_distance
+            )
+
+        # German uses a different case without 'in' or 'ago'
+        humanized = self.timeframes_only_distance[timeframe].format(trunc(abs(delta)))
+
+        return humanized
+
+
+class GermanLocale(GermanBaseLocale, Locale):
+    # German (Germany); all behaviour is inherited from GermanBaseLocale.
+
+    names = ["de", "de_de"]
+
+
+class SwissLocale(GermanBaseLocale, Locale):
+    # Swiss Standard German; inherits everything from GermanBaseLocale.
+
+    names = ["de_ch"]
+
+
+class AustrianLocale(GermanBaseLocale, Locale):
+    # Austrian German; differs from GermanBaseLocale only in the name of
+    # January ("Jänner" instead of "Januar").
+
+    names = ["de_at"]
+
+    month_names = [
+        "",
+        "Jänner",
+        "Februar",
+        "März",
+        "April",
+        "Mai",
+        "Juni",
+        "Juli",
+        "August",
+        "September",
+        "Oktober",
+        "November",
+        "Dezember",
+    ]
+
+
+class NorwegianLocale(Locale):
+
+ names = ["nb", "nb_no"]
+
+ past = "for {0} siden"
+ future = "om {0}"
+
+ timeframes = {
+ "now": "nå nettopp",
+ "second": "et sekund",
+ "seconds": "{0} noen sekunder",
+ "minute": "ett minutt",
+ "minutes": "{0} minutter",
+ "hour": "en time",
+ "hours": "{0} timer",
+ "day": "en dag",
+ "days": "{0} dager",
+ "month": "en måned",
+ "months": "{0} måneder",
+ "year": "ett år",
+ "years": "{0} år",
+ }
+
+ month_names = [
+ "",
+ "januar",
+ "februar",
+ "mars",
+ "april",
+ "mai",
+ "juni",
+ "juli",
+ "august",
+ "september",
+ "oktober",
+ "november",
+ "desember",
+ ]
+ month_abbreviations = [
+ "",
+ "jan",
+ "feb",
+ "mar",
+ "apr",
+ "mai",
+ "jun",
+ "jul",
+ "aug",
+ "sep",
+ "okt",
+ "nov",
+ "des",
+ ]
+
+ day_names = [
+ "",
+ "mandag",
+ "tirsdag",
+ "onsdag",
+ "torsdag",
+ "fredag",
+ "lørdag",
+ "søndag",
+ ]
+ day_abbreviations = ["", "ma", "ti", "on", "to", "fr", "lø", "sø"]
+
+
+class NewNorwegianLocale(Locale):
+
+ names = ["nn", "nn_no"]
+
+ past = "for {0} sidan"
+ future = "om {0}"
+
+ timeframes = {
+ "now": "no nettopp",
+ "second": "et sekund",
+ "seconds": "{0} nokre sekund",
+ "minute": "ett minutt",
+ "minutes": "{0} minutt",
+ "hour": "ein time",
+ "hours": "{0} timar",
+ "day": "ein dag",
+ "days": "{0} dagar",
+ "month": "en månad",
+ "months": "{0} månader",
+ "year": "eit år",
+ "years": "{0} år",
+ }
+
+ month_names = [
+ "",
+ "januar",
+ "februar",
+ "mars",
+ "april",
+ "mai",
+ "juni",
+ "juli",
+ "august",
+ "september",
+ "oktober",
+ "november",
+ "desember",
+ ]
+ month_abbreviations = [
+ "",
+ "jan",
+ "feb",
+ "mar",
+ "apr",
+ "mai",
+ "jun",
+ "jul",
+ "aug",
+ "sep",
+ "okt",
+ "nov",
+ "des",
+ ]
+
+ day_names = [
+ "",
+ "måndag",
+ "tysdag",
+ "onsdag",
+ "torsdag",
+ "fredag",
+ "laurdag",
+ "sundag",
+ ]
+ day_abbreviations = ["", "må", "ty", "on", "to", "fr", "la", "su"]
+
+
+class PortugueseLocale(Locale):
+ names = ["pt", "pt_pt"]
+
+ past = "há {0}"
+ future = "em {0}"
+ and_word = "e"
+
+ timeframes = {
+ "now": "agora",
+ "second": "um segundo",
+ "seconds": "{0} segundos",
+ "minute": "um minuto",
+ "minutes": "{0} minutos",
+ "hour": "uma hora",
+ "hours": "{0} horas",
+ "day": "um dia",
+ "days": "{0} dias",
+ "week": "uma semana",
+ "weeks": "{0} semanas",
+ "month": "um mês",
+ "months": "{0} meses",
+ "year": "um ano",
+ "years": "{0} anos",
+ }
+
+ month_names = [
+ "",
+ "Janeiro",
+ "Fevereiro",
+ "Março",
+ "Abril",
+ "Maio",
+ "Junho",
+ "Julho",
+ "Agosto",
+ "Setembro",
+ "Outubro",
+ "Novembro",
+ "Dezembro",
+ ]
+ month_abbreviations = [
+ "",
+ "Jan",
+ "Fev",
+ "Mar",
+ "Abr",
+ "Mai",
+ "Jun",
+ "Jul",
+ "Ago",
+ "Set",
+ "Out",
+ "Nov",
+ "Dez",
+ ]
+
+ day_names = [
+ "",
+ "Segunda-feira",
+ "Terça-feira",
+ "Quarta-feira",
+ "Quinta-feira",
+ "Sexta-feira",
+ "Sábado",
+ "Domingo",
+ ]
+ day_abbreviations = ["", "Seg", "Ter", "Qua", "Qui", "Sex", "Sab", "Dom"]
+
+
+class BrazilianPortugueseLocale(PortugueseLocale):
+    # Brazilian Portuguese; differs from European Portuguese only in the
+    # past-tense frame ("faz" instead of "há").
+    names = ["pt_br"]
+
+    past = "faz {0}"
+
+
+class TagalogLocale(Locale):
+
+ names = ["tl", "tl_ph"]
+
+ past = "nakaraang {0}"
+ future = "{0} mula ngayon"
+
+ timeframes = {
+ "now": "ngayon lang",
+ "second": "isang segundo",
+ "seconds": "{0} segundo",
+ "minute": "isang minuto",
+ "minutes": "{0} minuto",
+ "hour": "isang oras",
+ "hours": "{0} oras",
+ "day": "isang araw",
+ "days": "{0} araw",
+ "week": "isang linggo",
+ "weeks": "{0} linggo",
+ "month": "isang buwan",
+ "months": "{0} buwan",
+ "year": "isang taon",
+ "years": "{0} taon",
+ }
+
+ month_names = [
+ "",
+ "Enero",
+ "Pebrero",
+ "Marso",
+ "Abril",
+ "Mayo",
+ "Hunyo",
+ "Hulyo",
+ "Agosto",
+ "Setyembre",
+ "Oktubre",
+ "Nobyembre",
+ "Disyembre",
+ ]
+ month_abbreviations = [
+ "",
+ "Ene",
+ "Peb",
+ "Mar",
+ "Abr",
+ "May",
+ "Hun",
+ "Hul",
+ "Ago",
+ "Set",
+ "Okt",
+ "Nob",
+ "Dis",
+ ]
+
+ day_names = [
+ "",
+ "Lunes",
+ "Martes",
+ "Miyerkules",
+ "Huwebes",
+ "Biyernes",
+ "Sabado",
+ "Linggo",
+ ]
+ day_abbreviations = ["", "Lun", "Mar", "Miy", "Huw", "Biy", "Sab", "Lin"]
+
+ meridians = {"am": "nu", "pm": "nh", "AM": "ng umaga", "PM": "ng hapon"}
+
+ def _ordinal_number(self, n):
+ return "ika-{}".format(n)
+
+
+class VietnameseLocale(Locale):
+
+ names = ["vi", "vi_vn"]
+
+ past = "{0} trước"
+ future = "{0} nữa"
+
+ timeframes = {
+ "now": "hiện tại",
+ "second": "một giây",
+ "seconds": "{0} giây",
+ "minute": "một phút",
+ "minutes": "{0} phút",
+ "hour": "một giờ",
+ "hours": "{0} giờ",
+ "day": "một ngày",
+ "days": "{0} ngày",
+ "week": "một tuần",
+ "weeks": "{0} tuần",
+ "month": "một tháng",
+ "months": "{0} tháng",
+ "year": "một năm",
+ "years": "{0} năm",
+ }
+
+ month_names = [
+ "",
+ "Tháng Một",
+ "Tháng Hai",
+ "Tháng Ba",
+ "Tháng Tư",
+ "Tháng Năm",
+ "Tháng Sáu",
+ "Tháng Bảy",
+ "Tháng Tám",
+ "Tháng Chín",
+ "Tháng Mười",
+ "Tháng Mười Một",
+ "Tháng Mười Hai",
+ ]
+ month_abbreviations = [
+ "",
+ "Tháng 1",
+ "Tháng 2",
+ "Tháng 3",
+ "Tháng 4",
+ "Tháng 5",
+ "Tháng 6",
+ "Tháng 7",
+ "Tháng 8",
+ "Tháng 9",
+ "Tháng 10",
+ "Tháng 11",
+ "Tháng 12",
+ ]
+
+ day_names = [
+ "",
+ "Thứ Hai",
+ "Thứ Ba",
+ "Thứ Tư",
+ "Thứ Năm",
+ "Thứ Sáu",
+ "Thứ Bảy",
+ "Chủ Nhật",
+ ]
+ day_abbreviations = ["", "Thứ 2", "Thứ 3", "Thứ 4", "Thứ 5", "Thứ 6", "Thứ 7", "CN"]
+
+
+class TurkishLocale(Locale):
+
+ names = ["tr", "tr_tr"]
+
+ past = "{0} önce"
+ future = "{0} sonra"
+
+ timeframes = {
+ "now": "şimdi",
+ "second": "bir saniye",
+ "seconds": "{0} saniye",
+ "minute": "bir dakika",
+ "minutes": "{0} dakika",
+ "hour": "bir saat",
+ "hours": "{0} saat",
+ "day": "bir gün",
+ "days": "{0} gün",
+ "month": "bir ay",
+ "months": "{0} ay",
+ "year": "yıl",
+ "years": "{0} yıl",
+ }
+
+ month_names = [
+ "",
+ "Ocak",
+ "Şubat",
+ "Mart",
+ "Nisan",
+ "Mayıs",
+ "Haziran",
+ "Temmuz",
+ "Ağustos",
+ "Eylül",
+ "Ekim",
+ "Kasım",
+ "Aralık",
+ ]
+ month_abbreviations = [
+ "",
+ "Oca",
+ "Şub",
+ "Mar",
+ "Nis",
+ "May",
+ "Haz",
+ "Tem",
+ "Ağu",
+ "Eyl",
+ "Eki",
+ "Kas",
+ "Ara",
+ ]
+
+ day_names = [
+ "",
+ "Pazartesi",
+ "Salı",
+ "Çarşamba",
+ "Perşembe",
+ "Cuma",
+ "Cumartesi",
+ "Pazar",
+ ]
+ day_abbreviations = ["", "Pzt", "Sal", "Çar", "Per", "Cum", "Cmt", "Paz"]
+
+
+class AzerbaijaniLocale(Locale):
+
+ names = ["az", "az_az"]
+
+ past = "{0} əvvəl"
+ future = "{0} sonra"
+
+ timeframes = {
+ "now": "indi",
+ "second": "saniyə",
+ "seconds": "{0} saniyə",
+ "minute": "bir dəqiqə",
+ "minutes": "{0} dəqiqə",
+ "hour": "bir saat",
+ "hours": "{0} saat",
+ "day": "bir gün",
+ "days": "{0} gün",
+ "month": "bir ay",
+ "months": "{0} ay",
+ "year": "il",
+ "years": "{0} il",
+ }
+
+ month_names = [
+ "",
+ "Yanvar",
+ "Fevral",
+ "Mart",
+ "Aprel",
+ "May",
+ "İyun",
+ "İyul",
+ "Avqust",
+ "Sentyabr",
+ "Oktyabr",
+ "Noyabr",
+ "Dekabr",
+ ]
+ month_abbreviations = [
+ "",
+ "Yan",
+ "Fev",
+ "Mar",
+ "Apr",
+ "May",
+ "İyn",
+ "İyl",
+ "Avq",
+ "Sen",
+ "Okt",
+ "Noy",
+ "Dek",
+ ]
+
+ day_names = [
+ "",
+ "Bazar ertəsi",
+ "Çərşənbə axşamı",
+ "Çərşənbə",
+ "Cümə axşamı",
+ "Cümə",
+ "Şənbə",
+ "Bazar",
+ ]
+ day_abbreviations = ["", "Ber", "Çax", "Çər", "Cax", "Cüm", "Şnb", "Bzr"]
+
+
+class ArabicLocale(Locale):
+    # Modern Standard Arabic base locale, shared by most Arabic country
+    # variants (regional subclasses below override only the month names).
+    names = [
+        "ar",
+        "ar_ae",
+        "ar_bh",
+        "ar_dj",
+        "ar_eg",
+        "ar_eh",
+        "ar_er",
+        "ar_km",
+        "ar_kw",
+        "ar_ly",
+        "ar_om",
+        "ar_qa",
+        "ar_sa",
+        "ar_sd",
+        "ar_so",
+        "ar_ss",
+        "ar_td",
+        "ar_ye",
+    ]
+
+    past = "منذ {0}"
+    future = "خلال {0}"
+
+    # Dict-valued frames carry three grammatical variants used by
+    # _format_timeframe: "double" (exactly 2 — the dual), "ten" (3..10),
+    # and "higher" (everything else).
+    timeframes = {
+        "now": "الآن",
+        "second": "ثانية",
+        "seconds": {"double": "ثانيتين", "ten": "{0} ثوان", "higher": "{0} ثانية"},
+        "minute": "دقيقة",
+        "minutes": {"double": "دقيقتين", "ten": "{0} دقائق", "higher": "{0} دقيقة"},
+        "hour": "ساعة",
+        "hours": {"double": "ساعتين", "ten": "{0} ساعات", "higher": "{0} ساعة"},
+        "day": "يوم",
+        "days": {"double": "يومين", "ten": "{0} أيام", "higher": "{0} يوم"},
+        "month": "شهر",
+        "months": {"double": "شهرين", "ten": "{0} أشهر", "higher": "{0} شهر"},
+        "year": "سنة",
+        "years": {"double": "سنتين", "ten": "{0} سنوات", "higher": "{0} سنة"},
+    }
+
+    month_names = [
+        "",
+        "يناير",
+        "فبراير",
+        "مارس",
+        "أبريل",
+        "مايو",
+        "يونيو",
+        "يوليو",
+        "أغسطس",
+        "سبتمبر",
+        "أكتوبر",
+        "نوفمبر",
+        "ديسمبر",
+    ]
+    month_abbreviations = [
+        "",
+        "يناير",
+        "فبراير",
+        "مارس",
+        "أبريل",
+        "مايو",
+        "يونيو",
+        "يوليو",
+        "أغسطس",
+        "سبتمبر",
+        "أكتوبر",
+        "نوفمبر",
+        "ديسمبر",
+    ]
+
+    day_names = [
+        "",
+        "الإثنين",
+        "الثلاثاء",
+        "الأربعاء",
+        "الخميس",
+        "الجمعة",
+        "السبت",
+        "الأحد",
+    ]
+    day_abbreviations = ["", "إثنين", "ثلاثاء", "أربعاء", "خميس", "جمعة", "سبت", "أحد"]
+
+    def _format_timeframe(self, timeframe, delta):
+        # Select the grammatical variant matching |delta|; plain-string
+        # frames (e.g. "now", the singulars) are used as-is.
+        form = self.timeframes[timeframe]
+        delta = abs(delta)
+        if isinstance(form, dict):
+            if delta == 2:
+                form = form["double"]
+            elif delta > 2 and delta <= 10:
+                form = form["ten"]
+            else:
+                form = form["higher"]
+
+        return form.format(delta)
+
+
+class LevantArabicLocale(ArabicLocale):
+ names = ["ar_iq", "ar_jo", "ar_lb", "ar_ps", "ar_sy"]
+ month_names = [
+ "",
+ "كانون الثاني",
+ "شباط",
+ "آذار",
+ "نيسان",
+ "أيار",
+ "حزيران",
+ "تموز",
+ "آب",
+ "أيلول",
+ "تشرين الأول",
+ "تشرين الثاني",
+ "كانون الأول",
+ ]
+ month_abbreviations = [
+ "",
+ "كانون الثاني",
+ "شباط",
+ "آذار",
+ "نيسان",
+ "أيار",
+ "حزيران",
+ "تموز",
+ "آب",
+ "أيلول",
+ "تشرين الأول",
+ "تشرين الثاني",
+ "كانون الأول",
+ ]
+
+
+class AlgeriaTunisiaArabicLocale(ArabicLocale):
+ names = ["ar_tn", "ar_dz"]
+ month_names = [
+ "",
+ "جانفي",
+ "فيفري",
+ "مارس",
+ "أفريل",
+ "ماي",
+ "جوان",
+ "جويلية",
+ "أوت",
+ "سبتمبر",
+ "أكتوبر",
+ "نوفمبر",
+ "ديسمبر",
+ ]
+ month_abbreviations = [
+ "",
+ "جانفي",
+ "فيفري",
+ "مارس",
+ "أفريل",
+ "ماي",
+ "جوان",
+ "جويلية",
+ "أوت",
+ "سبتمبر",
+ "أكتوبر",
+ "نوفمبر",
+ "ديسمبر",
+ ]
+
+
+class MauritaniaArabicLocale(ArabicLocale):
+ names = ["ar_mr"]
+ month_names = [
+ "",
+ "يناير",
+ "فبراير",
+ "مارس",
+ "إبريل",
+ "مايو",
+ "يونيو",
+ "يوليو",
+ "أغشت",
+ "شتمبر",
+ "أكتوبر",
+ "نوفمبر",
+ "دجمبر",
+ ]
+ month_abbreviations = [
+ "",
+ "يناير",
+ "فبراير",
+ "مارس",
+ "إبريل",
+ "مايو",
+ "يونيو",
+ "يوليو",
+ "أغشت",
+ "شتمبر",
+ "أكتوبر",
+ "نوفمبر",
+ "دجمبر",
+ ]
+
+
+class MoroccoArabicLocale(ArabicLocale):
+ names = ["ar_ma"]
+ month_names = [
+ "",
+ "يناير",
+ "فبراير",
+ "مارس",
+ "أبريل",
+ "ماي",
+ "يونيو",
+ "يوليوز",
+ "غشت",
+ "شتنبر",
+ "أكتوبر",
+ "نونبر",
+ "دجنبر",
+ ]
+ month_abbreviations = [
+ "",
+ "يناير",
+ "فبراير",
+ "مارس",
+ "أبريل",
+ "ماي",
+ "يونيو",
+ "يوليوز",
+ "غشت",
+ "شتنبر",
+ "أكتوبر",
+ "نونبر",
+ "دجنبر",
+ ]
+
+
+class IcelandicLocale(Locale):
+    # Icelandic needs different noun cases after "fyrir ... síðan" (past)
+    # and "eftir" (future), so each timeframe is a (past, future) pair.
+    def _format_timeframe(self, timeframe, delta):
+        # Pick index 0 (past form) for negative deltas, index 1 (future
+        # form) for positive ones; "now" is a plain string and is only
+        # reached with delta == 0, so it is used unchanged.
+        timeframe = self.timeframes[timeframe]
+        if delta < 0:
+            timeframe = timeframe[0]
+        elif delta > 0:
+            timeframe = timeframe[1]
+
+        return timeframe.format(abs(delta))
+
+    names = ["is", "is_is"]
+
+    past = "fyrir {0} síðan"
+    future = "eftir {0}"
+
+    # (past-case form, future-case form) tuples; see _format_timeframe.
+    timeframes = {
+        "now": "rétt í þessu",
+        "second": ("sekúndu", "sekúndu"),
+        "seconds": ("{0} nokkrum sekúndum", "nokkrar sekúndur"),
+        "minute": ("einni mínútu", "eina mínútu"),
+        "minutes": ("{0} mínútum", "{0} mínútur"),
+        "hour": ("einum tíma", "einn tíma"),
+        "hours": ("{0} tímum", "{0} tíma"),
+        "day": ("einum degi", "einn dag"),
+        "days": ("{0} dögum", "{0} daga"),
+        "month": ("einum mánuði", "einn mánuð"),
+        "months": ("{0} mánuðum", "{0} mánuði"),
+        "year": ("einu ári", "eitt ár"),
+        "years": ("{0} árum", "{0} ár"),
+    }
+
+    meridians = {"am": "f.h.", "pm": "e.h.", "AM": "f.h.", "PM": "e.h."}
+
+    month_names = [
+        "",
+        "janúar",
+        "febrúar",
+        "mars",
+        "apríl",
+        "maí",
+        "júní",
+        "júlí",
+        "ágúst",
+        "september",
+        "október",
+        "nóvember",
+        "desember",
+    ]
+    month_abbreviations = [
+        "",
+        "jan",
+        "feb",
+        "mar",
+        "apr",
+        "maí",
+        "jún",
+        "júl",
+        "ágú",
+        "sep",
+        "okt",
+        "nóv",
+        "des",
+    ]
+
+    day_names = [
+        "",
+        "mánudagur",
+        "þriðjudagur",
+        "miðvikudagur",
+        "fimmtudagur",
+        "föstudagur",
+        "laugardagur",
+        "sunnudagur",
+    ]
+    day_abbreviations = ["", "mán", "þri", "mið", "fim", "fös", "lau", "sun"]
+
+
+class DanishLocale(Locale):
+
+ names = ["da", "da_dk"]
+
+ past = "for {0} siden"
+ future = "efter {0}"
+ and_word = "og"
+
+ timeframes = {
+ "now": "lige nu",
+ "second": "et sekund",
+ "seconds": "{0} et par sekunder",
+ "minute": "et minut",
+ "minutes": "{0} minutter",
+ "hour": "en time",
+ "hours": "{0} timer",
+ "day": "en dag",
+ "days": "{0} dage",
+ "month": "en måned",
+ "months": "{0} måneder",
+ "year": "et år",
+ "years": "{0} år",
+ }
+
+ month_names = [
+ "",
+ "januar",
+ "februar",
+ "marts",
+ "april",
+ "maj",
+ "juni",
+ "juli",
+ "august",
+ "september",
+ "oktober",
+ "november",
+ "december",
+ ]
+ month_abbreviations = [
+ "",
+ "jan",
+ "feb",
+ "mar",
+ "apr",
+ "maj",
+ "jun",
+ "jul",
+ "aug",
+ "sep",
+ "okt",
+ "nov",
+ "dec",
+ ]
+
+ day_names = [
+ "",
+ "mandag",
+ "tirsdag",
+ "onsdag",
+ "torsdag",
+ "fredag",
+ "lørdag",
+ "søndag",
+ ]
+ day_abbreviations = ["", "man", "tir", "ons", "tor", "fre", "lør", "søn"]
+
+
+class MalayalamLocale(Locale):
+
+ names = ["ml"]
+
+ past = "{0} മുമ്പ്"
+ future = "{0} ശേഷം"
+
+ timeframes = {
+ "now": "ഇപ്പോൾ",
+ "second": "ഒരു നിമിഷം",
+ "seconds": "{0} സെക്കന്റ്",
+ "minute": "ഒരു മിനിറ്റ്",
+ "minutes": "{0} മിനിറ്റ്",
+ "hour": "ഒരു മണിക്കൂർ",
+ "hours": "{0} മണിക്കൂർ",
+ "day": "ഒരു ദിവസം ",
+ "days": "{0} ദിവസം ",
+ "month": "ഒരു മാസം ",
+ "months": "{0} മാസം ",
+ "year": "ഒരു വർഷം ",
+ "years": "{0} വർഷം ",
+ }
+
+ meridians = {
+ "am": "രാവിലെ",
+ "pm": "ഉച്ചക്ക് ശേഷം",
+ "AM": "രാവിലെ",
+ "PM": "ഉച്ചക്ക് ശേഷം",
+ }
+
+ month_names = [
+ "",
+ "ജനുവരി",
+ "ഫെബ്രുവരി",
+ "മാർച്ച്",
+ "ഏപ്രിൽ ",
+ "മെയ് ",
+ "ജൂണ്",
+ "ജൂലൈ",
+ "ഓഗസ്റ്റ്",
+ "സെപ്റ്റംബർ",
+ "ഒക്ടോബർ",
+ "നവംബർ",
+ "ഡിസംബർ",
+ ]
+ month_abbreviations = [
+ "",
+ "ജനു",
+ "ഫെബ് ",
+ "മാർ",
+ "ഏപ്രിൽ",
+ "മേയ്",
+ "ജൂണ്",
+ "ജൂലൈ",
+ "ഓഗസ്റ",
+ "സെപ്റ്റ",
+ "ഒക്ടോ",
+ "നവം",
+ "ഡിസം",
+ ]
+
+ day_names = ["", "തിങ്കള്", "ചൊവ്വ", "ബുധന്", "വ്യാഴം", "വെള്ളി", "ശനി", "ഞായര്"]
+ day_abbreviations = [
+ "",
+ "തിങ്കള്",
+ "ചൊവ്വ",
+ "ബുധന്",
+ "വ്യാഴം",
+ "വെള്ളി",
+ "ശനി",
+ "ഞായര്",
+ ]
+
+
+class HindiLocale(Locale):
+
+ names = ["hi"]
+
+ past = "{0} पहले"
+ future = "{0} बाद"
+
+ timeframes = {
+ "now": "अभी",
+ "second": "एक पल",
+ "seconds": "{0} सेकंड्",
+ "minute": "एक मिनट ",
+ "minutes": "{0} मिनट ",
+ "hour": "एक घंटा",
+ "hours": "{0} घंटे",
+ "day": "एक दिन",
+ "days": "{0} दिन",
+ "month": "एक माह ",
+ "months": "{0} महीने ",
+ "year": "एक वर्ष ",
+ "years": "{0} साल ",
+ }
+
+ meridians = {"am": "सुबह", "pm": "शाम", "AM": "सुबह", "PM": "शाम"}
+
+ month_names = [
+ "",
+ "जनवरी",
+ "फरवरी",
+ "मार्च",
+ "अप्रैल ",
+ "मई",
+ "जून",
+ "जुलाई",
+ "अगस्त",
+ "सितंबर",
+ "अक्टूबर",
+ "नवंबर",
+ "दिसंबर",
+ ]
+ month_abbreviations = [
+ "",
+ "जन",
+ "फ़र",
+ "मार्च",
+ "अप्रै",
+ "मई",
+ "जून",
+ "जुलाई",
+ "आग",
+ "सित",
+ "अकत",
+ "नवे",
+ "दिस",
+ ]
+
+ day_names = [
+ "",
+ "सोमवार",
+ "मंगलवार",
+ "बुधवार",
+ "गुरुवार",
+ "शुक्रवार",
+ "शनिवार",
+ "रविवार",
+ ]
+ day_abbreviations = ["", "सोम", "मंगल", "बुध", "गुरुवार", "शुक्र", "शनि", "रवि"]
+
+
+class CzechLocale(Locale):
+    # Czech declines nouns differently after "Před" (past, instrumental)
+    # and "Za" (future, accusative), and has a separate paucal plural for
+    # counts 2-4; hence the past/future/zero dicts and two-element lists.
+    names = ["cs", "cs_cz"]
+
+    timeframes = {
+        "now": "Teď",
+        "second": {"past": "vteřina", "future": "vteřina", "zero": "vteřina"},
+        "seconds": {"past": "{0} sekundami", "future": ["{0} sekundy", "{0} sekund"]},
+        "minute": {"past": "minutou", "future": "minutu", "zero": "{0} minut"},
+        "minutes": {"past": "{0} minutami", "future": ["{0} minuty", "{0} minut"]},
+        "hour": {"past": "hodinou", "future": "hodinu", "zero": "{0} hodin"},
+        "hours": {"past": "{0} hodinami", "future": ["{0} hodiny", "{0} hodin"]},
+        "day": {"past": "dnem", "future": "den", "zero": "{0} dnů"},
+        "days": {"past": "{0} dny", "future": ["{0} dny", "{0} dnů"]},
+        "week": {"past": "týdnem", "future": "týden", "zero": "{0} týdnů"},
+        "weeks": {"past": "{0} týdny", "future": ["{0} týdny", "{0} týdnů"]},
+        "month": {"past": "měsícem", "future": "měsíc", "zero": "{0} měsíců"},
+        "months": {"past": "{0} měsíci", "future": ["{0} měsíce", "{0} měsíců"]},
+        "year": {"past": "rokem", "future": "rok", "zero": "{0} let"},
+        "years": {"past": "{0} lety", "future": ["{0} roky", "{0} let"]},
+    }
+
+    past = "Před {0}"
+    future = "Za {0}"
+
+    month_names = [
+        "",
+        "leden",
+        "únor",
+        "březen",
+        "duben",
+        "květen",
+        "červen",
+        "červenec",
+        "srpen",
+        "září",
+        "říjen",
+        "listopad",
+        "prosinec",
+    ]
+    month_abbreviations = [
+        "",
+        "led",
+        "úno",
+        "bře",
+        "dub",
+        "kvě",
+        "čvn",
+        "čvc",
+        "srp",
+        "zář",
+        "říj",
+        "lis",
+        "pro",
+    ]
+
+    day_names = [
+        "",
+        "pondělí",
+        "úterý",
+        "středa",
+        "čtvrtek",
+        "pátek",
+        "sobota",
+        "neděle",
+    ]
+    day_abbreviations = ["", "po", "út", "st", "čt", "pá", "so", "ne"]
+
+    def _format_timeframe(self, timeframe, delta):
+        """Czech aware time frame format function, takes into account
+        the differences between past and future forms."""
+        form = self.timeframes[timeframe]
+        if isinstance(form, dict):
+            if delta == 0:
+                form = form["zero"]  # And *never* use 0 in the singular!
+            elif delta > 0:
+                form = form["future"]
+            else:
+                form = form["past"]
+        delta = abs(delta)
+
+        if isinstance(form, list):
+            # Paucal plural: counts ending in 2-4 (but not 12-14) take
+            # form[0]; everything else takes the genitive-plural form[1].
+            if 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20):
+                form = form[0]
+            else:
+                form = form[1]
+
+        return form.format(delta)
+
+
+class SlovakLocale(Locale):
+    # Slovak mirrors the Czech structure: separate past (instrumental,
+    # after "Pred") and future (accusative, after "O") forms, plus a
+    # paucal plural for counts 2-4 — hence the dicts and two-element lists.
+    names = ["sk", "sk_sk"]
+
+    timeframes = {
+        "now": "Teraz",
+        "second": {"past": "sekundou", "future": "sekundu", "zero": "{0} sekúnd"},
+        "seconds": {"past": "{0} sekundami", "future": ["{0} sekundy", "{0} sekúnd"]},
+        "minute": {"past": "minútou", "future": "minútu", "zero": "{0} minút"},
+        "minutes": {"past": "{0} minútami", "future": ["{0} minúty", "{0} minút"]},
+        "hour": {"past": "hodinou", "future": "hodinu", "zero": "{0} hodín"},
+        "hours": {"past": "{0} hodinami", "future": ["{0} hodiny", "{0} hodín"]},
+        "day": {"past": "dňom", "future": "deň", "zero": "{0} dní"},
+        "days": {"past": "{0} dňami", "future": ["{0} dni", "{0} dní"]},
+        "week": {"past": "týždňom", "future": "týždeň", "zero": "{0} týždňov"},
+        "weeks": {"past": "{0} týždňami", "future": ["{0} týždne", "{0} týždňov"]},
+        "month": {"past": "mesiacom", "future": "mesiac", "zero": "{0} mesiacov"},
+        "months": {"past": "{0} mesiacmi", "future": ["{0} mesiace", "{0} mesiacov"]},
+        "year": {"past": "rokom", "future": "rok", "zero": "{0} rokov"},
+        "years": {"past": "{0} rokmi", "future": ["{0} roky", "{0} rokov"]},
+    }
+
+    past = "Pred {0}"
+    future = "O {0}"
+    and_word = "a"
+
+    month_names = [
+        "",
+        "január",
+        "február",
+        "marec",
+        "apríl",
+        "máj",
+        "jún",
+        "júl",
+        "august",
+        "september",
+        "október",
+        "november",
+        "december",
+    ]
+    month_abbreviations = [
+        "",
+        "jan",
+        "feb",
+        "mar",
+        "apr",
+        "máj",
+        "jún",
+        "júl",
+        "aug",
+        "sep",
+        "okt",
+        "nov",
+        "dec",
+    ]
+
+    day_names = [
+        "",
+        "pondelok",
+        "utorok",
+        "streda",
+        "štvrtok",
+        "piatok",
+        "sobota",
+        "nedeľa",
+    ]
+    day_abbreviations = ["", "po", "ut", "st", "št", "pi", "so", "ne"]
+
+    def _format_timeframe(self, timeframe, delta):
+        """Slovak aware time frame format function, takes into account
+        the differences between past and future forms."""
+        form = self.timeframes[timeframe]
+        if isinstance(form, dict):
+            if delta == 0:
+                form = form["zero"]  # And *never* use 0 in the singular!
+            elif delta > 0:
+                form = form["future"]
+            else:
+                form = form["past"]
+        delta = abs(delta)
+
+        if isinstance(form, list):
+            # Paucal plural: counts ending in 2-4 (but not 12-14) take
+            # form[0]; everything else takes the genitive-plural form[1].
+            if 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20):
+                form = form[0]
+            else:
+                form = form[1]
+
+        return form.format(delta)
+
+
+class FarsiLocale(Locale):
+
+ names = ["fa", "fa_ir"]
+
+ past = "{0} قبل"
+ future = "در {0}"
+
+ timeframes = {
+ "now": "اکنون",
+ "second": "یک لحظه",
+ "seconds": "{0} ثانیه",
+ "minute": "یک دقیقه",
+ "minutes": "{0} دقیقه",
+ "hour": "یک ساعت",
+ "hours": "{0} ساعت",
+ "day": "یک روز",
+ "days": "{0} روز",
+ "month": "یک ماه",
+ "months": "{0} ماه",
+ "year": "یک سال",
+ "years": "{0} سال",
+ }
+
+ meridians = {
+ "am": "قبل از ظهر",
+ "pm": "بعد از ظهر",
+ "AM": "قبل از ظهر",
+ "PM": "بعد از ظهر",
+ }
+
+ month_names = [
+ "",
+ "January",
+ "February",
+ "March",
+ "April",
+ "May",
+ "June",
+ "July",
+ "August",
+ "September",
+ "October",
+ "November",
+ "December",
+ ]
+ month_abbreviations = [
+ "",
+ "Jan",
+ "Feb",
+ "Mar",
+ "Apr",
+ "May",
+ "Jun",
+ "Jul",
+ "Aug",
+ "Sep",
+ "Oct",
+ "Nov",
+ "Dec",
+ ]
+
+ day_names = [
+ "",
+ "دو شنبه",
+ "سه شنبه",
+ "چهارشنبه",
+ "پنجشنبه",
+ "جمعه",
+ "شنبه",
+ "یکشنبه",
+ ]
+ day_abbreviations = ["", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
+
+
+class HebrewLocale(Locale):
+    # Hebrew has dedicated dual forms for exactly-two quantities; these
+    # live under the extra "2-..." keys and are selected in
+    # _format_timeframe.
+
+    names = ["he", "he_IL"]
+
+    past = "לפני {0}"
+    future = "בעוד {0}"
+    and_word = "ו"
+
+    timeframes = {
+        "now": "הרגע",
+        "second": "שנייה",
+        "seconds": "{0} שניות",
+        "minute": "דקה",
+        "minutes": "{0} דקות",
+        "hour": "שעה",
+        "hours": "{0} שעות",
+        "2-hours": "שעתיים",
+        "day": "יום",
+        "days": "{0} ימים",
+        "2-days": "יומיים",
+        "week": "שבוע",
+        "weeks": "{0} שבועות",
+        "2-weeks": "שבועיים",
+        "month": "חודש",
+        "months": "{0} חודשים",
+        "2-months": "חודשיים",
+        "year": "שנה",
+        "years": "{0} שנים",
+        "2-years": "שנתיים",
+    }
+
+    meridians = {
+        "am": 'לפנ"צ',
+        "pm": 'אחר"צ',
+        "AM": "לפני הצהריים",
+        "PM": "אחרי הצהריים",
+    }
+
+    month_names = [
+        "",
+        "ינואר",
+        "פברואר",
+        "מרץ",
+        "אפריל",
+        "מאי",
+        "יוני",
+        "יולי",
+        "אוגוסט",
+        "ספטמבר",
+        "אוקטובר",
+        "נובמבר",
+        "דצמבר",
+    ]
+    month_abbreviations = [
+        "",
+        "ינו׳",
+        "פבר׳",
+        "מרץ",
+        "אפר׳",
+        "מאי",
+        "יוני",
+        "יולי",
+        "אוג׳",
+        "ספט׳",
+        "אוק׳",
+        "נוב׳",
+        "דצמ׳",
+    ]
+
+    day_names = ["", "שני", "שלישי", "רביעי", "חמישי", "שישי", "שבת", "ראשון"]
+    day_abbreviations = ["", "ב׳", "ג׳", "ד׳", "ה׳", "ו׳", "ש׳", "א׳"]
+
+    def _format_timeframe(self, timeframe, delta):
+        """Hebrew couple of <timeframe> aware: prefer the dual "2-..."
+        form for |delta| == 2 and the singular form for |delta| == 1,
+        falling back to the plural frame otherwise."""
+        couple = "2-{}".format(timeframe)
+        single = timeframe.rstrip("s")
+        if abs(delta) == 2 and couple in self.timeframes:
+            key = couple
+        elif abs(delta) == 1 and single in self.timeframes:
+            key = single
+        else:
+            key = timeframe
+
+        return self.timeframes[key].format(trunc(abs(delta)))
+
+    def describe_multi(self, timeframes, only_distance=False):
+        """Describes a delta within multiple timeframes in plain language.
+        In Hebrew, the and word behaves a bit differently.
+
+        :param timeframes: a list of string, quantity pairs each representing a timeframe and delta.
+        :param only_distance: return only distance eg: "2 hours and 11 seconds" without "in" or "ago" keywords
+        """
+
+        humanized = ""
+        for index, (timeframe, delta) in enumerate(timeframes):
+            last_humanized = self._format_timeframe(timeframe, delta)
+            if index == 0:
+                humanized = last_humanized
+            elif index == len(timeframes) - 1:  # Must have at least 2 items
+                humanized += " " + self.and_word
+                # The conjunction "ו" is joined to a following numeral
+                # with a maqaf (Hebrew hyphen).
+                if last_humanized[0].isdecimal():
+                    humanized += "־"
+                humanized += last_humanized
+            else:  # Don't add for the last one
+                humanized += ", " + last_humanized
+
+        if not only_distance:
+            humanized = self._format_relative(humanized, timeframe, delta)
+
+        return humanized
+
+
+class MarathiLocale(Locale):
+
+ names = ["mr"]
+
+ past = "{0} आधी"
+ future = "{0} नंतर"
+
+ timeframes = {
+ "now": "सद्य",
+ "second": "एक सेकंद",
+ "seconds": "{0} सेकंद",
+ "minute": "एक मिनिट ",
+ "minutes": "{0} मिनिट ",
+ "hour": "एक तास",
+ "hours": "{0} तास",
+ "day": "एक दिवस",
+ "days": "{0} दिवस",
+ "month": "एक महिना ",
+ "months": "{0} महिने ",
+ "year": "एक वर्ष ",
+ "years": "{0} वर्ष ",
+ }
+
+ meridians = {"am": "सकाळ", "pm": "संध्याकाळ", "AM": "सकाळ", "PM": "संध्याकाळ"}
+
+ month_names = [
+ "",
+ "जानेवारी",
+ "फेब्रुवारी",
+ "मार्च",
+ "एप्रिल",
+ "मे",
+ "जून",
+ "जुलै",
+ "अॉगस्ट",
+ "सप्टेंबर",
+ "अॉक्टोबर",
+ "नोव्हेंबर",
+ "डिसेंबर",
+ ]
+ month_abbreviations = [
+ "",
+ "जान",
+ "फेब्रु",
+ "मार्च",
+ "एप्रि",
+ "मे",
+ "जून",
+ "जुलै",
+ "अॉग",
+ "सप्टें",
+ "अॉक्टो",
+ "नोव्हें",
+ "डिसें",
+ ]
+
+ day_names = [
+ "",
+ "सोमवार",
+ "मंगळवार",
+ "बुधवार",
+ "गुरुवार",
+ "शुक्रवार",
+ "शनिवार",
+ "रविवार",
+ ]
+ day_abbreviations = ["", "सोम", "मंगळ", "बुध", "गुरु", "शुक्र", "शनि", "रवि"]
+
+
+def _map_locales():
+    """Builds the locale-name -> Locale-subclass lookup table.
+
+    Scans every class defined in this module and registers each entry of
+    its ``names`` list, lowercased, so locale lookup is case-insensitive.
+    """
+
+    locales = {}
+
+    for _, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass):
+        if issubclass(cls, Locale):  # pragma: no branch
+            for name in cls.names:
+                locales[name.lower()] = cls
+
+    return locales
+
+
+class CatalanLocale(Locale):
+ names = ["ca", "ca_es", "ca_ad", "ca_fr", "ca_it"]
+ past = "Fa {0}"
+ future = "En {0}"
+ and_word = "i"
+
+ timeframes = {
+ "now": "Ara mateix",
+ "second": "un segon",
+ "seconds": "{0} segons",
+ "minute": "1 minut",
+ "minutes": "{0} minuts",
+ "hour": "una hora",
+ "hours": "{0} hores",
+ "day": "un dia",
+ "days": "{0} dies",
+ "month": "un mes",
+ "months": "{0} mesos",
+ "year": "un any",
+ "years": "{0} anys",
+ }
+
+ month_names = [
+ "",
+ "gener",
+ "febrer",
+ "març",
+ "abril",
+ "maig",
+ "juny",
+ "juliol",
+ "agost",
+ "setembre",
+ "octubre",
+ "novembre",
+ "desembre",
+ ]
+ month_abbreviations = [
+ "",
+ "gen.",
+ "febr.",
+ "març",
+ "abr.",
+ "maig",
+ "juny",
+ "jul.",
+ "ag.",
+ "set.",
+ "oct.",
+ "nov.",
+ "des.",
+ ]
+ day_names = [
+ "",
+ "dilluns",
+ "dimarts",
+ "dimecres",
+ "dijous",
+ "divendres",
+ "dissabte",
+ "diumenge",
+ ]
+ day_abbreviations = [
+ "",
+ "dl.",
+ "dt.",
+ "dc.",
+ "dj.",
+ "dv.",
+ "ds.",
+ "dg.",
+ ]
+
+
+class BasqueLocale(Locale):
+ names = ["eu", "eu_eu"]
+ past = "duela {0}"
+ future = "{0}" # I don't know what's the right phrase in Basque for the future.
+
+ timeframes = {
+ "now": "Orain",
+ "second": "segundo bat",
+ "seconds": "{0} segundu",
+ "minute": "minutu bat",
+ "minutes": "{0} minutu",
+ "hour": "ordu bat",
+ "hours": "{0} ordu",
+ "day": "egun bat",
+ "days": "{0} egun",
+ "month": "hilabete bat",
+ "months": "{0} hilabet",
+ "year": "urte bat",
+ "years": "{0} urte",
+ }
+
+ month_names = [
+ "",
+ "urtarrilak",
+ "otsailak",
+ "martxoak",
+ "apirilak",
+ "maiatzak",
+ "ekainak",
+ "uztailak",
+ "abuztuak",
+ "irailak",
+ "urriak",
+ "azaroak",
+ "abenduak",
+ ]
+ month_abbreviations = [
+ "",
+ "urt",
+ "ots",
+ "mar",
+ "api",
+ "mai",
+ "eka",
+ "uzt",
+ "abu",
+ "ira",
+ "urr",
+ "aza",
+ "abe",
+ ]
+ day_names = [
+ "",
+ "astelehena",
+ "asteartea",
+ "asteazkena",
+ "osteguna",
+ "ostirala",
+ "larunbata",
+ "igandea",
+ ]
+ day_abbreviations = ["", "al", "ar", "az", "og", "ol", "lr", "ig"]
+
+
+class HungarianLocale(Locale):
+    # Hungarian declines differently after "ezelőtt" (ago) vs "múlva"
+    # (from now), hence the past/future dicts handled in _format_timeframe.
+
+    names = ["hu", "hu_hu"]
+
+    past = "{0} ezelőtt"
+    future = "{0} múlva"
+
+    timeframes = {
+        "now": "éppen most",
+        # Fix: "második" is the ordinal "second (in a sequence)"; the time
+        # unit is "másodperc" (past takes the instrumental "másodperccel").
+        "second": {"past": "egy másodperccel", "future": "egy másodperc"},
+        "seconds": {"past": "{0} másodpercekkel", "future": "{0} pár másodperc"},
+        "minute": {"past": "egy perccel", "future": "egy perc"},
+        "minutes": {"past": "{0} perccel", "future": "{0} perc"},
+        "hour": {"past": "egy órával", "future": "egy óra"},
+        "hours": {"past": "{0} órával", "future": "{0} óra"},
+        "day": {"past": "egy nappal", "future": "egy nap"},
+        "days": {"past": "{0} nappal", "future": "{0} nap"},
+        "month": {"past": "egy hónappal", "future": "egy hónap"},
+        "months": {"past": "{0} hónappal", "future": "{0} hónap"},
+        "year": {"past": "egy évvel", "future": "egy év"},
+        "years": {"past": "{0} évvel", "future": "{0} év"},
+    }
+
+    month_names = [
+        "",
+        "január",
+        "február",
+        "március",
+        "április",
+        "május",
+        "június",
+        "július",
+        "augusztus",
+        "szeptember",
+        "október",
+        "november",
+        "december",
+    ]
+    month_abbreviations = [
+        "",
+        "jan",
+        "febr",
+        "márc",
+        "ápr",
+        "máj",
+        "jún",
+        "júl",
+        "aug",
+        "szept",
+        "okt",
+        "nov",
+        "dec",
+    ]
+
+    day_names = [
+        "",
+        "hétfő",
+        "kedd",
+        "szerda",
+        "csütörtök",
+        "péntek",
+        "szombat",
+        "vasárnap",
+    ]
+    day_abbreviations = ["", "hét", "kedd", "szer", "csüt", "pént", "szom", "vas"]
+
+    meridians = {"am": "de", "pm": "du", "AM": "DE", "PM": "DU"}
+
+    def _format_timeframe(self, timeframe, delta):
+        """Selects the past or future variant of a dict-valued timeframe
+        by the sign of ``delta``; plain-string frames (e.g. "now") are
+        used as-is."""
+        form = self.timeframes[timeframe]
+
+        if isinstance(form, dict):
+            if delta > 0:
+                form = form["future"]
+            else:
+                form = form["past"]
+
+        return form.format(abs(delta))
+
+
+class EsperantoLocale(Locale):
+ names = ["eo", "eo_xx"]
+ past = "antaŭ {0}"
+ future = "post {0}"
+
+ timeframes = {
+ "now": "nun",
+ "second": "sekundo",
+ "seconds": "{0} kelkaj sekundoj",
+ "minute": "unu minuto",
+ "minutes": "{0} minutoj",
+ "hour": "un horo",
+ "hours": "{0} horoj",
+ "day": "unu tago",
+ "days": "{0} tagoj",
+ "month": "unu monato",
+ "months": "{0} monatoj",
+ "year": "unu jaro",
+ "years": "{0} jaroj",
+ }
+
+ month_names = [
+ "",
+ "januaro",
+ "februaro",
+ "marto",
+ "aprilo",
+ "majo",
+ "junio",
+ "julio",
+ "aŭgusto",
+ "septembro",
+ "oktobro",
+ "novembro",
+ "decembro",
+ ]
+ month_abbreviations = [
+ "",
+ "jan",
+ "feb",
+ "mar",
+ "apr",
+ "maj",
+ "jun",
+ "jul",
+ "aŭg",
+ "sep",
+ "okt",
+ "nov",
+ "dec",
+ ]
+
+ day_names = [
+ "",
+ "lundo",
+ "mardo",
+ "merkredo",
+ "ĵaŭdo",
+ "vendredo",
+ "sabato",
+ "dimanĉo",
+ ]
+ day_abbreviations = ["", "lun", "mar", "mer", "ĵaŭ", "ven", "sab", "dim"]
+
+ meridians = {"am": "atm", "pm": "ptm", "AM": "ATM", "PM": "PTM"}
+
+    ordinal_day_re = r"((?P<value>[1-3]?[0-9](?=a))a)"
+
+ def _ordinal_number(self, n):
+ return "{}a".format(n)
+
+
+class ThaiLocale(Locale):
+
+ names = ["th", "th_th"]
+
+ past = "{0}{1}ที่ผ่านมา"
+ future = "ในอีก{1}{0}"
+
+ timeframes = {
+ "now": "ขณะนี้",
+ "second": "วินาที",
+ "seconds": "{0} ไม่กี่วินาที",
+ "minute": "1 นาที",
+ "minutes": "{0} นาที",
+ "hour": "1 ชั่วโมง",
+ "hours": "{0} ชั่วโมง",
+ "day": "1 วัน",
+ "days": "{0} วัน",
+ "month": "1 เดือน",
+ "months": "{0} เดือน",
+ "year": "1 ปี",
+ "years": "{0} ปี",
+ }
+
+ month_names = [
+ "",
+ "มกราคม",
+ "กุมภาพันธ์",
+ "มีนาคม",
+ "เมษายน",
+ "พฤษภาคม",
+ "มิถุนายน",
+ "กรกฎาคม",
+ "สิงหาคม",
+ "กันยายน",
+ "ตุลาคม",
+ "พฤศจิกายน",
+ "ธันวาคม",
+ ]
+ month_abbreviations = [
+ "",
+ "ม.ค.",
+ "ก.พ.",
+ "มี.ค.",
+ "เม.ย.",
+ "พ.ค.",
+ "มิ.ย.",
+ "ก.ค.",
+ "ส.ค.",
+ "ก.ย.",
+ "ต.ค.",
+ "พ.ย.",
+ "ธ.ค.",
+ ]
+
+ day_names = ["", "จันทร์", "อังคาร", "พุธ", "พฤหัสบดี", "ศุกร์", "เสาร์", "อาทิตย์"]
+ day_abbreviations = ["", "จ", "อ", "พ", "พฤ", "ศ", "ส", "อา"]
+
+ meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"}
+
+ BE_OFFSET = 543
+
+ def year_full(self, year):
+ """Thai always use Buddhist Era (BE) which is CE + 543"""
+ year += self.BE_OFFSET
+ return "{:04d}".format(year)
+
+ def year_abbreviation(self, year):
+ """Thai always use Buddhist Era (BE) which is CE + 543"""
+ year += self.BE_OFFSET
+ return "{:04d}".format(year)[2:]
+
+ def _format_relative(self, humanized, timeframe, delta):
+ """Thai normally doesn't have any space between words"""
+ if timeframe == "now":
+ return humanized
+ space = "" if timeframe == "seconds" else " "
+ direction = self.past if delta < 0 else self.future
+
+ return direction.format(humanized, space)
+
+
+class BengaliLocale(Locale):
+
+ names = ["bn", "bn_bd", "bn_in"]
+
+ past = "{0} আগে"
+ future = "{0} পরে"
+
+ timeframes = {
+ "now": "এখন",
+ "second": "একটি দ্বিতীয়",
+ "seconds": "{0} সেকেন্ড",
+ "minute": "এক মিনিট",
+ "minutes": "{0} মিনিট",
+ "hour": "এক ঘণ্টা",
+ "hours": "{0} ঘণ্টা",
+ "day": "এক দিন",
+ "days": "{0} দিন",
+ "month": "এক মাস",
+ "months": "{0} মাস ",
+ "year": "এক বছর",
+ "years": "{0} বছর",
+ }
+
+ meridians = {"am": "সকাল", "pm": "বিকাল", "AM": "সকাল", "PM": "বিকাল"}
+
+ month_names = [
+ "",
+ "জানুয়ারি",
+ "ফেব্রুয়ারি",
+ "মার্চ",
+ "এপ্রিল",
+ "মে",
+ "জুন",
+ "জুলাই",
+ "আগস্ট",
+ "সেপ্টেম্বর",
+ "অক্টোবর",
+ "নভেম্বর",
+ "ডিসেম্বর",
+ ]
+ month_abbreviations = [
+ "",
+ "জানু",
+ "ফেব",
+ "মার্চ",
+ "এপ্রি",
+ "মে",
+ "জুন",
+ "জুল",
+ "অগা",
+ "সেপ্ট",
+ "অক্টো",
+ "নভে",
+ "ডিসে",
+ ]
+
+ day_names = [
+ "",
+ "সোমবার",
+ "মঙ্গলবার",
+ "বুধবার",
+ "বৃহস্পতিবার",
+ "শুক্রবার",
+ "শনিবার",
+ "রবিবার",
+ ]
+ day_abbreviations = ["", "সোম", "মঙ্গল", "বুধ", "বৃহঃ", "শুক্র", "শনি", "রবি"]
+
+ def _ordinal_number(self, n):
+ if n > 10 or n == 0:
+ return "{}তম".format(n)
+ if n in [1, 5, 7, 8, 9, 10]:
+ return "{}ম".format(n)
+ if n in [2, 3]:
+ return "{}য়".format(n)
+ if n == 4:
+ return "{}র্থ".format(n)
+ if n == 6:
+ return "{}ষ্ঠ".format(n)
+
+
+class RomanshLocale(Locale):
+
+ names = ["rm", "rm_ch"]
+
+ past = "avant {0}"
+ future = "en {0}"
+
+ timeframes = {
+ "now": "en quest mument",
+ "second": "in secunda",
+ "seconds": "{0} secundas",
+ "minute": "ina minuta",
+ "minutes": "{0} minutas",
+ "hour": "in'ura",
+ "hours": "{0} ura",
+ "day": "in di",
+ "days": "{0} dis",
+ "month": "in mais",
+ "months": "{0} mais",
+ "year": "in onn",
+ "years": "{0} onns",
+ }
+
+ month_names = [
+ "",
+ "schaner",
+ "favrer",
+ "mars",
+ "avrigl",
+ "matg",
+ "zercladur",
+ "fanadur",
+ "avust",
+ "settember",
+ "october",
+ "november",
+ "december",
+ ]
+
+ month_abbreviations = [
+ "",
+ "schan",
+ "fav",
+ "mars",
+ "avr",
+ "matg",
+ "zer",
+ "fan",
+ "avu",
+ "set",
+ "oct",
+ "nov",
+ "dec",
+ ]
+
+ day_names = [
+ "",
+ "glindesdi",
+ "mardi",
+ "mesemna",
+ "gievgia",
+ "venderdi",
+ "sonda",
+ "dumengia",
+ ]
+
+ day_abbreviations = ["", "gli", "ma", "me", "gie", "ve", "so", "du"]
+
+
+class RomanianLocale(Locale):
+ names = ["ro", "ro_ro"]
+
+ past = "{0} în urmă"
+ future = "peste {0}"
+ and_word = "și"
+
+ timeframes = {
+ "now": "acum",
+ "second": "o secunda",
+ "seconds": "{0} câteva secunde",
+ "minute": "un minut",
+ "minutes": "{0} minute",
+ "hour": "o oră",
+ "hours": "{0} ore",
+ "day": "o zi",
+ "days": "{0} zile",
+ "month": "o lună",
+ "months": "{0} luni",
+ "year": "un an",
+ "years": "{0} ani",
+ }
+
+ month_names = [
+ "",
+ "ianuarie",
+ "februarie",
+ "martie",
+ "aprilie",
+ "mai",
+ "iunie",
+ "iulie",
+ "august",
+ "septembrie",
+ "octombrie",
+ "noiembrie",
+ "decembrie",
+ ]
+ month_abbreviations = [
+ "",
+ "ian",
+ "febr",
+ "mart",
+ "apr",
+ "mai",
+ "iun",
+ "iul",
+ "aug",
+ "sept",
+ "oct",
+ "nov",
+ "dec",
+ ]
+
+ day_names = [
+ "",
+ "luni",
+ "marți",
+ "miercuri",
+ "joi",
+ "vineri",
+ "sâmbătă",
+ "duminică",
+ ]
+ day_abbreviations = ["", "Lun", "Mar", "Mie", "Joi", "Vin", "Sâm", "Dum"]
+
+
+class SlovenianLocale(Locale):
+ names = ["sl", "sl_si"]
+
+ past = "pred {0}"
+ future = "čez {0}"
+ and_word = "in"
+
+ timeframes = {
+ "now": "zdaj",
+ "second": "sekundo",
+ "seconds": "{0} sekund",
+ "minute": "minuta",
+ "minutes": "{0} minutami",
+ "hour": "uro",
+ "hours": "{0} ur",
+ "day": "dan",
+ "days": "{0} dni",
+ "month": "mesec",
+ "months": "{0} mesecev",
+ "year": "leto",
+ "years": "{0} let",
+ }
+
+ meridians = {"am": "", "pm": "", "AM": "", "PM": ""}
+
+ month_names = [
+ "",
+ "Januar",
+ "Februar",
+ "Marec",
+ "April",
+ "Maj",
+ "Junij",
+ "Julij",
+ "Avgust",
+ "September",
+ "Oktober",
+ "November",
+ "December",
+ ]
+
+ month_abbreviations = [
+ "",
+ "Jan",
+ "Feb",
+ "Mar",
+ "Apr",
+ "Maj",
+ "Jun",
+ "Jul",
+ "Avg",
+ "Sep",
+ "Okt",
+ "Nov",
+ "Dec",
+ ]
+
+ day_names = [
+ "",
+ "Ponedeljek",
+ "Torek",
+ "Sreda",
+ "Četrtek",
+ "Petek",
+ "Sobota",
+ "Nedelja",
+ ]
+
+ day_abbreviations = ["", "Pon", "Tor", "Sre", "Čet", "Pet", "Sob", "Ned"]
+
+
+class IndonesianLocale(Locale):
+
+ names = ["id", "id_id"]
+
+ past = "{0} yang lalu"
+ future = "dalam {0}"
+ and_word = "dan"
+
+ timeframes = {
+ "now": "baru saja",
+ "second": "1 sebentar",
+ "seconds": "{0} detik",
+ "minute": "1 menit",
+ "minutes": "{0} menit",
+ "hour": "1 jam",
+ "hours": "{0} jam",
+ "day": "1 hari",
+ "days": "{0} hari",
+ "month": "1 bulan",
+ "months": "{0} bulan",
+ "year": "1 tahun",
+ "years": "{0} tahun",
+ }
+
+ meridians = {"am": "", "pm": "", "AM": "", "PM": ""}
+
+ month_names = [
+ "",
+ "Januari",
+ "Februari",
+ "Maret",
+ "April",
+ "Mei",
+ "Juni",
+ "Juli",
+ "Agustus",
+ "September",
+ "Oktober",
+ "November",
+ "Desember",
+ ]
+
+ month_abbreviations = [
+ "",
+ "Jan",
+ "Feb",
+ "Mar",
+ "Apr",
+ "Mei",
+ "Jun",
+ "Jul",
+ "Ags",
+ "Sept",
+ "Okt",
+ "Nov",
+ "Des",
+ ]
+
+ day_names = ["", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu", "Minggu"]
+
+ day_abbreviations = [
+ "",
+ "Senin",
+ "Selasa",
+ "Rabu",
+ "Kamis",
+ "Jumat",
+ "Sabtu",
+ "Minggu",
+ ]
+
+
+class NepaliLocale(Locale):
+ names = ["ne", "ne_np"]
+
+ past = "{0} पहिले"
+ future = "{0} पछी"
+
+ timeframes = {
+ "now": "अहिले",
+ "second": "एक सेकेन्ड",
+ "seconds": "{0} सेकण्ड",
+ "minute": "मिनेट",
+ "minutes": "{0} मिनेट",
+ "hour": "एक घण्टा",
+ "hours": "{0} घण्टा",
+ "day": "एक दिन",
+ "days": "{0} दिन",
+ "month": "एक महिना",
+ "months": "{0} महिना",
+ "year": "एक बर्ष",
+ "years": "बर्ष",
+ }
+
+ meridians = {"am": "पूर्वाह्न", "pm": "अपरान्ह", "AM": "पूर्वाह्न", "PM": "अपरान्ह"}
+
+ month_names = [
+ "",
+ "जनवरी",
+ "फेब्रुअरी",
+ "मार्च",
+ "एप्रील",
+ "मे",
+ "जुन",
+ "जुलाई",
+ "अगष्ट",
+ "सेप्टेम्बर",
+ "अक्टोबर",
+ "नोवेम्बर",
+ "डिसेम्बर",
+ ]
+ month_abbreviations = [
+ "",
+ "जन",
+ "फेब",
+ "मार्च",
+ "एप्रील",
+ "मे",
+ "जुन",
+ "जुलाई",
+ "अग",
+ "सेप",
+ "अक्ट",
+ "नोव",
+ "डिस",
+ ]
+
+ day_names = [
+ "",
+ "सोमवार",
+ "मंगलवार",
+ "बुधवार",
+ "बिहिवार",
+ "शुक्रवार",
+ "शनिवार",
+ "आइतवार",
+ ]
+
+ day_abbreviations = ["", "सोम", "मंगल", "बुध", "बिहि", "शुक्र", "शनि", "आइत"]
+
+
+class EstonianLocale(Locale):
+ names = ["ee", "et"]
+
+ past = "{0} tagasi"
+ future = "{0} pärast"
+ and_word = "ja"
+
+ timeframes = {
+ "now": {"past": "just nüüd", "future": "just nüüd"},
+ "second": {"past": "üks sekund", "future": "ühe sekundi"},
+ "seconds": {"past": "{0} sekundit", "future": "{0} sekundi"},
+ "minute": {"past": "üks minut", "future": "ühe minuti"},
+ "minutes": {"past": "{0} minutit", "future": "{0} minuti"},
+ "hour": {"past": "tund aega", "future": "tunni aja"},
+ "hours": {"past": "{0} tundi", "future": "{0} tunni"},
+ "day": {"past": "üks päev", "future": "ühe päeva"},
+ "days": {"past": "{0} päeva", "future": "{0} päeva"},
+ "month": {"past": "üks kuu", "future": "ühe kuu"},
+ "months": {"past": "{0} kuud", "future": "{0} kuu"},
+ "year": {"past": "üks aasta", "future": "ühe aasta"},
+ "years": {"past": "{0} aastat", "future": "{0} aasta"},
+ }
+
+ month_names = [
+ "",
+ "Jaanuar",
+ "Veebruar",
+ "Märts",
+ "Aprill",
+ "Mai",
+ "Juuni",
+ "Juuli",
+ "August",
+ "September",
+ "Oktoober",
+ "November",
+ "Detsember",
+ ]
+ month_abbreviations = [
+ "",
+ "Jan",
+ "Veb",
+ "Mär",
+ "Apr",
+ "Mai",
+ "Jun",
+ "Jul",
+ "Aug",
+ "Sep",
+ "Okt",
+ "Nov",
+ "Dets",
+ ]
+
+ day_names = [
+ "",
+ "Esmaspäev",
+ "Teisipäev",
+ "Kolmapäev",
+ "Neljapäev",
+ "Reede",
+ "Laupäev",
+ "Pühapäev",
+ ]
+ day_abbreviations = ["", "Esm", "Teis", "Kolm", "Nelj", "Re", "Lau", "Püh"]
+
+ def _format_timeframe(self, timeframe, delta):
+ form = self.timeframes[timeframe]
+ if delta > 0:
+ form = form["future"]
+ else:
+ form = form["past"]
+ return form.format(abs(delta))
+
+
+class SwahiliLocale(Locale):
+
+ names = [
+ "sw",
+ "sw_ke",
+ "sw_tz",
+ ]
+
+ past = "{0} iliyopita"
+ future = "muda wa {0}"
+ and_word = "na"
+
+ timeframes = {
+ "now": "sasa hivi",
+ "second": "sekunde",
+ "seconds": "sekunde {0}",
+ "minute": "dakika moja",
+ "minutes": "dakika {0}",
+ "hour": "saa moja",
+ "hours": "saa {0}",
+ "day": "siku moja",
+ "days": "siku {0}",
+ "week": "wiki moja",
+ "weeks": "wiki {0}",
+ "month": "mwezi moja",
+ "months": "miezi {0}",
+ "year": "mwaka moja",
+ "years": "miaka {0}",
+ }
+
+ meridians = {"am": "asu", "pm": "mch", "AM": "ASU", "PM": "MCH"}
+
+ month_names = [
+ "",
+ "Januari",
+ "Februari",
+ "Machi",
+ "Aprili",
+ "Mei",
+ "Juni",
+ "Julai",
+ "Agosti",
+ "Septemba",
+ "Oktoba",
+ "Novemba",
+ "Desemba",
+ ]
+ month_abbreviations = [
+ "",
+ "Jan",
+ "Feb",
+ "Mac",
+ "Apr",
+ "Mei",
+ "Jun",
+ "Jul",
+ "Ago",
+ "Sep",
+ "Okt",
+ "Nov",
+ "Des",
+ ]
+
+ day_names = [
+ "",
+ "Jumatatu",
+ "Jumanne",
+ "Jumatano",
+ "Alhamisi",
+ "Ijumaa",
+ "Jumamosi",
+ "Jumapili",
+ ]
+ day_abbreviations = [
+ "",
+ "Jumatatu",
+ "Jumanne",
+ "Jumatano",
+ "Alhamisi",
+ "Ijumaa",
+ "Jumamosi",
+ "Jumapili",
+ ]
+
+
+_locales = _map_locales()
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/arrow/parser.py b/openpype/modules/ftrack/python2_vendor/arrow/arrow/parser.py
new file mode 100644
index 0000000000..243fd1721c
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/arrow/parser.py
@@ -0,0 +1,596 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import, unicode_literals
+
+import re
+from datetime import datetime, timedelta
+
+from dateutil import tz
+
+from arrow import locales
+from arrow.util import iso_to_gregorian, next_weekday, normalize_timestamp
+
+try:
+ from functools import lru_cache
+except ImportError: # pragma: no cover
+ from backports.functools_lru_cache import lru_cache # pragma: no cover
+
+
+class ParserError(ValueError):
+ pass
+
+
+# Allows for ParserErrors to be propagated from _build_datetime()
+# when day_of_year errors occur.
+# Before this, the ParserErrors were caught by the try/except in
+# _parse_multiformat() and the appropriate error message was not
+# transmitted to the user.
+class ParserMatchError(ParserError):
+ pass
+
+
+class DateTimeParser(object):
+
+ _FORMAT_RE = re.compile(
+ r"(YYY?Y?|MM?M?M?|Do|DD?D?D?|d?d?d?d|HH?|hh?|mm?|ss?|S+|ZZ?Z?|a|A|x|X|W)"
+ )
+ _ESCAPE_RE = re.compile(r"\[[^\[\]]*\]")
+
+ _ONE_OR_TWO_DIGIT_RE = re.compile(r"\d{1,2}")
+ _ONE_OR_TWO_OR_THREE_DIGIT_RE = re.compile(r"\d{1,3}")
+ _ONE_OR_MORE_DIGIT_RE = re.compile(r"\d+")
+ _TWO_DIGIT_RE = re.compile(r"\d{2}")
+ _THREE_DIGIT_RE = re.compile(r"\d{3}")
+ _FOUR_DIGIT_RE = re.compile(r"\d{4}")
+ _TZ_Z_RE = re.compile(r"([\+\-])(\d{2})(?:(\d{2}))?|Z")
+ _TZ_ZZ_RE = re.compile(r"([\+\-])(\d{2})(?:\:(\d{2}))?|Z")
+ _TZ_NAME_RE = re.compile(r"\w[\w+\-/]+")
+ # NOTE: timestamps cannot be parsed from natural language strings (by removing the ^...$) because it will
+ # break cases like "15 Jul 2000" and a format list (see issue #447)
+ _TIMESTAMP_RE = re.compile(r"^\-?\d+\.?\d+$")
+ _TIMESTAMP_EXPANDED_RE = re.compile(r"^\-?\d+$")
+ _TIME_RE = re.compile(r"^(\d{2})(?:\:?(\d{2}))?(?:\:?(\d{2}))?(?:([\.\,])(\d+))?$")
+    _WEEK_DATE_RE = re.compile(r"(?P<year>\d{4})[\-]?W(?P<week>\d{2})[\-]?(?P<day>\d)?")
+
+ _BASE_INPUT_RE_MAP = {
+ "YYYY": _FOUR_DIGIT_RE,
+ "YY": _TWO_DIGIT_RE,
+ "MM": _TWO_DIGIT_RE,
+ "M": _ONE_OR_TWO_DIGIT_RE,
+ "DDDD": _THREE_DIGIT_RE,
+ "DDD": _ONE_OR_TWO_OR_THREE_DIGIT_RE,
+ "DD": _TWO_DIGIT_RE,
+ "D": _ONE_OR_TWO_DIGIT_RE,
+ "HH": _TWO_DIGIT_RE,
+ "H": _ONE_OR_TWO_DIGIT_RE,
+ "hh": _TWO_DIGIT_RE,
+ "h": _ONE_OR_TWO_DIGIT_RE,
+ "mm": _TWO_DIGIT_RE,
+ "m": _ONE_OR_TWO_DIGIT_RE,
+ "ss": _TWO_DIGIT_RE,
+ "s": _ONE_OR_TWO_DIGIT_RE,
+ "X": _TIMESTAMP_RE,
+ "x": _TIMESTAMP_EXPANDED_RE,
+ "ZZZ": _TZ_NAME_RE,
+ "ZZ": _TZ_ZZ_RE,
+ "Z": _TZ_Z_RE,
+ "S": _ONE_OR_MORE_DIGIT_RE,
+ "W": _WEEK_DATE_RE,
+ }
+
+ SEPARATORS = ["-", "/", "."]
+
+ def __init__(self, locale="en_us", cache_size=0):
+
+ self.locale = locales.get_locale(locale)
+ self._input_re_map = self._BASE_INPUT_RE_MAP.copy()
+ self._input_re_map.update(
+ {
+ "MMMM": self._generate_choice_re(
+ self.locale.month_names[1:], re.IGNORECASE
+ ),
+ "MMM": self._generate_choice_re(
+ self.locale.month_abbreviations[1:], re.IGNORECASE
+ ),
+ "Do": re.compile(self.locale.ordinal_day_re),
+ "dddd": self._generate_choice_re(
+ self.locale.day_names[1:], re.IGNORECASE
+ ),
+ "ddd": self._generate_choice_re(
+ self.locale.day_abbreviations[1:], re.IGNORECASE
+ ),
+ "d": re.compile(r"[1-7]"),
+ "a": self._generate_choice_re(
+ (self.locale.meridians["am"], self.locale.meridians["pm"])
+ ),
+ # note: 'A' token accepts both 'am/pm' and 'AM/PM' formats to
+ # ensure backwards compatibility of this token
+ "A": self._generate_choice_re(self.locale.meridians.values()),
+ }
+ )
+ if cache_size > 0:
+ self._generate_pattern_re = lru_cache(maxsize=cache_size)(
+ self._generate_pattern_re
+ )
+
+ # TODO: since we support more than ISO 8601, we should rename this function
+ # IDEA: break into multiple functions
+ def parse_iso(self, datetime_string, normalize_whitespace=False):
+
+ if normalize_whitespace:
+ datetime_string = re.sub(r"\s+", " ", datetime_string.strip())
+
+ has_space_divider = " " in datetime_string
+ has_t_divider = "T" in datetime_string
+
+ num_spaces = datetime_string.count(" ")
+ if has_space_divider and num_spaces != 1 or has_t_divider and num_spaces > 0:
+ raise ParserError(
+ "Expected an ISO 8601-like string, but was given '{}'. Try passing in a format string to resolve this.".format(
+ datetime_string
+ )
+ )
+
+ has_time = has_space_divider or has_t_divider
+ has_tz = False
+
+ # date formats (ISO 8601 and others) to test against
+ # NOTE: YYYYMM is omitted to avoid confusion with YYMMDD (no longer part of ISO 8601, but is still often used)
+ formats = [
+ "YYYY-MM-DD",
+ "YYYY-M-DD",
+ "YYYY-M-D",
+ "YYYY/MM/DD",
+ "YYYY/M/DD",
+ "YYYY/M/D",
+ "YYYY.MM.DD",
+ "YYYY.M.DD",
+ "YYYY.M.D",
+ "YYYYMMDD",
+ "YYYY-DDDD",
+ "YYYYDDDD",
+ "YYYY-MM",
+ "YYYY/MM",
+ "YYYY.MM",
+ "YYYY",
+ "W",
+ ]
+
+ if has_time:
+
+ if has_space_divider:
+ date_string, time_string = datetime_string.split(" ", 1)
+ else:
+ date_string, time_string = datetime_string.split("T", 1)
+
+ time_parts = re.split(r"[\+\-Z]", time_string, 1, re.IGNORECASE)
+
+ time_components = self._TIME_RE.match(time_parts[0])
+
+ if time_components is None:
+ raise ParserError(
+ "Invalid time component provided. Please specify a format or provide a valid time component in the basic or extended ISO 8601 time format."
+ )
+
+ (
+ hours,
+ minutes,
+ seconds,
+ subseconds_sep,
+ subseconds,
+ ) = time_components.groups()
+
+ has_tz = len(time_parts) == 2
+ has_minutes = minutes is not None
+ has_seconds = seconds is not None
+ has_subseconds = subseconds is not None
+
+ is_basic_time_format = ":" not in time_parts[0]
+ tz_format = "Z"
+
+ # use 'ZZ' token instead since tz offset is present in non-basic format
+ if has_tz and ":" in time_parts[1]:
+ tz_format = "ZZ"
+
+ time_sep = "" if is_basic_time_format else ":"
+
+ if has_subseconds:
+ time_string = "HH{time_sep}mm{time_sep}ss{subseconds_sep}S".format(
+ time_sep=time_sep, subseconds_sep=subseconds_sep
+ )
+ elif has_seconds:
+ time_string = "HH{time_sep}mm{time_sep}ss".format(time_sep=time_sep)
+ elif has_minutes:
+ time_string = "HH{time_sep}mm".format(time_sep=time_sep)
+ else:
+ time_string = "HH"
+
+ if has_space_divider:
+ formats = ["{} {}".format(f, time_string) for f in formats]
+ else:
+ formats = ["{}T{}".format(f, time_string) for f in formats]
+
+ if has_time and has_tz:
+ # Add "Z" or "ZZ" to the format strings to indicate to
+ # _parse_token() that a timezone needs to be parsed
+ formats = ["{}{}".format(f, tz_format) for f in formats]
+
+ return self._parse_multiformat(datetime_string, formats)
+
+ def parse(self, datetime_string, fmt, normalize_whitespace=False):
+
+ if normalize_whitespace:
+ datetime_string = re.sub(r"\s+", " ", datetime_string)
+
+ if isinstance(fmt, list):
+ return self._parse_multiformat(datetime_string, fmt)
+
+ fmt_tokens, fmt_pattern_re = self._generate_pattern_re(fmt)
+
+ match = fmt_pattern_re.search(datetime_string)
+
+ if match is None:
+ raise ParserMatchError(
+ "Failed to match '{}' when parsing '{}'".format(fmt, datetime_string)
+ )
+
+ parts = {}
+ for token in fmt_tokens:
+ if token == "Do":
+ value = match.group("value")
+ elif token == "W":
+ value = (match.group("year"), match.group("week"), match.group("day"))
+ else:
+ value = match.group(token)
+ self._parse_token(token, value, parts)
+
+ return self._build_datetime(parts)
+
+ def _generate_pattern_re(self, fmt):
+
+ # fmt is a string of tokens like 'YYYY-MM-DD'
+ # we construct a new string by replacing each
+ # token by its pattern:
+        # 'YYYY-MM-DD' ->
+        # '(?P<YYYY>\d{4})-(?P<MM>\d{2})-(?P<DD>\d{2})'
+ tokens = []
+ offset = 0
+
+ # Escape all special RegEx chars
+ escaped_fmt = re.escape(fmt)
+
+ # Extract the bracketed expressions to be reinserted later.
+ escaped_fmt = re.sub(self._ESCAPE_RE, "#", escaped_fmt)
+
+ # Any number of S is the same as one.
+ # TODO: allow users to specify the number of digits to parse
+ escaped_fmt = re.sub(r"S+", "S", escaped_fmt)
+
+ escaped_data = re.findall(self._ESCAPE_RE, fmt)
+
+ fmt_pattern = escaped_fmt
+
+ for m in self._FORMAT_RE.finditer(escaped_fmt):
+ token = m.group(0)
+ try:
+ input_re = self._input_re_map[token]
+ except KeyError:
+ raise ParserError("Unrecognized token '{}'".format(token))
+ input_pattern = "(?P<{}>{})".format(token, input_re.pattern)
+ tokens.append(token)
+ # a pattern doesn't have the same length as the token
+ # it replaces! We keep the difference in the offset variable.
+ # This works because the string is scanned left-to-right and matches
+ # are returned in the order found by finditer.
+ fmt_pattern = (
+ fmt_pattern[: m.start() + offset]
+ + input_pattern
+ + fmt_pattern[m.end() + offset :]
+ )
+ offset += len(input_pattern) - (m.end() - m.start())
+
+ final_fmt_pattern = ""
+ split_fmt = fmt_pattern.split(r"\#")
+
+ # Due to the way Python splits, 'split_fmt' will always be longer
+ for i in range(len(split_fmt)):
+ final_fmt_pattern += split_fmt[i]
+ if i < len(escaped_data):
+ final_fmt_pattern += escaped_data[i][1:-1]
+
+ # Wrap final_fmt_pattern in a custom word boundary to strictly
+ # match the formatting pattern and filter out date and time formats
+ # that include junk such as: blah1998-09-12 blah, blah 1998-09-12blah,
+ # blah1998-09-12blah. The custom word boundary matches every character
+ # that is not a whitespace character to allow for searching for a date
+ # and time string in a natural language sentence. Therefore, searching
+ # for a string of the form YYYY-MM-DD in "blah 1998-09-12 blah" will
+ # work properly.
+ # Certain punctuation before or after the target pattern such as
+ # "1998-09-12," is permitted. For the full list of valid punctuation,
+ # see the documentation.
+
+ starting_word_boundary = (
+            r"(?<![^\,\.\;\:\?\!\"\'\`\[\]\{\}\(\)\<\>\s])"  # This is the list of punctuation that is ok before the pattern (i.e. "It can't not be these characters before the pattern")
+ r"(\b|^)" # The \b is to block cases like 1201912 but allow 201912 for pattern YYYYMM. The ^ was necessary to allow a negative number through i.e. before epoch numbers
+ )
+ ending_word_boundary = (
+ r"(?=[\,\.\;\:\?\!\"\'\`\[\]\{\}\(\)\<\>]?" # Positive lookahead stating that these punctuation marks can appear after the pattern at most 1 time
+ r"(?!\S))" # Don't allow any non-whitespace character after the punctuation
+ )
+ bounded_fmt_pattern = r"{}{}{}".format(
+ starting_word_boundary, final_fmt_pattern, ending_word_boundary
+ )
+
+ return tokens, re.compile(bounded_fmt_pattern, flags=re.IGNORECASE)
+
+ def _parse_token(self, token, value, parts):
+
+ if token == "YYYY":
+ parts["year"] = int(value)
+
+ elif token == "YY":
+ value = int(value)
+ parts["year"] = 1900 + value if value > 68 else 2000 + value
+
+ elif token in ["MMMM", "MMM"]:
+ parts["month"] = self.locale.month_number(value.lower())
+
+ elif token in ["MM", "M"]:
+ parts["month"] = int(value)
+
+ elif token in ["DDDD", "DDD"]:
+ parts["day_of_year"] = int(value)
+
+ elif token in ["DD", "D"]:
+ parts["day"] = int(value)
+
+ elif token == "Do":
+ parts["day"] = int(value)
+
+ elif token == "dddd":
+ # locale day names are 1-indexed
+ day_of_week = [x.lower() for x in self.locale.day_names].index(
+ value.lower()
+ )
+ parts["day_of_week"] = day_of_week - 1
+
+ elif token == "ddd":
+ # locale day abbreviations are 1-indexed
+ day_of_week = [x.lower() for x in self.locale.day_abbreviations].index(
+ value.lower()
+ )
+ parts["day_of_week"] = day_of_week - 1
+
+ elif token.upper() in ["HH", "H"]:
+ parts["hour"] = int(value)
+
+ elif token in ["mm", "m"]:
+ parts["minute"] = int(value)
+
+ elif token in ["ss", "s"]:
+ parts["second"] = int(value)
+
+ elif token == "S":
+ # We have the *most significant* digits of an arbitrary-precision integer.
+ # We want the six most significant digits as an integer, rounded.
+ # IDEA: add nanosecond support somehow? Need datetime support for it first.
+ value = value.ljust(7, str("0"))
+
+ # floating-point (IEEE-754) defaults to half-to-even rounding
+ seventh_digit = int(value[6])
+ if seventh_digit == 5:
+ rounding = int(value[5]) % 2
+ elif seventh_digit > 5:
+ rounding = 1
+ else:
+ rounding = 0
+
+ parts["microsecond"] = int(value[:6]) + rounding
+
+ elif token == "X":
+ parts["timestamp"] = float(value)
+
+ elif token == "x":
+ parts["expanded_timestamp"] = int(value)
+
+ elif token in ["ZZZ", "ZZ", "Z"]:
+ parts["tzinfo"] = TzinfoParser.parse(value)
+
+ elif token in ["a", "A"]:
+ if value in (self.locale.meridians["am"], self.locale.meridians["AM"]):
+ parts["am_pm"] = "am"
+ elif value in (self.locale.meridians["pm"], self.locale.meridians["PM"]):
+ parts["am_pm"] = "pm"
+
+ elif token == "W":
+ parts["weekdate"] = value
+
+ @staticmethod
+ def _build_datetime(parts):
+
+ weekdate = parts.get("weekdate")
+
+ if weekdate is not None:
+ # we can use strptime (%G, %V, %u) in python 3.6 but these tokens aren't available before that
+ year, week = int(weekdate[0]), int(weekdate[1])
+
+ if weekdate[2] is not None:
+ day = int(weekdate[2])
+ else:
+ # day not given, default to 1
+ day = 1
+
+ dt = iso_to_gregorian(year, week, day)
+ parts["year"] = dt.year
+ parts["month"] = dt.month
+ parts["day"] = dt.day
+
+ timestamp = parts.get("timestamp")
+
+ if timestamp is not None:
+ return datetime.fromtimestamp(timestamp, tz=tz.tzutc())
+
+ expanded_timestamp = parts.get("expanded_timestamp")
+
+ if expanded_timestamp is not None:
+ return datetime.fromtimestamp(
+ normalize_timestamp(expanded_timestamp),
+ tz=tz.tzutc(),
+ )
+
+ day_of_year = parts.get("day_of_year")
+
+ if day_of_year is not None:
+ year = parts.get("year")
+ month = parts.get("month")
+ if year is None:
+ raise ParserError(
+ "Year component is required with the DDD and DDDD tokens."
+ )
+
+ if month is not None:
+ raise ParserError(
+ "Month component is not allowed with the DDD and DDDD tokens."
+ )
+
+ date_string = "{}-{}".format(year, day_of_year)
+ try:
+ dt = datetime.strptime(date_string, "%Y-%j")
+ except ValueError:
+ raise ParserError(
+ "The provided day of year '{}' is invalid.".format(day_of_year)
+ )
+
+ parts["year"] = dt.year
+ parts["month"] = dt.month
+ parts["day"] = dt.day
+
+ day_of_week = parts.get("day_of_week")
+ day = parts.get("day")
+
+ # If day is passed, ignore day of week
+ if day_of_week is not None and day is None:
+ year = parts.get("year", 1970)
+ month = parts.get("month", 1)
+ day = 1
+
+ # dddd => first day of week after epoch
+ # dddd YYYY => first day of week in specified year
+ # dddd MM YYYY => first day of week in specified year and month
+ # dddd MM => first day after epoch in specified month
+ next_weekday_dt = next_weekday(datetime(year, month, day), day_of_week)
+ parts["year"] = next_weekday_dt.year
+ parts["month"] = next_weekday_dt.month
+ parts["day"] = next_weekday_dt.day
+
+ am_pm = parts.get("am_pm")
+ hour = parts.get("hour", 0)
+
+ if am_pm == "pm" and hour < 12:
+ hour += 12
+ elif am_pm == "am" and hour == 12:
+ hour = 0
+
+ # Support for midnight at the end of day
+ if hour == 24:
+ if parts.get("minute", 0) != 0:
+ raise ParserError("Midnight at the end of day must not contain minutes")
+ if parts.get("second", 0) != 0:
+ raise ParserError("Midnight at the end of day must not contain seconds")
+ if parts.get("microsecond", 0) != 0:
+ raise ParserError(
+ "Midnight at the end of day must not contain microseconds"
+ )
+ hour = 0
+ day_increment = 1
+ else:
+ day_increment = 0
+
+ # account for rounding up to 1000000
+ microsecond = parts.get("microsecond", 0)
+ if microsecond == 1000000:
+ microsecond = 0
+ second_increment = 1
+ else:
+ second_increment = 0
+
+ increment = timedelta(days=day_increment, seconds=second_increment)
+
+ return (
+ datetime(
+ year=parts.get("year", 1),
+ month=parts.get("month", 1),
+ day=parts.get("day", 1),
+ hour=hour,
+ minute=parts.get("minute", 0),
+ second=parts.get("second", 0),
+ microsecond=microsecond,
+ tzinfo=parts.get("tzinfo"),
+ )
+ + increment
+ )
+
+ def _parse_multiformat(self, string, formats):
+
+ _datetime = None
+
+ for fmt in formats:
+ try:
+ _datetime = self.parse(string, fmt)
+ break
+ except ParserMatchError:
+ pass
+
+ if _datetime is None:
+ raise ParserError(
+ "Could not match input '{}' to any of the following formats: {}".format(
+ string, ", ".join(formats)
+ )
+ )
+
+ return _datetime
+
+ # generates a capture group of choices separated by an OR operator
+ @staticmethod
+ def _generate_choice_re(choices, flags=0):
+ return re.compile(r"({})".format("|".join(choices)), flags=flags)
+
+
+class TzinfoParser(object):
+ _TZINFO_RE = re.compile(r"^([\+\-])?(\d{2})(?:\:?(\d{2}))?$")
+
+ @classmethod
+ def parse(cls, tzinfo_string):
+
+ tzinfo = None
+
+ if tzinfo_string == "local":
+ tzinfo = tz.tzlocal()
+
+ elif tzinfo_string in ["utc", "UTC", "Z"]:
+ tzinfo = tz.tzutc()
+
+ else:
+
+ iso_match = cls._TZINFO_RE.match(tzinfo_string)
+
+ if iso_match:
+ sign, hours, minutes = iso_match.groups()
+ if minutes is None:
+ minutes = 0
+ seconds = int(hours) * 3600 + int(minutes) * 60
+
+ if sign == "-":
+ seconds *= -1
+
+ tzinfo = tz.tzoffset(None, seconds)
+
+ else:
+ tzinfo = tz.gettz(tzinfo_string)
+
+ if tzinfo is None:
+ raise ParserError(
+ 'Could not parse timezone expression "{}"'.format(tzinfo_string)
+ )
+
+ return tzinfo
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/arrow/util.py b/openpype/modules/ftrack/python2_vendor/arrow/arrow/util.py
new file mode 100644
index 0000000000..acce8878df
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/arrow/util.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import datetime
+import numbers
+
+from dateutil.rrule import WEEKLY, rrule
+
+from arrow.constants import MAX_TIMESTAMP, MAX_TIMESTAMP_MS, MAX_TIMESTAMP_US
+
+
+def next_weekday(start_date, weekday):
+ """Get next weekday from the specified start date.
+
+ :param start_date: Datetime object representing the start date.
+ :param weekday: Next weekday to obtain. Can be a value between 0 (Monday) and 6 (Sunday).
+ :return: Datetime object corresponding to the next weekday after start_date.
+
+ Usage::
+
+ # Get first Monday after epoch
+ >>> next_weekday(datetime(1970, 1, 1), 0)
+ 1970-01-05 00:00:00
+
+ # Get first Thursday after epoch
+ >>> next_weekday(datetime(1970, 1, 1), 3)
+ 1970-01-01 00:00:00
+
+ # Get first Sunday after epoch
+ >>> next_weekday(datetime(1970, 1, 1), 6)
+ 1970-01-04 00:00:00
+ """
+ if weekday < 0 or weekday > 6:
+ raise ValueError("Weekday must be between 0 (Monday) and 6 (Sunday).")
+ return rrule(freq=WEEKLY, dtstart=start_date, byweekday=weekday, count=1)[0]
+
+
+def total_seconds(td):
+ """Get total seconds for timedelta."""
+ return td.total_seconds()
+
+
+def is_timestamp(value):
+ """Check if value is a valid timestamp."""
+ if isinstance(value, bool):
+ return False
+ if not (
+ isinstance(value, numbers.Integral)
+ or isinstance(value, float)
+ or isinstance(value, str)
+ ):
+ return False
+ try:
+ float(value)
+ return True
+ except ValueError:
+ return False
+
+
+def normalize_timestamp(timestamp):
+ """Normalize millisecond and microsecond timestamps into normal timestamps."""
+ if timestamp > MAX_TIMESTAMP:
+ if timestamp < MAX_TIMESTAMP_MS:
+ timestamp /= 1e3
+ elif timestamp < MAX_TIMESTAMP_US:
+ timestamp /= 1e6
+ else:
+ raise ValueError(
+ "The specified timestamp '{}' is too large.".format(timestamp)
+ )
+ return timestamp
+
+
+# Credit to https://stackoverflow.com/a/1700069
+def iso_to_gregorian(iso_year, iso_week, iso_day):
+ """Converts an ISO week date tuple into a datetime object."""
+
+ if not 1 <= iso_week <= 53:
+ raise ValueError("ISO Calendar week value must be between 1-53.")
+
+ if not 1 <= iso_day <= 7:
+ raise ValueError("ISO Calendar day value must be between 1-7")
+
+ # The first week of the year always contains 4 Jan.
+ fourth_jan = datetime.date(iso_year, 1, 4)
+ delta = datetime.timedelta(fourth_jan.isoweekday() - 1)
+ year_start = fourth_jan - delta
+ gregorian = year_start + datetime.timedelta(days=iso_day - 1, weeks=iso_week - 1)
+
+ return gregorian
+
+
+def validate_bounds(bounds):
+ if bounds != "()" and bounds != "(]" and bounds != "[)" and bounds != "[]":
+ raise ValueError(
+ 'Invalid bounds. Please select between "()", "(]", "[)", or "[]".'
+ )
+
+
+# Python 2.7 / 3.0+ definitions for isstr function.
+
+try: # pragma: no cover
+ basestring
+
+ def isstr(s):
+ return isinstance(s, basestring) # noqa: F821
+
+
+except NameError: # pragma: no cover
+
+ def isstr(s):
+ return isinstance(s, str)
+
+
+__all__ = ["next_weekday", "total_seconds", "is_timestamp", "isstr", "iso_to_gregorian"]
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/docs/Makefile b/openpype/modules/ftrack/python2_vendor/arrow/docs/Makefile
new file mode 100644
index 0000000000..d4bb2cbb9e
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+SOURCEDIR = .
+BUILDDIR = _build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/docs/conf.py b/openpype/modules/ftrack/python2_vendor/arrow/docs/conf.py
new file mode 100644
index 0000000000..aaf3c50822
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/docs/conf.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+# -- Path setup --------------------------------------------------------------
+
+import io
+import os
+import sys
+
+sys.path.insert(0, os.path.abspath(".."))
+
+about = {}
+with io.open("../arrow/_version.py", "r", encoding="utf-8") as f:
+ exec(f.read(), about)
+
+# -- Project information -----------------------------------------------------
+
+project = u"Arrow 🏹"
+copyright = "2020, Chris Smith"
+author = "Chris Smith"
+
+release = about["__version__"]
+
+# -- General configuration ---------------------------------------------------
+
+extensions = ["sphinx.ext.autodoc"]
+
+templates_path = []
+
+exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
+
+master_doc = "index"
+source_suffix = ".rst"
+pygments_style = "sphinx"
+
+language = None
+
+# -- Options for HTML output -------------------------------------------------
+
+html_theme = "alabaster"
+html_theme_path = []
+html_static_path = []
+
+html_show_sourcelink = False
+html_show_sphinx = False
+html_show_copyright = True
+
+# https://alabaster.readthedocs.io/en/latest/customization.html
+html_theme_options = {
+ "description": "Arrow is a sensible and human-friendly approach to dates, times and timestamps.",
+ "github_user": "arrow-py",
+ "github_repo": "arrow",
+ "github_banner": True,
+ "show_related": False,
+ "show_powered_by": False,
+ "github_button": True,
+ "github_type": "star",
+ "github_count": "true", # must be a string
+}
+
+html_sidebars = {
+ "**": ["about.html", "localtoc.html", "relations.html", "searchbox.html"]
+}
diff --git a/openpype/modules/ftrack/python2_vendor/arrow/docs/index.rst b/openpype/modules/ftrack/python2_vendor/arrow/docs/index.rst
new file mode 100644
index 0000000000..e2830b04f3
--- /dev/null
+++ b/openpype/modules/ftrack/python2_vendor/arrow/docs/index.rst
@@ -0,0 +1,566 @@
+Arrow: Better dates & times for Python
+======================================
+
+Release v\ |release| (`Installation`_) (`Changelog <https://github.com/arrow-py/arrow/releases>`_)
+
+.. include:: ../README.rst
+ :start-after: start-inclusion-marker-do-not-remove
+ :end-before: end-inclusion-marker-do-not-remove
+
+User's Guide
+------------
+
+Creation
+~~~~~~~~
+
+Get 'now' easily:
+
+.. code-block:: python
+
+ >>> arrow.utcnow()
+ <Arrow [2013-05-07T04:20:39.369271+00:00]>
+
+ >>> arrow.now()
+ <Arrow [2013-05-06T21:20:40.841085-07:00]>
+
+ >>> arrow.now('US/Pacific')
+ <Arrow [2013-05-06T21:20:44.761511-07:00]>
+
+Create from timestamps (:code:`int` or :code:`float`):
+
+.. code-block:: python
+
+ >>> arrow.get(1367900664)
+ <Arrow [2013-05-07T04:24:24+00:00]>
+
+ >>> arrow.get(1367900664.152325)
+ <Arrow [2013-05-07T04:24:24.152325+00:00]>