diff --git a/.github/workflows/prerelease.yml b/.github/workflows/prerelease.yml
index d0853e74d6..82f9a6ae9d 100644
--- a/.github/workflows/prerelease.yml
+++ b/.github/workflows/prerelease.yml
@@ -47,7 +47,7 @@ jobs:
enhancementLabel: '**π Enhancements**'
bugsLabel: '**π Bug fixes**'
deprecatedLabel: '**β οΈ Deprecations**'
- addSections: '{"documentation":{"prefix":"### π Documentation","labels":["documentation"]},"tests":{"prefix":"### β
Testing","labels":["tests"]}}'
+ addSections: '{"documentation":{"prefix":"### π Documentation","labels":["documentation"]},"tests":{"prefix":"### β
Testing","labels":["tests"]},"feature":{"prefix":"### π New features","labels":["feature"]}}'
issues: false
issuesWoLabels: false
sinceTag: "3.0.0"
diff --git a/.gitmodules b/.gitmodules
index 6c12c76b16..b5230a9190 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -4,15 +4,15 @@
[submodule "repos/avalon-unreal-integration"]
path = repos/avalon-unreal-integration
url = https://github.com/pypeclub/avalon-unreal-integration.git
-[submodule "openpype/modules/ftrack/python2_vendor/ftrack-python-api"]
- path = openpype/modules/ftrack/python2_vendor/ftrack-python-api
- url = https://bitbucket.org/ftrack/ftrack-python-api.git
-[submodule "openpype/modules/ftrack/python2_vendor/arrow"]
- path = openpype/modules/ftrack/python2_vendor/arrow
+[submodule "openpype/modules/default_modules/ftrack/python2_vendor/arrow"]
+ path = openpype/modules/default_modules/ftrack/python2_vendor/arrow
url = https://github.com/arrow-py/arrow.git
+[submodule "openpype/modules/default_modules/ftrack/python2_vendor/ftrack-python-api"]
+ path = openpype/modules/default_modules/ftrack/python2_vendor/ftrack-python-api
+ url = https://bitbucket.org/ftrack/ftrack-python-api.git
[submodule "vendor/powershell/BurntToast"]
path = vendor/powershell/BurntToast
url = https://github.com/Windos/BurntToast.git
[submodule "vendor/powershell/PSWriteColor"]
path = vendor/powershell/PSWriteColor
- url = "https://github.com/EvotecIT/PSWriteColor.git"
\ No newline at end of file
+ url = https://github.com/EvotecIT/PSWriteColor.git
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0f2cb2b1ab..e1737458b2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,106 +1,96 @@
# Changelog
-## [3.3.0-nightly.9](https://github.com/pypeclub/OpenPype/tree/HEAD)
+## [3.4.0-nightly.4](https://github.com/pypeclub/OpenPype/tree/HEAD)
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.2.0...HEAD)
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.3.1...HEAD)
-**π Enhancements**
+**Merged pull requests:**
+- Ftrack: Fix hosts attribute in collect ftrack username [\#1972](https://github.com/pypeclub/OpenPype/pull/1972)
+- Removed deprecated submodules [\#1967](https://github.com/pypeclub/OpenPype/pull/1967)
+- Launcher: Fix crashes on action click [\#1964](https://github.com/pypeclub/OpenPype/pull/1964)
+- Settings: Minor fixes in UI and missing default values [\#1963](https://github.com/pypeclub/OpenPype/pull/1963)
+- Blender: Toggle system console works on windows [\#1962](https://github.com/pypeclub/OpenPype/pull/1962)
+- Resolve path when adding to zip [\#1960](https://github.com/pypeclub/OpenPype/pull/1960)
+- Bump url-parse from 1.5.1 to 1.5.3 in /website [\#1958](https://github.com/pypeclub/OpenPype/pull/1958)
+- Global: Avalon Host name collector [\#1949](https://github.com/pypeclub/OpenPype/pull/1949)
+- Global: Define hosts in CollectSceneVersion [\#1948](https://github.com/pypeclub/OpenPype/pull/1948)
+- Maya: Add Xgen family support [\#1947](https://github.com/pypeclub/OpenPype/pull/1947)
+- Add face sets to exported alembics [\#1942](https://github.com/pypeclub/OpenPype/pull/1942)
+- Bump path-parse from 1.0.6 to 1.0.7 in /website [\#1933](https://github.com/pypeclub/OpenPype/pull/1933)
+- \#1894 - adds host to template\_name\_profiles for filtering [\#1915](https://github.com/pypeclub/OpenPype/pull/1915)
+- Environments: Tool environments in alphabetical order [\#1910](https://github.com/pypeclub/OpenPype/pull/1910)
+- Disregard publishing time. [\#1888](https://github.com/pypeclub/OpenPype/pull/1888)
+- Feature/webpublisher backend [\#1876](https://github.com/pypeclub/OpenPype/pull/1876)
+- Dynamic modules [\#1872](https://github.com/pypeclub/OpenPype/pull/1872)
+- Houdini: add Camera, Point Cache, Composite, Redshift ROP and VDB Cache support [\#1821](https://github.com/pypeclub/OpenPype/pull/1821)
+
+## [3.3.1](https://github.com/pypeclub/OpenPype/tree/3.3.1) (2021-08-20)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.3.1-nightly.1...3.3.1)
+
+**Merged pull requests:**
+
+- TVPaint: Fixed rendered frame indexes [\#1946](https://github.com/pypeclub/OpenPype/pull/1946)
+- Maya: Menu actions fix [\#1945](https://github.com/pypeclub/OpenPype/pull/1945)
+- standalone: editorial shared object problem [\#1941](https://github.com/pypeclub/OpenPype/pull/1941)
+- Bugfix nuke deadline app name [\#1928](https://github.com/pypeclub/OpenPype/pull/1928)
+
+## [3.3.0](https://github.com/pypeclub/OpenPype/tree/3.3.0) (2021-08-17)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.3.0-nightly.11...3.3.0)
+
+**Merged pull requests:**
+
+- Python console interpreter [\#1940](https://github.com/pypeclub/OpenPype/pull/1940)
+- Fix - make AE workfile publish to Ftrack configurable [\#1937](https://github.com/pypeclub/OpenPype/pull/1937)
+- Fix - ftrack family was added incorrectly in some cases [\#1935](https://github.com/pypeclub/OpenPype/pull/1935)
+- Settings UI: Breadcrumbs in settings [\#1932](https://github.com/pypeclub/OpenPype/pull/1932)
+- Fix - Deadline publish on Linux started Tray instead of headless publishing [\#1930](https://github.com/pypeclub/OpenPype/pull/1930)
+- Maya: Validate Model Name - repair accident deletion in settings defaults [\#1929](https://github.com/pypeclub/OpenPype/pull/1929)
+- Global: Updated logos and Default settings [\#1927](https://github.com/pypeclub/OpenPype/pull/1927)
+- Nuke: submit to farm failed due `ftrack` family remove [\#1926](https://github.com/pypeclub/OpenPype/pull/1926)
+- Check for missing β¨ Python when using `pyenv` [\#1925](https://github.com/pypeclub/OpenPype/pull/1925)
+- Maya: Scene patching π©Ήon submission to Deadline [\#1923](https://github.com/pypeclub/OpenPype/pull/1923)
+- Fix - validate takes repre\["files"\] as list all the time [\#1922](https://github.com/pypeclub/OpenPype/pull/1922)
+- Settings: Default values for enum [\#1920](https://github.com/pypeclub/OpenPype/pull/1920)
+- Settings UI: Modifiable dict view enhance [\#1919](https://github.com/pypeclub/OpenPype/pull/1919)
+- standalone: validator asset parents [\#1917](https://github.com/pypeclub/OpenPype/pull/1917)
+- Nuke: update video file crassing [\#1916](https://github.com/pypeclub/OpenPype/pull/1916)
+- Fix - texture validators for workfiles triggers only for textures workfiles [\#1914](https://github.com/pypeclub/OpenPype/pull/1914)
- submodules: avalon-core update [\#1911](https://github.com/pypeclub/OpenPype/pull/1911)
+- Settings UI: List order works as expected [\#1906](https://github.com/pypeclub/OpenPype/pull/1906)
+- Add support for multiple Deadline β οΈβ servers [\#1905](https://github.com/pypeclub/OpenPype/pull/1905)
+- Hiero: loaded clip was not set colorspace from version data [\#1904](https://github.com/pypeclub/OpenPype/pull/1904)
+- Pyblish UI: Fix collecting stage processing [\#1903](https://github.com/pypeclub/OpenPype/pull/1903)
+- Burnins: Use input's bitrate in h624 [\#1902](https://github.com/pypeclub/OpenPype/pull/1902)
- Feature AE local render [\#1901](https://github.com/pypeclub/OpenPype/pull/1901)
- Ftrack: Where I run action enhancement [\#1900](https://github.com/pypeclub/OpenPype/pull/1900)
- Ftrack: Private project server actions [\#1899](https://github.com/pypeclub/OpenPype/pull/1899)
- Support nested studio plugins paths. [\#1898](https://github.com/pypeclub/OpenPype/pull/1898)
+- Bug: fixed python detection [\#1893](https://github.com/pypeclub/OpenPype/pull/1893)
- Settings: global validators with options [\#1892](https://github.com/pypeclub/OpenPype/pull/1892)
- Settings: Conditional dict enum positioning [\#1891](https://github.com/pypeclub/OpenPype/pull/1891)
+- global: integrate name missing default template [\#1890](https://github.com/pypeclub/OpenPype/pull/1890)
+- publisher: editorial plugins fixes [\#1889](https://github.com/pypeclub/OpenPype/pull/1889)
- Expose stop timer through rest api. [\#1886](https://github.com/pypeclub/OpenPype/pull/1886)
- TVPaint: Increment workfile [\#1885](https://github.com/pypeclub/OpenPype/pull/1885)
- Allow Multiple Notes to run on tasks. [\#1882](https://github.com/pypeclub/OpenPype/pull/1882)
+- Normalize path returned from Workfiles. [\#1880](https://github.com/pypeclub/OpenPype/pull/1880)
- Prepare for pyside2 [\#1869](https://github.com/pypeclub/OpenPype/pull/1869)
- Filter hosts in settings host-enum [\#1868](https://github.com/pypeclub/OpenPype/pull/1868)
- Local actions with process identifier [\#1867](https://github.com/pypeclub/OpenPype/pull/1867)
- Workfile tool start at host launch support [\#1865](https://github.com/pypeclub/OpenPype/pull/1865)
-- Anatomy schema validation [\#1864](https://github.com/pypeclub/OpenPype/pull/1864)
-- Ftrack prepare project structure [\#1861](https://github.com/pypeclub/OpenPype/pull/1861)
-- Independent general environments [\#1853](https://github.com/pypeclub/OpenPype/pull/1853)
-- TVPaint Start Frame [\#1844](https://github.com/pypeclub/OpenPype/pull/1844)
-- Ftrack push attributes action adds traceback to job [\#1843](https://github.com/pypeclub/OpenPype/pull/1843)
-- Prepare project action enhance [\#1838](https://github.com/pypeclub/OpenPype/pull/1838)
-- Standalone Publish of textures family [\#1834](https://github.com/pypeclub/OpenPype/pull/1834)
-- nuke: settings create missing default subsets [\#1829](https://github.com/pypeclub/OpenPype/pull/1829)
-- Update poetry lock [\#1823](https://github.com/pypeclub/OpenPype/pull/1823)
-- Settings: settings for plugins [\#1819](https://github.com/pypeclub/OpenPype/pull/1819)
-- Settings list can use template or schema as object type [\#1815](https://github.com/pypeclub/OpenPype/pull/1815)
-- Maya: Deadline custom settings [\#1797](https://github.com/pypeclub/OpenPype/pull/1797)
-- Maya: Shader name validation [\#1762](https://github.com/pypeclub/OpenPype/pull/1762)
-
-**π Bug fixes**
-
-- Nuke: update video file crassing [\#1916](https://github.com/pypeclub/OpenPype/pull/1916)
-- Fix - texture validators for workfiles triggers only for textures workfiles [\#1914](https://github.com/pypeclub/OpenPype/pull/1914)
-- Settings UI: List order works as expected [\#1906](https://github.com/pypeclub/OpenPype/pull/1906)
-- Hiero: loaded clip was not set colorspace from version data [\#1904](https://github.com/pypeclub/OpenPype/pull/1904)
-- Pyblish UI: Fix collecting stage processing [\#1903](https://github.com/pypeclub/OpenPype/pull/1903)
-- Burnins: Use input's bitrate in h624 [\#1902](https://github.com/pypeclub/OpenPype/pull/1902)
-- Bug: fixed python detection [\#1893](https://github.com/pypeclub/OpenPype/pull/1893)
-- global: integrate name missing default template [\#1890](https://github.com/pypeclub/OpenPype/pull/1890)
-- publisher: editorial plugins fixes [\#1889](https://github.com/pypeclub/OpenPype/pull/1889)
-- Normalize path returned from Workfiles. [\#1880](https://github.com/pypeclub/OpenPype/pull/1880)
-- Workfiles tool event arguments fix [\#1862](https://github.com/pypeclub/OpenPype/pull/1862)
-- imageio: fix grouping [\#1856](https://github.com/pypeclub/OpenPype/pull/1856)
-- publisher: missing version in subset prop [\#1849](https://github.com/pypeclub/OpenPype/pull/1849)
-- Ftrack type error fix in sync to avalon event handler [\#1845](https://github.com/pypeclub/OpenPype/pull/1845)
-- Nuke: updating effects subset fail [\#1841](https://github.com/pypeclub/OpenPype/pull/1841)
-- nuke: write render node skipped with crop [\#1836](https://github.com/pypeclub/OpenPype/pull/1836)
-- Project folder structure overrides [\#1813](https://github.com/pypeclub/OpenPype/pull/1813)
-- Maya: fix yeti settings path in extractor [\#1809](https://github.com/pypeclub/OpenPype/pull/1809)
-- Failsafe for cross project containers. [\#1806](https://github.com/pypeclub/OpenPype/pull/1806)
-- Settings error dialog on show [\#1798](https://github.com/pypeclub/OpenPype/pull/1798)
-
-**Merged pull requests:**
-
- Maya: add support for `RedshiftNormalMap` node, fix `tx` linear space π [\#1863](https://github.com/pypeclub/OpenPype/pull/1863)
-- Add support for pyenv-win on windows [\#1822](https://github.com/pypeclub/OpenPype/pull/1822)
-- PS, AE - send actual context when another webserver is running [\#1811](https://github.com/pypeclub/OpenPype/pull/1811)
+- Workfiles tool event arguments fix [\#1862](https://github.com/pypeclub/OpenPype/pull/1862)
+- Maya: support for configurable `dirmap` πΊοΈ [\#1859](https://github.com/pypeclub/OpenPype/pull/1859)
+- Maya: don't add reference members as connections to the container set π¦ [\#1855](https://github.com/pypeclub/OpenPype/pull/1855)
+- Settings list can use template or schema as object type [\#1815](https://github.com/pypeclub/OpenPype/pull/1815)
## [3.2.0](https://github.com/pypeclub/OpenPype/tree/3.2.0) (2021-07-13)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.2.0-nightly.7...3.2.0)
-**π Enhancements**
-
-- Nuke: ftrack family plugin settings preset [\#1805](https://github.com/pypeclub/OpenPype/pull/1805)
-- Standalone publisher last project [\#1799](https://github.com/pypeclub/OpenPype/pull/1799)
-- Ftrack Multiple notes as server action [\#1795](https://github.com/pypeclub/OpenPype/pull/1795)
-- Settings conditional dict [\#1777](https://github.com/pypeclub/OpenPype/pull/1777)
-- Settings application use python 2 only where needed [\#1776](https://github.com/pypeclub/OpenPype/pull/1776)
-- Settings UI copy/paste [\#1769](https://github.com/pypeclub/OpenPype/pull/1769)
-- Workfile tool widths [\#1766](https://github.com/pypeclub/OpenPype/pull/1766)
-- Push hierarchical attributes care about task parent changes [\#1763](https://github.com/pypeclub/OpenPype/pull/1763)
-- Application executables with environment variables [\#1757](https://github.com/pypeclub/OpenPype/pull/1757)
-- Deadline: Nuke submission additional attributes [\#1756](https://github.com/pypeclub/OpenPype/pull/1756)
-
-**π Bug fixes**
-
-- nuke: fixing wrong name of family folder when `used existing frames` [\#1803](https://github.com/pypeclub/OpenPype/pull/1803)
-- Collect ftrack family bugs [\#1801](https://github.com/pypeclub/OpenPype/pull/1801)
-- Invitee email can be None which break the Ftrack commit. [\#1788](https://github.com/pypeclub/OpenPype/pull/1788)
-- Fix: staging and `--use-version` option [\#1786](https://github.com/pypeclub/OpenPype/pull/1786)
-- Otio unrelated error on import [\#1782](https://github.com/pypeclub/OpenPype/pull/1782)
-- FFprobe streams order [\#1775](https://github.com/pypeclub/OpenPype/pull/1775)
-- Fix - single file files are str only, cast it to list to count properly [\#1772](https://github.com/pypeclub/OpenPype/pull/1772)
-- Environments in app executable for MacOS [\#1768](https://github.com/pypeclub/OpenPype/pull/1768)
-- Project specific environments [\#1767](https://github.com/pypeclub/OpenPype/pull/1767)
-- Settings UI with refresh button [\#1764](https://github.com/pypeclub/OpenPype/pull/1764)
-- Standalone publisher thumbnail extractor fix [\#1761](https://github.com/pypeclub/OpenPype/pull/1761)
-- Anatomy others templates don't cause crash [\#1758](https://github.com/pypeclub/OpenPype/pull/1758)
-
-**Merged pull requests:**
-
-- Build: don't add Poetry to `PATH` [\#1808](https://github.com/pypeclub/OpenPype/pull/1808)
-- Bump prismjs from 1.23.0 to 1.24.0 in /website [\#1773](https://github.com/pypeclub/OpenPype/pull/1773)
-- Bc/fix/docs [\#1771](https://github.com/pypeclub/OpenPype/pull/1771)
-
## [2.18.4](https://github.com/pypeclub/OpenPype/tree/2.18.4) (2021-06-24)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/2.18.3...2.18.4)
diff --git a/igniter/bootstrap_repos.py b/igniter/bootstrap_repos.py
index 8c081b8614..b49a2f6e7f 100644
--- a/igniter/bootstrap_repos.py
+++ b/igniter/bootstrap_repos.py
@@ -508,7 +508,7 @@ class BootstrapRepos:
processed_path = file
self._print(f"- processing {processed_path}")
- zip_file.write(file, file.relative_to(openpype_root))
+ zip_file.write(file, file.resolve().relative_to(openpype_root))
# test if zip is ok
zip_file.testzip()
diff --git a/openpype/__init__.py b/openpype/__init__.py
index e7462e14e9..9d55006a67 100644
--- a/openpype/__init__.py
+++ b/openpype/__init__.py
@@ -68,6 +68,10 @@ def patched_discover(superclass):
def install():
"""Install Pype to Avalon."""
from pyblish.lib import MessageHandler
+ from openpype.modules import load_modules
+
+ # Make sure modules are loaded
+ load_modules()
def modified_emit(obj, record):
"""Method replacing `emit` in Pyblish's MessageHandler."""
diff --git a/openpype/cli.py b/openpype/cli.py
index ec5b04c468..c446d5e443 100644
--- a/openpype/cli.py
+++ b/openpype/cli.py
@@ -94,6 +94,31 @@ def eventserver(debug,
)
+@main.command()
+@click.option("-d", "--debug", is_flag=True, help="Print debug messages")
+@click.option("-h", "--host", help="Host", default=None)
+@click.option("-p", "--port", help="Port", default=None)
+@click.option("-e", "--executable", help="Executable")
+@click.option("-u", "--upload_dir", help="Upload dir")
+def webpublisherwebserver(debug, executable, upload_dir, host=None, port=None):
+ """Starts webserver for communication with Webpublish FR via command line
+
+    OP must be configured on a machine, e.g. OPENPYPE_MONGO filled AND
+ FTRACK_BOT_API_KEY provided with api key from Ftrack.
+
+ Expect "pype.club" user created on Ftrack.
+ """
+ if debug:
+ os.environ['OPENPYPE_DEBUG'] = "3"
+
+ PypeCommands().launch_webpublisher_webservercli(
+ upload_dir=upload_dir,
+ executable=executable,
+ host=host,
+ port=port
+ )
+
+
@main.command()
@click.argument("output_json_path")
@click.option("--project", help="Project name", default=None)
@@ -131,6 +156,25 @@ def publish(debug, paths, targets):
PypeCommands.publish(list(paths), targets)
+@main.command()
+@click.argument("path")
+@click.option("-d", "--debug", is_flag=True, help="Print debug messages")
+@click.option("-h", "--host", help="Host")
+@click.option("-u", "--user", help="User email address")
+@click.option("-p", "--project", help="Project")
+@click.option("-t", "--targets", help="Targets", default=None,
+ multiple=True)
+def remotepublish(debug, project, path, host, targets=None, user=None):
+ """Start CLI publishing.
+
+ Publish collects json from paths provided as an argument.
+ More than one path is allowed.
+ """
+ if debug:
+ os.environ['OPENPYPE_DEBUG'] = '3'
+ PypeCommands.remotepublish(project, path, host, user, targets=targets)
+
+
@main.command()
@click.option("-d", "--debug", is_flag=True, help="Print debug messages")
@click.option("-p", "--project", required=True,
diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py b/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py
index 2f8f9ae91b..c1c2be4855 100644
--- a/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py
+++ b/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py
@@ -47,7 +47,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
"subset": subset,
"label": scene_file,
"family": family,
- "families": [family, "ftrack"],
+ "families": [family],
"representations": list()
})
diff --git a/openpype/hosts/blender/api/plugin.py b/openpype/hosts/blender/api/plugin.py
index de30da3319..50b73ade2b 100644
--- a/openpype/hosts/blender/api/plugin.py
+++ b/openpype/hosts/blender/api/plugin.py
@@ -5,11 +5,12 @@ from typing import Dict, List, Optional
import bpy
-from avalon import api
-import avalon.blender
+from avalon import api, blender
+from avalon.blender import ops
+from avalon.blender.pipeline import AVALON_CONTAINERS
from openpype.api import PypeCreatorMixin
-VALID_EXTENSIONS = [".blend", ".json", ".abc"]
+VALID_EXTENSIONS = [".blend", ".json", ".abc", ".fbx"]
def asset_name(
@@ -27,32 +28,24 @@ def get_unique_number(
asset: str, subset: str
) -> str:
"""Return a unique number based on the asset name."""
- avalon_containers = [
- c for c in bpy.data.collections
- if c.name == 'AVALON_CONTAINERS'
- ]
- containers = []
- # First, add the children of avalon containers
- for c in avalon_containers:
- containers.extend(c.children)
- # then keep looping to include all the children
- for c in containers:
- containers.extend(c.children)
- container_names = [
- c.name for c in containers
- ]
+ avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
+ if not avalon_container:
+ return "01"
+ asset_groups = avalon_container.all_objects
+
+ container_names = [c.name for c in asset_groups if c.type == 'EMPTY']
count = 1
- name = f"{asset}_{count:0>2}_{subset}_CON"
+ name = f"{asset}_{count:0>2}_{subset}"
while name in container_names:
count += 1
- name = f"{asset}_{count:0>2}_{subset}_CON"
+ name = f"{asset}_{count:0>2}_{subset}"
return f"{count:0>2}"
def prepare_data(data, container_name):
name = data.name
local_data = data.make_local()
- local_data.name = f"{name}:{container_name}"
+ local_data.name = f"{container_name}:{name}"
return local_data
@@ -102,7 +95,7 @@ def get_local_collection_with_name(name):
return None
-class Creator(PypeCreatorMixin, avalon.blender.Creator):
+class Creator(PypeCreatorMixin, blender.Creator):
pass
@@ -173,6 +166,16 @@ class AssetLoader(api.Loader):
name: Optional[str] = None,
namespace: Optional[str] = None,
options: Optional[Dict] = None) -> Optional[bpy.types.Collection]:
+ """ Run the loader on Blender main thread"""
+ mti = ops.MainThreadItem(self._load, context, name, namespace, options)
+ ops.execute_in_main_thread(mti)
+
+ def _load(self,
+ context: dict,
+ name: Optional[str] = None,
+ namespace: Optional[str] = None,
+ options: Optional[Dict] = None
+ ) -> Optional[bpy.types.Collection]:
"""Load asset via database
Arguments:
@@ -218,16 +221,26 @@ class AssetLoader(api.Loader):
# loader=self.__class__.__name__,
# )
- asset = context["asset"]["name"]
- subset = context["subset"]["name"]
- instance_name = asset_name(asset, subset, unique_number) + '_CON'
+ # asset = context["asset"]["name"]
+ # subset = context["subset"]["name"]
+ # instance_name = asset_name(asset, subset, unique_number) + '_CON'
- return self._get_instance_collection(instance_name, nodes)
+ # return self._get_instance_collection(instance_name, nodes)
+
+ def exec_update(self, container: Dict, representation: Dict):
+ """Must be implemented by a sub-class"""
+ raise NotImplementedError("Must be implemented by a sub-class")
def update(self, container: Dict, representation: Dict):
+ """ Run the update on Blender main thread"""
+ mti = ops.MainThreadItem(self.exec_update, container, representation)
+ ops.execute_in_main_thread(mti)
+
+ def exec_remove(self, container: Dict) -> bool:
"""Must be implemented by a sub-class"""
raise NotImplementedError("Must be implemented by a sub-class")
def remove(self, container: Dict) -> bool:
- """Must be implemented by a sub-class"""
- raise NotImplementedError("Must be implemented by a sub-class")
+ """ Run the remove on Blender main thread"""
+ mti = ops.MainThreadItem(self.exec_remove, container)
+ ops.execute_in_main_thread(mti)
diff --git a/openpype/hosts/blender/hooks/pre_windows_console.py b/openpype/hosts/blender/hooks/pre_windows_console.py
new file mode 100644
index 0000000000..d6be45b225
--- /dev/null
+++ b/openpype/hosts/blender/hooks/pre_windows_console.py
@@ -0,0 +1,28 @@
+import subprocess
+from openpype.lib import PreLaunchHook
+
+
+class BlenderConsoleWindows(PreLaunchHook):
+ """Foundry applications have specific way how to launch them.
+
+ Blender is executed "like" python process so it is required to pass
+ `CREATE_NEW_CONSOLE` flag on windows to trigger creation of new console.
+    At the same time the newly created console won't create its own stdout
+ and stderr handlers so they should not be redirected to DEVNULL.
+ """
+
+ # Should be as last hook because must change launch arguments to string
+ order = 1000
+ app_groups = ["blender"]
+ platforms = ["windows"]
+
+ def execute(self):
+ # Change `creationflags` to CREATE_NEW_CONSOLE
+        # - on Windows Blender will create a new window using its console
+        # Set `stdout` and `stderr` to None so the newly created console does not
+ # have redirected output to DEVNULL in build
+ self.launch_context.kwargs.update({
+ "creationflags": subprocess.CREATE_NEW_CONSOLE,
+ "stdout": None,
+ "stderr": None
+ })
diff --git a/openpype/hosts/blender/plugins/create/create_animation.py b/openpype/hosts/blender/plugins/create/create_animation.py
index 9aebf7e9b7..f7887b7e80 100644
--- a/openpype/hosts/blender/plugins/create/create_animation.py
+++ b/openpype/hosts/blender/plugins/create/create_animation.py
@@ -2,11 +2,13 @@
import bpy
-from avalon import api, blender
-import openpype.hosts.blender.api.plugin
+from avalon import api
+from avalon.blender import lib, ops
+from avalon.blender.pipeline import AVALON_INSTANCES
+from openpype.hosts.blender.api import plugin
-class CreateAnimation(openpype.hosts.blender.api.plugin.Creator):
+class CreateAnimation(plugin.Creator):
"""Animation output for character rigs"""
name = "animationMain"
@@ -15,16 +17,36 @@ class CreateAnimation(openpype.hosts.blender.api.plugin.Creator):
icon = "male"
def process(self):
+ """ Run the creator on Blender main thread"""
+ mti = ops.MainThreadItem(self._process)
+ ops.execute_in_main_thread(mti)
+
+ def _process(self):
+        # Get Instance Container or create it if it does not exist
+ instances = bpy.data.collections.get(AVALON_INSTANCES)
+ if not instances:
+ instances = bpy.data.collections.new(name=AVALON_INSTANCES)
+ bpy.context.scene.collection.children.link(instances)
+
+ # Create instance object
+ # name = self.name
+ # if not name:
asset = self.data["asset"]
subset = self.data["subset"]
- name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
- collection = bpy.data.collections.new(name=name)
- bpy.context.scene.collection.children.link(collection)
+ name = plugin.asset_name(asset, subset)
+ # asset_group = bpy.data.objects.new(name=name, object_data=None)
+ # asset_group.empty_display_type = 'SINGLE_ARROW'
+ asset_group = bpy.data.collections.new(name=name)
+ instances.children.link(asset_group)
self.data['task'] = api.Session.get('AVALON_TASK')
- blender.lib.imprint(collection, self.data)
+ lib.imprint(asset_group, self.data)
if (self.options or {}).get("useSelection"):
- for obj in blender.lib.get_selection():
- collection.objects.link(obj)
+ selected = lib.get_selection()
+ for obj in selected:
+ asset_group.objects.link(obj)
+ elif (self.options or {}).get("asset_group"):
+ obj = (self.options or {}).get("asset_group")
+ asset_group.objects.link(obj)
- return collection
+ return asset_group
diff --git a/openpype/hosts/blender/plugins/create/create_layout.py b/openpype/hosts/blender/plugins/create/create_layout.py
index 5404cec587..831261f027 100644
--- a/openpype/hosts/blender/plugins/create/create_layout.py
+++ b/openpype/hosts/blender/plugins/create/create_layout.py
@@ -3,11 +3,12 @@
import bpy
from avalon import api
-from avalon.blender import lib
-import openpype.hosts.blender.api.plugin
+from avalon.blender import lib, ops
+from avalon.blender.pipeline import AVALON_INSTANCES
+from openpype.hosts.blender.api import plugin
-class CreateLayout(openpype.hosts.blender.api.plugin.Creator):
+class CreateLayout(plugin.Creator):
"""Layout output for character rigs"""
name = "layoutMain"
@@ -16,13 +17,34 @@ class CreateLayout(openpype.hosts.blender.api.plugin.Creator):
icon = "cubes"
def process(self):
+ """ Run the creator on Blender main thread"""
+ mti = ops.MainThreadItem(self._process)
+ ops.execute_in_main_thread(mti)
+ def _process(self):
+        # Get Instance Container or create it if it does not exist
+ instances = bpy.data.collections.get(AVALON_INSTANCES)
+ if not instances:
+ instances = bpy.data.collections.new(name=AVALON_INSTANCES)
+ bpy.context.scene.collection.children.link(instances)
+
+ # Create instance object
asset = self.data["asset"]
subset = self.data["subset"]
- name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
- collection = bpy.context.collection
- collection.name = name
+ name = plugin.asset_name(asset, subset)
+ asset_group = bpy.data.objects.new(name=name, object_data=None)
+ asset_group.empty_display_type = 'SINGLE_ARROW'
+ instances.objects.link(asset_group)
self.data['task'] = api.Session.get('AVALON_TASK')
- lib.imprint(collection, self.data)
+ lib.imprint(asset_group, self.data)
- return collection
+ # Add selected objects to instance
+ if (self.options or {}).get("useSelection"):
+ bpy.context.view_layer.objects.active = asset_group
+ selected = lib.get_selection()
+ for obj in selected:
+ obj.select_set(True)
+ selected.append(asset_group)
+ bpy.ops.object.parent_set(keep_transform=True)
+
+ return asset_group
diff --git a/openpype/hosts/blender/plugins/create/create_model.py b/openpype/hosts/blender/plugins/create/create_model.py
index 921d86513b..e778f5b74f 100644
--- a/openpype/hosts/blender/plugins/create/create_model.py
+++ b/openpype/hosts/blender/plugins/create/create_model.py
@@ -3,11 +3,12 @@
import bpy
from avalon import api
-from avalon.blender import lib
-import openpype.hosts.blender.api.plugin
+from avalon.blender import lib, ops
+from avalon.blender.pipeline import AVALON_INSTANCES
+from openpype.hosts.blender.api import plugin
-class CreateModel(openpype.hosts.blender.api.plugin.Creator):
+class CreateModel(plugin.Creator):
"""Polygonal static geometry"""
name = "modelMain"
@@ -16,17 +17,34 @@ class CreateModel(openpype.hosts.blender.api.plugin.Creator):
icon = "cube"
def process(self):
+ """ Run the creator on Blender main thread"""
+ mti = ops.MainThreadItem(self._process)
+ ops.execute_in_main_thread(mti)
+ def _process(self):
+        # Get Instance Container or create it if it does not exist
+ instances = bpy.data.collections.get(AVALON_INSTANCES)
+ if not instances:
+ instances = bpy.data.collections.new(name=AVALON_INSTANCES)
+ bpy.context.scene.collection.children.link(instances)
+
+ # Create instance object
asset = self.data["asset"]
subset = self.data["subset"]
- name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
- collection = bpy.data.collections.new(name=name)
- bpy.context.scene.collection.children.link(collection)
+ name = plugin.asset_name(asset, subset)
+ asset_group = bpy.data.objects.new(name=name, object_data=None)
+ asset_group.empty_display_type = 'SINGLE_ARROW'
+ instances.objects.link(asset_group)
self.data['task'] = api.Session.get('AVALON_TASK')
- lib.imprint(collection, self.data)
+ lib.imprint(asset_group, self.data)
+ # Add selected objects to instance
if (self.options or {}).get("useSelection"):
- for obj in lib.get_selection():
- collection.objects.link(obj)
+ bpy.context.view_layer.objects.active = asset_group
+ selected = lib.get_selection()
+ for obj in selected:
+ obj.select_set(True)
+ selected.append(asset_group)
+ bpy.ops.object.parent_set(keep_transform=True)
- return collection
+ return asset_group
diff --git a/openpype/hosts/blender/plugins/create/create_rig.py b/openpype/hosts/blender/plugins/create/create_rig.py
index 116fb9f742..2e1c71f570 100644
--- a/openpype/hosts/blender/plugins/create/create_rig.py
+++ b/openpype/hosts/blender/plugins/create/create_rig.py
@@ -3,11 +3,12 @@
import bpy
from avalon import api
-from avalon.blender import lib
-import openpype.hosts.blender.api.plugin
+from avalon.blender import lib, ops
+from avalon.blender.pipeline import AVALON_INSTANCES
+from openpype.hosts.blender.api import plugin
-class CreateRig(openpype.hosts.blender.api.plugin.Creator):
+class CreateRig(plugin.Creator):
"""Artist-friendly rig with controls to direct motion"""
name = "rigMain"
@@ -16,26 +17,34 @@ class CreateRig(openpype.hosts.blender.api.plugin.Creator):
icon = "wheelchair"
def process(self):
+ """ Run the creator on Blender main thread"""
+ mti = ops.MainThreadItem(self._process)
+ ops.execute_in_main_thread(mti)
+ def _process(self):
+ # Get Instance Containter or create it if it does not exist
+ instances = bpy.data.collections.get(AVALON_INSTANCES)
+ if not instances:
+ instances = bpy.data.collections.new(name=AVALON_INSTANCES)
+ bpy.context.scene.collection.children.link(instances)
+
+ # Create instance object
asset = self.data["asset"]
subset = self.data["subset"]
- name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
- collection = bpy.data.collections.new(name=name)
- bpy.context.scene.collection.children.link(collection)
+ name = plugin.asset_name(asset, subset)
+ asset_group = bpy.data.objects.new(name=name, object_data=None)
+ asset_group.empty_display_type = 'SINGLE_ARROW'
+ instances.objects.link(asset_group)
self.data['task'] = api.Session.get('AVALON_TASK')
- lib.imprint(collection, self.data)
-
- # Add the rig object and all the children meshes to
- # a set and link them all at the end to avoid duplicates.
- # Blender crashes if trying to link an object that is already linked.
- # This links automatically the children meshes if they were not
- # selected, and doesn't link them twice if they, insted,
- # were manually selected by the user.
+ lib.imprint(asset_group, self.data)
+ # Add selected objects to instance
if (self.options or {}).get("useSelection"):
- for obj in lib.get_selection():
- for child in obj.users_collection[0].children:
- collection.children.link(child)
- collection.objects.link(obj)
+ bpy.context.view_layer.objects.active = asset_group
+ selected = lib.get_selection()
+ for obj in selected:
+ obj.select_set(True)
+ selected.append(asset_group)
+ bpy.ops.object.parent_set(keep_transform=True)
- return collection
+ return asset_group
diff --git a/openpype/hosts/blender/plugins/load/load_abc.py b/openpype/hosts/blender/plugins/load/load_abc.py
index 4248cffd69..92656fac9e 100644
--- a/openpype/hosts/blender/plugins/load/load_abc.py
+++ b/openpype/hosts/blender/plugins/load/load_abc.py
@@ -4,9 +4,14 @@ from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
-from avalon import api, blender
import bpy
-import openpype.hosts.blender.api.plugin as plugin
+
+from avalon import api
+from avalon.blender import lib
+from avalon.blender.pipeline import AVALON_CONTAINERS
+from avalon.blender.pipeline import AVALON_CONTAINER_ID
+from avalon.blender.pipeline import AVALON_PROPERTY
+from openpype.hosts.blender.api import plugin
class CacheModelLoader(plugin.AssetLoader):
@@ -21,24 +26,30 @@ class CacheModelLoader(plugin.AssetLoader):
families = ["model", "pointcache"]
representations = ["abc"]
- label = "Link Alembic"
+ label = "Load Alembic"
icon = "code-fork"
color = "orange"
- def _remove(self, objects, container):
- for obj in list(objects):
+ def _remove(self, asset_group):
+ objects = list(asset_group.children)
+ empties = []
+
+ for obj in objects:
if obj.type == 'MESH':
+ for material_slot in list(obj.material_slots):
+ bpy.data.materials.remove(material_slot.material)
bpy.data.meshes.remove(obj.data)
elif obj.type == 'EMPTY':
- bpy.data.objects.remove(obj)
+ objects.extend(obj.children)
+ empties.append(obj)
- bpy.data.collections.remove(container)
+ for empty in empties:
+ bpy.data.objects.remove(empty)
- def _process(self, libpath, container_name, parent_collection):
+ def _process(self, libpath, asset_group, group_name):
bpy.ops.object.select_all(action='DESELECT')
- view_layer = bpy.context.view_layer
- view_layer_collection = view_layer.active_layer_collection.collection
+ collection = bpy.context.view_layer.active_layer_collection.collection
relative = bpy.context.preferences.filepaths.use_relative_paths
bpy.ops.wm.alembic_import(
@@ -46,34 +57,61 @@ class CacheModelLoader(plugin.AssetLoader):
relative_path=relative
)
- parent = parent_collection
+ parent = bpy.context.scene.collection
- if parent is None:
- parent = bpy.context.scene.collection
+ imported = lib.get_selection()
- model_container = bpy.data.collections.new(container_name)
- parent.children.link(model_container)
- for obj in bpy.context.selected_objects:
- model_container.objects.link(obj)
- view_layer_collection.objects.unlink(obj)
+ empties = [obj for obj in imported if obj.type == 'EMPTY']
+ container = None
+
+ for empty in empties:
+ if not empty.parent:
+ container = empty
+ break
+
+ assert container, "No asset group found"
+
+ # Children must be linked before parents,
+ # otherwise the hierarchy will break
+ objects = []
+ nodes = list(container.children)
+
+ for obj in nodes:
+ obj.parent = asset_group
+
+ bpy.data.objects.remove(container)
+
+ for obj in nodes:
+ objects.append(obj)
+ nodes.extend(list(obj.children))
+
+ objects.reverse()
+
+ for obj in objects:
+ parent.objects.link(obj)
+ collection.objects.unlink(obj)
+
+ for obj in objects:
name = obj.name
- obj.name = f"{name}:{container_name}"
+ obj.name = f"{group_name}:{name}"
+ if obj.type != 'EMPTY':
+ name_data = obj.data.name
+ obj.data.name = f"{group_name}:{name_data}"
- # Groups are imported as Empty objects in Blender
- if obj.type == 'MESH':
- data_name = obj.data.name
- obj.data.name = f"{data_name}:{container_name}"
+ for material_slot in obj.material_slots:
+ name_mat = material_slot.material.name
+ material_slot.material.name = f"{group_name}:{name_mat}"
- if not obj.get(blender.pipeline.AVALON_PROPERTY):
- obj[blender.pipeline.AVALON_PROPERTY] = dict()
+ if not obj.get(AVALON_PROPERTY):
+ obj[AVALON_PROPERTY] = dict()
- avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
- avalon_info.update({"container_name": container_name})
+ avalon_info = obj[AVALON_PROPERTY]
+ avalon_info.update({"container_name": group_name})
bpy.ops.object.select_all(action='DESELECT')
- return model_container
+ return objects
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
@@ -91,47 +129,41 @@ class CacheModelLoader(plugin.AssetLoader):
asset = context["asset"]["name"]
subset = context["subset"]["name"]
- lib_container = plugin.asset_name(
- asset, subset
- )
- unique_number = plugin.get_unique_number(
- asset, subset
- )
+ asset_name = plugin.asset_name(asset, subset)
+ unique_number = plugin.get_unique_number(asset, subset)
+ group_name = plugin.asset_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
- container_name = plugin.asset_name(
- asset, subset, unique_number
- )
- container = bpy.data.collections.new(lib_container)
- container.name = container_name
- blender.pipeline.containerise_existing(
- container,
- name,
- namespace,
- context,
- self.__class__.__name__,
- )
+ avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
+ if not avalon_container:
+ avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
+ bpy.context.scene.collection.children.link(avalon_container)
- container_metadata = container.get(
- blender.pipeline.AVALON_PROPERTY)
+ asset_group = bpy.data.objects.new(group_name, object_data=None)
+ avalon_container.objects.link(asset_group)
- container_metadata["libpath"] = libpath
- container_metadata["lib_container"] = lib_container
+ objects = self._process(libpath, asset_group, group_name)
- obj_container = self._process(
- libpath, container_name, None)
+ bpy.context.scene.collection.objects.link(asset_group)
- container_metadata["obj_container"] = obj_container
+ asset_group[AVALON_PROPERTY] = {
+ "schema": "openpype:container-2.0",
+ "id": AVALON_CONTAINER_ID,
+ "name": name,
+ "namespace": namespace or '',
+ "loader": str(self.__class__.__name__),
+ "representation": str(context["representation"]["_id"]),
+ "libpath": libpath,
+ "asset_name": asset_name,
+ "parent": str(context["representation"]["parent"]),
+ "family": context["representation"]["context"]["family"],
+ "objectName": group_name
+ }
- # Save the list of objects in the metadata container
- container_metadata["objects"] = obj_container.all_objects
+ self[:] = objects
+ return objects
- nodes = list(container.objects)
- nodes.append(container)
- self[:] = nodes
- return nodes
-
- def update(self, container: Dict, representation: Dict):
+ def exec_update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
@@ -143,9 +175,8 @@ class CacheModelLoader(plugin.AssetLoader):
Warning:
No nested collections are supported at the moment!
"""
- collection = bpy.data.collections.get(
- container["objectName"]
- )
+ object_name = container["objectName"]
+ asset_group = bpy.data.objects.get(object_name)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
@@ -155,12 +186,9 @@ class CacheModelLoader(plugin.AssetLoader):
pformat(representation, indent=2),
)
- assert collection, (
+ assert asset_group, (
f"The asset is not loaded: {container['objectName']}"
)
- assert not (collection.children), (
- "Nested collections are not supported."
- )
assert libpath, (
"No existing library file found for {container['objectName']}"
)
@@ -171,45 +199,34 @@ class CacheModelLoader(plugin.AssetLoader):
f"Unsupported file: {libpath}"
)
- collection_metadata = collection.get(
- blender.pipeline.AVALON_PROPERTY)
- collection_libpath = collection_metadata["libpath"]
+ metadata = asset_group.get(AVALON_PROPERTY)
+ group_libpath = metadata["libpath"]
- obj_container = plugin.get_local_collection_with_name(
- collection_metadata["obj_container"].name
- )
- objects = obj_container.all_objects
-
- container_name = obj_container.name
-
- normalized_collection_libpath = (
- str(Path(bpy.path.abspath(collection_libpath)).resolve())
+ normalized_group_libpath = (
+ str(Path(bpy.path.abspath(group_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
self.log.debug(
- "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
- normalized_collection_libpath,
+ "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s",
+ normalized_group_libpath,
normalized_libpath,
)
- if normalized_collection_libpath == normalized_libpath:
+ if normalized_group_libpath == normalized_libpath:
self.log.info("Library already loaded, not updating...")
return
- parent = plugin.get_parent_collection(obj_container)
+ mat = asset_group.matrix_basis.copy()
+ self._remove(asset_group)
- self._remove(objects, obj_container)
+ self._process(str(libpath), asset_group, object_name)
+ asset_group.matrix_basis = mat
- obj_container = self._process(
- str(libpath), container_name, parent)
+ metadata["libpath"] = str(libpath)
+ metadata["representation"] = str(representation["_id"])
- collection_metadata["obj_container"] = obj_container
- collection_metadata["objects"] = obj_container.all_objects
- collection_metadata["libpath"] = str(libpath)
- collection_metadata["representation"] = str(representation["_id"])
-
- def remove(self, container: Dict) -> bool:
+ def exec_remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
@@ -222,25 +239,14 @@ class CacheModelLoader(plugin.AssetLoader):
Warning:
No nested collections are supported at the moment!
"""
- collection = bpy.data.collections.get(
- container["objectName"]
- )
- if not collection:
+ object_name = container["objectName"]
+ asset_group = bpy.data.objects.get(object_name)
+
+ if not asset_group:
return False
- assert not (collection.children), (
- "Nested collections are not supported."
- )
- collection_metadata = collection.get(
- blender.pipeline.AVALON_PROPERTY)
+ self._remove(asset_group)
- obj_container = plugin.get_local_collection_with_name(
- collection_metadata["obj_container"].name
- )
- objects = obj_container.all_objects
-
- self._remove(objects, obj_container)
-
- bpy.data.collections.remove(collection)
+ bpy.data.objects.remove(asset_group)
return True
diff --git a/openpype/hosts/blender/plugins/load/load_animation.py b/openpype/hosts/blender/plugins/load/load_animation.py
index 4025fdfa74..47c48248b2 100644
--- a/openpype/hosts/blender/plugins/load/load_animation.py
+++ b/openpype/hosts/blender/plugins/load/load_animation.py
@@ -1,20 +1,19 @@
"""Load an animation in Blender."""
import logging
-from pathlib import Path
-from pprint import pformat
from typing import Dict, List, Optional
-from avalon import api, blender
import bpy
-import openpype.hosts.blender.api.plugin
+
+from avalon.blender.pipeline import AVALON_PROPERTY
+from openpype.hosts.blender.api import plugin
logger = logging.getLogger("openpype").getChild(
"blender").getChild("load_animation")
-class BlendAnimationLoader(openpype.hosts.blender.api.plugin.AssetLoader):
+class BlendAnimationLoader(plugin.AssetLoader):
"""Load animations from a .blend file.
Warning:
@@ -29,67 +28,6 @@ class BlendAnimationLoader(openpype.hosts.blender.api.plugin.AssetLoader):
icon = "code-fork"
color = "orange"
- def _remove(self, objects, lib_container):
- for obj in list(objects):
- if obj.type == 'ARMATURE':
- bpy.data.armatures.remove(obj.data)
- elif obj.type == 'MESH':
- bpy.data.meshes.remove(obj.data)
-
- bpy.data.collections.remove(bpy.data.collections[lib_container])
-
- def _process(self, libpath, lib_container, container_name):
-
- relative = bpy.context.preferences.filepaths.use_relative_paths
- with bpy.data.libraries.load(
- libpath, link=True, relative=relative
- ) as (_, data_to):
- data_to.collections = [lib_container]
-
- scene = bpy.context.scene
-
- scene.collection.children.link(bpy.data.collections[lib_container])
-
- anim_container = scene.collection.children[lib_container].make_local()
-
- meshes = [obj for obj in anim_container.objects if obj.type == 'MESH']
- armatures = [
- obj for obj in anim_container.objects if obj.type == 'ARMATURE']
-
- # Should check if there is only an armature?
-
- objects_list = []
-
- # Link meshes first, then armatures.
- # The armature is unparented for all the non-local meshes,
- # when it is made local.
- for obj in meshes + armatures:
-
- obj = obj.make_local()
-
- obj.data.make_local()
-
- anim_data = obj.animation_data
-
- if anim_data is not None and anim_data.action is not None:
-
- anim_data.action.make_local()
-
- if not obj.get(blender.pipeline.AVALON_PROPERTY):
-
- obj[blender.pipeline.AVALON_PROPERTY] = dict()
-
- avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
- avalon_info.update({"container_name": container_name})
-
- objects_list.append(obj)
-
- anim_container.pop(blender.pipeline.AVALON_PROPERTY)
-
- bpy.ops.object.select_all(action='DESELECT')
-
- return objects_list
-
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
@@ -101,148 +39,32 @@ class BlendAnimationLoader(openpype.hosts.blender.api.plugin.AssetLoader):
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
-
libpath = self.fname
- asset = context["asset"]["name"]
- subset = context["subset"]["name"]
- lib_container = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
- container_name = openpype.hosts.blender.api.plugin.asset_name(
- asset, subset, namespace
- )
- container = bpy.data.collections.new(lib_container)
- container.name = container_name
- blender.pipeline.containerise_existing(
- container,
- name,
- namespace,
- context,
- self.__class__.__name__,
- )
+ with bpy.data.libraries.load(
+ libpath, link=True, relative=False
+ ) as (data_from, data_to):
+ data_to.objects = data_from.objects
+ data_to.actions = data_from.actions
- container_metadata = container.get(
- blender.pipeline.AVALON_PROPERTY)
+ container = data_to.objects[0]
- container_metadata["libpath"] = libpath
- container_metadata["lib_container"] = lib_container
+ assert container, "No asset group found"
- objects_list = self._process(
- libpath, lib_container, container_name)
+ target_namespace = container.get(AVALON_PROPERTY).get('namespace')
- # Save the list of objects in the metadata container
- container_metadata["objects"] = objects_list
+ action = data_to.actions[0].make_local().copy()
- nodes = list(container.objects)
- nodes.append(container)
- self[:] = nodes
- return nodes
+ for obj in bpy.data.objects:
+ if obj.get(AVALON_PROPERTY) and obj.get(AVALON_PROPERTY).get(
+ 'namespace') == target_namespace:
+ if obj.children[0]:
+ if not obj.children[0].animation_data:
+ obj.children[0].animation_data_create()
+ obj.children[0].animation_data.action = action
+ break
- def update(self, container: Dict, representation: Dict):
- """Update the loaded asset.
+ bpy.data.objects.remove(container)
- This will remove all objects of the current collection, load the new
- ones and add them to the collection.
- If the objects of the collection are used in another collection they
- will not be removed, only unlinked. Normally this should not be the
- case though.
-
- Warning:
- No nested collections are supported at the moment!
- """
-
- collection = bpy.data.collections.get(
- container["objectName"]
- )
-
- libpath = Path(api.get_representation_path(representation))
- extension = libpath.suffix.lower()
-
- logger.info(
- "Container: %s\nRepresentation: %s",
- pformat(container, indent=2),
- pformat(representation, indent=2),
- )
-
- assert collection, (
- f"The asset is not loaded: {container['objectName']}"
- )
- assert not (collection.children), (
- "Nested collections are not supported."
- )
- assert libpath, (
- "No existing library file found for {container['objectName']}"
- )
- assert libpath.is_file(), (
- f"The file doesn't exist: {libpath}"
- )
- assert extension in openpype.hosts.blender.api.plugin.VALID_EXTENSIONS, (
- f"Unsupported file: {libpath}"
- )
-
- collection_metadata = collection.get(
- blender.pipeline.AVALON_PROPERTY)
-
- collection_libpath = collection_metadata["libpath"]
- normalized_collection_libpath = (
- str(Path(bpy.path.abspath(collection_libpath)).resolve())
- )
- normalized_libpath = (
- str(Path(bpy.path.abspath(str(libpath))).resolve())
- )
- logger.debug(
- "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
- normalized_collection_libpath,
- normalized_libpath,
- )
- if normalized_collection_libpath == normalized_libpath:
- logger.info("Library already loaded, not updating...")
- return
-
- objects = collection_metadata["objects"]
- lib_container = collection_metadata["lib_container"]
-
- self._remove(objects, lib_container)
-
- objects_list = self._process(
- str(libpath), lib_container, collection.name)
-
- # Save the list of objects in the metadata container
- collection_metadata["objects"] = objects_list
- collection_metadata["libpath"] = str(libpath)
- collection_metadata["representation"] = str(representation["_id"])
-
- bpy.ops.object.select_all(action='DESELECT')
-
- def remove(self, container: Dict) -> bool:
- """Remove an existing container from a Blender scene.
-
- Arguments:
- container (openpype:container-1.0): Container to remove,
- from `host.ls()`.
-
- Returns:
- bool: Whether the container was deleted.
-
- Warning:
- No nested collections are supported at the moment!
- """
-
- collection = bpy.data.collections.get(
- container["objectName"]
- )
- if not collection:
- return False
- assert not (collection.children), (
- "Nested collections are not supported."
- )
-
- collection_metadata = collection.get(
- blender.pipeline.AVALON_PROPERTY)
- objects = collection_metadata["objects"]
- lib_container = collection_metadata["lib_container"]
-
- self._remove(objects, lib_container)
-
- bpy.data.collections.remove(collection)
-
- return True
+ library = bpy.data.libraries.get(bpy.path.basename(libpath))
+ bpy.data.libraries.remove(library)
diff --git a/openpype/hosts/blender/plugins/load/load_fbx.py b/openpype/hosts/blender/plugins/load/load_fbx.py
new file mode 100644
index 0000000000..b80dc69adc
--- /dev/null
+++ b/openpype/hosts/blender/plugins/load/load_fbx.py
@@ -0,0 +1,273 @@
+"""Load an asset in Blender from an Alembic file."""
+
+from pathlib import Path
+from pprint import pformat
+from typing import Dict, List, Optional
+
+import bpy
+
+from avalon import api
+from avalon.blender import lib
+from avalon.blender.pipeline import AVALON_CONTAINERS
+from avalon.blender.pipeline import AVALON_CONTAINER_ID
+from avalon.blender.pipeline import AVALON_PROPERTY
+from openpype.hosts.blender.api import plugin
+
+
+class FbxModelLoader(plugin.AssetLoader):
+ """Load FBX models.
+
+ Stores the imported asset in an empty named after the asset.
+ """
+
+ families = ["model", "rig"]
+ representations = ["fbx"]
+
+ label = "Load FBX"
+ icon = "code-fork"
+ color = "orange"
+
+ def _remove(self, asset_group):
+ objects = list(asset_group.children)
+
+ for obj in objects:
+ if obj.type == 'MESH':
+ for material_slot in list(obj.material_slots):
+ if material_slot.material:
+ bpy.data.materials.remove(material_slot.material)
+ bpy.data.meshes.remove(obj.data)
+ elif obj.type == 'ARMATURE':
+ objects.extend(obj.children)
+ bpy.data.armatures.remove(obj.data)
+ elif obj.type == 'CURVE':
+ bpy.data.curves.remove(obj.data)
+ elif obj.type == 'EMPTY':
+ objects.extend(obj.children)
+ bpy.data.objects.remove(obj)
+
+ def _process(self, libpath, asset_group, group_name, action):
+ bpy.ops.object.select_all(action='DESELECT')
+
+ collection = bpy.context.view_layer.active_layer_collection.collection
+
+ bpy.ops.import_scene.fbx(filepath=libpath)
+
+ parent = bpy.context.scene.collection
+
+ imported = lib.get_selection()
+
+ empties = [obj for obj in imported if obj.type == 'EMPTY']
+
+ container = None
+
+ for empty in empties:
+ if not empty.parent:
+ container = empty
+ break
+
+ assert container, "No asset group found"
+
+ # Children must be linked before parents,
+ # otherwise the hierarchy will break
+ objects = []
+ nodes = list(container.children)
+
+ for obj in nodes:
+ obj.parent = asset_group
+
+ bpy.data.objects.remove(container)
+
+ for obj in nodes:
+ objects.append(obj)
+ nodes.extend(list(obj.children))
+
+ objects.reverse()
+
+ for obj in objects:
+ parent.objects.link(obj)
+ collection.objects.unlink(obj)
+
+ for obj in objects:
+ name = obj.name
+ obj.name = f"{group_name}:{name}"
+ if obj.type != 'EMPTY':
+ name_data = obj.data.name
+ obj.data.name = f"{group_name}:{name_data}"
+
+ if obj.type == 'MESH':
+ for material_slot in obj.material_slots:
+ name_mat = material_slot.material.name
+ material_slot.material.name = f"{group_name}:{name_mat}"
+ elif obj.type == 'ARMATURE':
+ anim_data = obj.animation_data
+ if action is not None:
+ anim_data.action = action
+ elif anim_data.action is not None:
+ name_action = anim_data.action.name
+ anim_data.action.name = f"{group_name}:{name_action}"
+
+ if not obj.get(AVALON_PROPERTY):
+ obj[AVALON_PROPERTY] = dict()
+
+ avalon_info = obj[AVALON_PROPERTY]
+ avalon_info.update({"container_name": group_name})
+
+ bpy.ops.object.select_all(action='DESELECT')
+
+ return objects
+
+ def process_asset(
+ self, context: dict, name: str, namespace: Optional[str] = None,
+ options: Optional[Dict] = None
+ ) -> Optional[List]:
+ """
+ Arguments:
+ name: Use pre-defined name
+ namespace: Use pre-defined namespace
+ context: Full parenthood of representation to load
+ options: Additional settings dictionary
+ """
+ libpath = self.fname
+ asset = context["asset"]["name"]
+ subset = context["subset"]["name"]
+
+ asset_name = plugin.asset_name(asset, subset)
+ unique_number = plugin.get_unique_number(asset, subset)
+ group_name = plugin.asset_name(asset, subset, unique_number)
+ namespace = namespace or f"{asset}_{unique_number}"
+
+ avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
+ if not avalon_container:
+ avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
+ bpy.context.scene.collection.children.link(avalon_container)
+
+ asset_group = bpy.data.objects.new(group_name, object_data=None)
+ avalon_container.objects.link(asset_group)
+
+ objects = self._process(libpath, asset_group, group_name, None)
+
+ objects = []
+ nodes = list(asset_group.children)
+
+ for obj in nodes:
+ objects.append(obj)
+ nodes.extend(list(obj.children))
+
+ bpy.context.scene.collection.objects.link(asset_group)
+
+ asset_group[AVALON_PROPERTY] = {
+ "schema": "openpype:container-2.0",
+ "id": AVALON_CONTAINER_ID,
+ "name": name,
+ "namespace": namespace or '',
+ "loader": str(self.__class__.__name__),
+ "representation": str(context["representation"]["_id"]),
+ "libpath": libpath,
+ "asset_name": asset_name,
+ "parent": str(context["representation"]["parent"]),
+ "family": context["representation"]["context"]["family"],
+ "objectName": group_name
+ }
+
+ self[:] = objects
+ return objects
+
+ def exec_update(self, container: Dict, representation: Dict):
+ """Update the loaded asset.
+
+ This will remove all objects of the current collection, load the new
+ ones and add them to the collection.
+ If the objects of the collection are used in another collection they
+ will not be removed, only unlinked. Normally this should not be the
+ case though.
+
+ Warning:
+ No nested collections are supported at the moment!
+ """
+ object_name = container["objectName"]
+ asset_group = bpy.data.objects.get(object_name)
+ libpath = Path(api.get_representation_path(representation))
+ extension = libpath.suffix.lower()
+
+ self.log.info(
+ "Container: %s\nRepresentation: %s",
+ pformat(container, indent=2),
+ pformat(representation, indent=2),
+ )
+
+ assert asset_group, (
+ f"The asset is not loaded: {container['objectName']}"
+ )
+ assert libpath, (
+ "No existing library file found for {container['objectName']}"
+ )
+ assert libpath.is_file(), (
+ f"The file doesn't exist: {libpath}"
+ )
+ assert extension in plugin.VALID_EXTENSIONS, (
+ f"Unsupported file: {libpath}"
+ )
+
+ metadata = asset_group.get(AVALON_PROPERTY)
+ group_libpath = metadata["libpath"]
+
+ normalized_group_libpath = (
+ str(Path(bpy.path.abspath(group_libpath)).resolve())
+ )
+ normalized_libpath = (
+ str(Path(bpy.path.abspath(str(libpath))).resolve())
+ )
+ self.log.debug(
+ "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s",
+ normalized_group_libpath,
+ normalized_libpath,
+ )
+ if normalized_group_libpath == normalized_libpath:
+ self.log.info("Library already loaded, not updating...")
+ return
+
+ # Get the armature of the rig
+ objects = asset_group.children
+ armatures = [obj for obj in objects if obj.type == 'ARMATURE']
+ action = None
+
+ if armatures:
+ armature = armatures[0]
+
+ if armature.animation_data and armature.animation_data.action:
+ action = armature.animation_data.action
+
+ mat = asset_group.matrix_basis.copy()
+ self._remove(asset_group)
+
+ self._process(str(libpath), asset_group, object_name, action)
+
+ asset_group.matrix_basis = mat
+
+ metadata["libpath"] = str(libpath)
+ metadata["representation"] = str(representation["_id"])
+
+ def exec_remove(self, container: Dict) -> bool:
+ """Remove an existing container from a Blender scene.
+
+ Arguments:
+ container (openpype:container-1.0): Container to remove,
+ from `host.ls()`.
+
+ Returns:
+ bool: Whether the container was deleted.
+
+ Warning:
+ No nested collections are supported at the moment!
+ """
+ object_name = container["objectName"]
+ asset_group = bpy.data.objects.get(object_name)
+
+ if not asset_group:
+ return False
+
+ self._remove(asset_group)
+
+ bpy.data.objects.remove(asset_group)
+
+ return True
diff --git a/openpype/hosts/blender/plugins/load/load_layout.py b/openpype/hosts/blender/plugins/load/load_layout.py
deleted file mode 100644
index 2092be9139..0000000000
--- a/openpype/hosts/blender/plugins/load/load_layout.py
+++ /dev/null
@@ -1,664 +0,0 @@
-"""Load a layout in Blender."""
-
-import json
-from logging import log, warning
-import math
-
-import logging
-from pathlib import Path
-from pprint import pformat
-from typing import Dict, List, Optional
-
-from avalon import api, blender, pipeline
-import bpy
-import openpype.hosts.blender.api.plugin as plugin
-from openpype.lib import get_creator_by_name
-
-
-class BlendLayoutLoader(plugin.AssetLoader):
- """Load layout from a .blend file."""
-
- families = ["layout"]
- representations = ["blend"]
-
- label = "Link Layout"
- icon = "code-fork"
- color = "orange"
-
- def _remove(self, objects, obj_container):
- for obj in list(objects):
- if obj.type == 'ARMATURE':
- bpy.data.armatures.remove(obj.data)
- elif obj.type == 'MESH':
- bpy.data.meshes.remove(obj.data)
- elif obj.type == 'CAMERA':
- bpy.data.cameras.remove(obj.data)
- elif obj.type == 'CURVE':
- bpy.data.curves.remove(obj.data)
-
- for element_container in obj_container.children:
- for child in element_container.children:
- bpy.data.collections.remove(child)
- bpy.data.collections.remove(element_container)
-
- bpy.data.collections.remove(obj_container)
-
- def _process(self, libpath, lib_container, container_name, actions):
- relative = bpy.context.preferences.filepaths.use_relative_paths
- with bpy.data.libraries.load(
- libpath, link=True, relative=relative
- ) as (_, data_to):
- data_to.collections = [lib_container]
-
- scene = bpy.context.scene
-
- scene.collection.children.link(bpy.data.collections[lib_container])
-
- layout_container = scene.collection.children[lib_container].make_local()
- layout_container.name = container_name
-
- objects_local_types = ['MESH', 'CAMERA', 'CURVE']
-
- objects = []
- armatures = []
-
- containers = list(layout_container.children)
-
- for container in layout_container.children:
- if container.name == blender.pipeline.AVALON_CONTAINERS:
- containers.remove(container)
-
- for container in containers:
- container.make_local()
- objects.extend([
- obj for obj in container.objects
- if obj.type in objects_local_types
- ])
- armatures.extend([
- obj for obj in container.objects
- if obj.type == 'ARMATURE'
- ])
- containers.extend(list(container.children))
-
- # Link meshes first, then armatures.
- # The armature is unparented for all the non-local meshes,
- # when it is made local.
- for obj in objects + armatures:
- local_obj = obj.make_local()
- if obj.data:
- obj.data.make_local()
-
- if not local_obj.get(blender.pipeline.AVALON_PROPERTY):
- local_obj[blender.pipeline.AVALON_PROPERTY] = dict()
-
- avalon_info = local_obj[blender.pipeline.AVALON_PROPERTY]
- avalon_info.update({"container_name": container_name})
-
- action = actions.get(local_obj.name, None)
-
- if local_obj.type == 'ARMATURE' and action is not None:
- local_obj.animation_data.action = action
-
- layout_container.pop(blender.pipeline.AVALON_PROPERTY)
-
- bpy.ops.object.select_all(action='DESELECT')
-
- return layout_container
-
- def process_asset(
- self, context: dict, name: str, namespace: Optional[str] = None,
- options: Optional[Dict] = None
- ) -> Optional[List]:
- """
- Arguments:
- name: Use pre-defined name
- namespace: Use pre-defined namespace
- context: Full parenthood of representation to load
- options: Additional settings dictionary
- """
-
- libpath = self.fname
- asset = context["asset"]["name"]
- subset = context["subset"]["name"]
- lib_container = plugin.asset_name(
- asset, subset
- )
- unique_number = plugin.get_unique_number(
- asset, subset
- )
- namespace = namespace or f"{asset}_{unique_number}"
- container_name = plugin.asset_name(
- asset, subset, unique_number
- )
-
- container = bpy.data.collections.new(lib_container)
- container.name = container_name
- blender.pipeline.containerise_existing(
- container,
- name,
- namespace,
- context,
- self.__class__.__name__,
- )
-
- container_metadata = container.get(
- blender.pipeline.AVALON_PROPERTY)
-
- container_metadata["libpath"] = libpath
- container_metadata["lib_container"] = lib_container
-
- obj_container = self._process(
- libpath, lib_container, container_name, {})
-
- container_metadata["obj_container"] = obj_container
-
- # Save the list of objects in the metadata container
- container_metadata["objects"] = obj_container.all_objects
-
- # nodes = list(container.objects)
- # nodes.append(container)
- nodes = [container]
- self[:] = nodes
- return nodes
-
- def update(self, container: Dict, representation: Dict):
- """Update the loaded asset.
-
- This will remove all objects of the current collection, load the new
- ones and add them to the collection.
- If the objects of the collection are used in another collection they
- will not be removed, only unlinked. Normally this should not be the
- case though.
-
- Warning:
- No nested collections are supported at the moment!
- """
- collection = bpy.data.collections.get(
- container["objectName"]
- )
-
- libpath = Path(api.get_representation_path(representation))
- extension = libpath.suffix.lower()
-
- self.log.info(
- "Container: %s\nRepresentation: %s",
- pformat(container, indent=2),
- pformat(representation, indent=2),
- )
-
- assert collection, (
- f"The asset is not loaded: {container['objectName']}"
- )
- assert not (collection.children), (
- "Nested collections are not supported."
- )
- assert libpath, (
- "No existing library file found for {container['objectName']}"
- )
- assert libpath.is_file(), (
- f"The file doesn't exist: {libpath}"
- )
- assert extension in plugin.VALID_EXTENSIONS, (
- f"Unsupported file: {libpath}"
- )
-
- collection_metadata = collection.get(
- blender.pipeline.AVALON_PROPERTY)
- collection_libpath = collection_metadata["libpath"]
- objects = collection_metadata["objects"]
- lib_container = collection_metadata["lib_container"]
- obj_container = collection_metadata["obj_container"]
-
- normalized_collection_libpath = (
- str(Path(bpy.path.abspath(collection_libpath)).resolve())
- )
- normalized_libpath = (
- str(Path(bpy.path.abspath(str(libpath))).resolve())
- )
- self.log.debug(
- "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
- normalized_collection_libpath,
- normalized_libpath,
- )
- if normalized_collection_libpath == normalized_libpath:
- self.log.info("Library already loaded, not updating...")
- return
-
- actions = {}
-
- for obj in objects:
- if obj.type == 'ARMATURE':
- if obj.animation_data and obj.animation_data.action:
- actions[obj.name] = obj.animation_data.action
-
- self._remove(objects, obj_container)
-
- obj_container = self._process(
- str(libpath), lib_container, collection.name, actions)
-
- # Save the list of objects in the metadata container
- collection_metadata["obj_container"] = obj_container
- collection_metadata["objects"] = obj_container.all_objects
- collection_metadata["libpath"] = str(libpath)
- collection_metadata["representation"] = str(representation["_id"])
-
- bpy.ops.object.select_all(action='DESELECT')
-
- def remove(self, container: Dict) -> bool:
- """Remove an existing container from a Blender scene.
-
- Arguments:
- container (openpype:container-1.0): Container to remove,
- from `host.ls()`.
-
- Returns:
- bool: Whether the container was deleted.
-
- Warning:
- No nested collections are supported at the moment!
- """
-
- collection = bpy.data.collections.get(
- container["objectName"]
- )
- if not collection:
- return False
- assert not (collection.children), (
- "Nested collections are not supported."
- )
-
- collection_metadata = collection.get(
- blender.pipeline.AVALON_PROPERTY)
- objects = collection_metadata["objects"]
- obj_container = collection_metadata["obj_container"]
-
- self._remove(objects, obj_container)
-
- bpy.data.collections.remove(collection)
-
- return True
-
-
-class UnrealLayoutLoader(plugin.AssetLoader):
- """Load layout published from Unreal."""
-
- families = ["layout"]
- representations = ["json"]
-
- label = "Link Layout"
- icon = "code-fork"
- color = "orange"
-
- animation_creator_name = "CreateAnimation"
-
- def _remove_objects(self, objects):
- for obj in list(objects):
- if obj.type == 'ARMATURE':
- bpy.data.armatures.remove(obj.data)
- elif obj.type == 'MESH':
- bpy.data.meshes.remove(obj.data)
- elif obj.type == 'CAMERA':
- bpy.data.cameras.remove(obj.data)
- elif obj.type == 'CURVE':
- bpy.data.curves.remove(obj.data)
- else:
- self.log.error(
- f"Object {obj.name} of type {obj.type} not recognized.")
-
- def _remove_collections(self, collection):
- if collection.children:
- for child in collection.children:
- self._remove_collections(child)
- bpy.data.collections.remove(child)
-
- def _remove(self, layout_container):
- layout_container_metadata = layout_container.get(
- blender.pipeline.AVALON_PROPERTY)
-
- if layout_container.children:
- for child in layout_container.children:
- child_container = child.get(blender.pipeline.AVALON_PROPERTY)
- child_container['objectName'] = child.name
- api.remove(child_container)
-
- for c in bpy.data.collections:
- metadata = c.get('avalon')
- if metadata:
- print("metadata.get('id')")
- print(metadata.get('id'))
- if metadata and metadata.get('id') == 'pyblish.avalon.instance':
- print("metadata.get('dependencies')")
- print(metadata.get('dependencies'))
- print("layout_container_metadata.get('representation')")
- print(layout_container_metadata.get('representation'))
- if metadata.get('dependencies') == layout_container_metadata.get('representation'):
-
- for child in c.children:
- bpy.data.collections.remove(child)
- bpy.data.collections.remove(c)
- break
-
- def _get_loader(self, loaders, family):
- name = ""
- if family == 'rig':
- name = "BlendRigLoader"
- elif family == 'model':
- name = "BlendModelLoader"
-
- if name == "":
- return None
-
- for loader in loaders:
- if loader.__name__ == name:
- return loader
-
- return None
-
- def set_transform(self, obj, transform):
- location = transform.get('translation')
- rotation = transform.get('rotation')
- scale = transform.get('scale')
-
- # Y position is inverted in sign because Unreal and Blender have the
- # Y axis mirrored
- obj.location = (
- location.get('x'),
- location.get('y'),
- location.get('z')
- )
- obj.rotation_euler = (
- rotation.get('x'),
- rotation.get('y'),
- rotation.get('z')
- )
- obj.scale = (
- scale.get('x'),
- scale.get('y'),
- scale.get('z')
- )
-
- def _process(
- self, libpath, layout_container, container_name, representation,
- actions, parent_collection
- ):
- with open(libpath, "r") as fp:
- data = json.load(fp)
-
- scene = bpy.context.scene
- layout_collection = bpy.data.collections.new(container_name)
- scene.collection.children.link(layout_collection)
-
- parent = parent_collection
-
- if parent is None:
- parent = scene.collection
-
- all_loaders = api.discover(api.Loader)
-
- avalon_container = bpy.data.collections.get(
- blender.pipeline.AVALON_CONTAINERS)
-
- for element in data:
- reference = element.get('reference')
- family = element.get('family')
-
- loaders = api.loaders_from_representation(all_loaders, reference)
- loader = self._get_loader(loaders, family)
-
- if not loader:
- continue
-
- instance_name = element.get('instance_name')
-
- element_container = api.load(
- loader,
- reference,
- namespace=instance_name
- )
-
- if not element_container:
- continue
-
- avalon_container.children.unlink(element_container)
- layout_container.children.link(element_container)
-
- element_metadata = element_container.get(
- blender.pipeline.AVALON_PROPERTY)
-
- # Unlink the object's collection from the scene collection and
- # link it in the layout collection
- element_collection = element_metadata.get('obj_container')
- scene.collection.children.unlink(element_collection)
- layout_collection.children.link(element_collection)
-
- objects = element_metadata.get('objects')
- element_metadata['instance_name'] = instance_name
-
- objects_to_transform = []
-
- creator_plugin = get_creator_by_name(self.animation_creator_name)
- if not creator_plugin:
- raise ValueError("Creator plugin \"{}\" was not found.".format(
- self.animation_creator_name
- ))
-
- if family == 'rig':
- for o in objects:
- if o.type == 'ARMATURE':
- objects_to_transform.append(o)
- # Create an animation subset for each rig
- o.select_set(True)
- asset = api.Session["AVALON_ASSET"]
- c = api.create(
- creator_plugin,
- name="animation_" + element_collection.name,
- asset=asset,
- options={"useSelection": True},
- data={"dependencies": representation})
- scene.collection.children.unlink(c)
- parent.children.link(c)
- o.select_set(False)
- break
- elif family == 'model':
- objects_to_transform = objects
-
- for o in objects_to_transform:
- self.set_transform(o, element.get('transform'))
-
- if actions:
- if o.type == 'ARMATURE':
- action = actions.get(instance_name, None)
-
- if action:
- if o.animation_data is None:
- o.animation_data_create()
- o.animation_data.action = action
-
- return layout_collection
-
- def process_asset(self,
- context: dict,
- name: str,
- namespace: Optional[str] = None,
- options: Optional[Dict] = None):
- """
- Arguments:
- name: Use pre-defined name
- namespace: Use pre-defined namespace
- context: Full parenthood of representation to load
- options: Additional settings dictionary
- """
- libpath = self.fname
- asset = context["asset"]["name"]
- subset = context["subset"]["name"]
- lib_container = plugin.asset_name(
- asset, subset
- )
- unique_number = plugin.get_unique_number(
- asset, subset
- )
- namespace = namespace or f"{asset}_{unique_number}"
- container_name = plugin.asset_name(
- asset, subset, unique_number
- )
-
- layout_container = bpy.data.collections.new(container_name)
- blender.pipeline.containerise_existing(
- layout_container,
- name,
- namespace,
- context,
- self.__class__.__name__,
- )
-
- container_metadata = layout_container.get(
- blender.pipeline.AVALON_PROPERTY)
-
- container_metadata["libpath"] = libpath
- container_metadata["lib_container"] = lib_container
-
- layout_collection = self._process(
- libpath, layout_container, container_name,
- str(context["representation"]["_id"]), None, None)
-
- container_metadata["obj_container"] = layout_collection
-
- # Save the list of objects in the metadata container
- container_metadata["objects"] = layout_collection.all_objects
-
- nodes = [layout_container]
- self[:] = nodes
- return nodes
-
- def update(self, container: Dict, representation: Dict):
- """Update the loaded asset.
-
- This will remove all objects of the current collection, load the new
- ones and add them to the collection.
- If the objects of the collection are used in another collection they
- will not be removed, only unlinked. Normally this should not be the
- case though.
- """
- layout_container = bpy.data.collections.get(
- container["objectName"]
- )
- if not layout_container:
- return False
-
- libpath = Path(api.get_representation_path(representation))
- extension = libpath.suffix.lower()
-
- self.log.info(
- "Container: %s\nRepresentation: %s",
- pformat(container, indent=2),
- pformat(representation, indent=2),
- )
-
- assert layout_container, (
- f"The asset is not loaded: {container['objectName']}"
- )
- assert libpath, (
- "No existing library file found for {container['objectName']}"
- )
- assert libpath.is_file(), (
- f"The file doesn't exist: {libpath}"
- )
- assert extension in plugin.VALID_EXTENSIONS, (
- f"Unsupported file: {libpath}"
- )
-
- layout_container_metadata = layout_container.get(
- blender.pipeline.AVALON_PROPERTY)
- collection_libpath = layout_container_metadata["libpath"]
- lib_container = layout_container_metadata["lib_container"]
- obj_container = plugin.get_local_collection_with_name(
- layout_container_metadata["obj_container"].name
- )
- objects = obj_container.all_objects
-
- container_name = obj_container.name
-
- normalized_collection_libpath = (
- str(Path(bpy.path.abspath(collection_libpath)).resolve())
- )
- normalized_libpath = (
- str(Path(bpy.path.abspath(str(libpath))).resolve())
- )
- self.log.debug(
- "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
- normalized_collection_libpath,
- normalized_libpath,
- )
- if normalized_collection_libpath == normalized_libpath:
- self.log.info("Library already loaded, not updating...")
- return
-
- actions = {}
-
- for obj in objects:
- if obj.type == 'ARMATURE':
- if obj.animation_data and obj.animation_data.action:
- obj_cont_name = obj.get(
- blender.pipeline.AVALON_PROPERTY).get('container_name')
- obj_cont = plugin.get_local_collection_with_name(
- obj_cont_name)
- element_metadata = obj_cont.get(
- blender.pipeline.AVALON_PROPERTY)
- instance_name = element_metadata.get('instance_name')
- actions[instance_name] = obj.animation_data.action
-
- self._remove(layout_container)
-
- bpy.data.collections.remove(obj_container)
-
- creator_plugin = get_creator_by_name(self.setdress_creator_name)
- if not creator_plugin:
- raise ValueError("Creator plugin \"{}\" was not found.".format(
- self.setdress_creator_name
- ))
-
- parent = api.create(
- creator_plugin,
- name="animation",
- asset=api.Session["AVALON_ASSET"],
- options={"useSelection": True},
- data={"dependencies": str(representation["_id"])})
-
- layout_collection = self._process(
- libpath, layout_container, container_name,
- str(representation["_id"]), actions, parent)
-
- layout_container_metadata["obj_container"] = layout_collection
- layout_container_metadata["objects"] = layout_collection.all_objects
- layout_container_metadata["libpath"] = str(libpath)
- layout_container_metadata["representation"] = str(
- representation["_id"])
-
- def remove(self, container: Dict) -> bool:
- """Remove an existing container from a Blender scene.
-
- Arguments:
- container (openpype:container-1.0): Container to remove,
- from `host.ls()`.
-
- Returns:
- bool: Whether the container was deleted.
- """
- layout_container = bpy.data.collections.get(
- container["objectName"]
- )
- if not layout_container:
- return False
-
- layout_container_metadata = layout_container.get(
- blender.pipeline.AVALON_PROPERTY)
- obj_container = plugin.get_local_collection_with_name(
- layout_container_metadata["obj_container"].name
- )
-
- self._remove(layout_container)
-
- bpy.data.collections.remove(obj_container)
- bpy.data.collections.remove(layout_container)
-
- return True
diff --git a/openpype/hosts/blender/plugins/load/load_layout_blend.py b/openpype/hosts/blender/plugins/load/load_layout_blend.py
new file mode 100644
index 0000000000..85cb4dfbd3
--- /dev/null
+++ b/openpype/hosts/blender/plugins/load/load_layout_blend.py
@@ -0,0 +1,337 @@
+"""Load a layout in Blender."""
+
+from pathlib import Path
+from pprint import pformat
+from typing import Dict, List, Optional
+
+import bpy
+
+from avalon import api
+from avalon.blender.pipeline import AVALON_CONTAINERS
+from avalon.blender.pipeline import AVALON_CONTAINER_ID
+from avalon.blender.pipeline import AVALON_PROPERTY
+from openpype.hosts.blender.api import plugin
+
+
+class BlendLayoutLoader(plugin.AssetLoader):
+ """Load layout from a .blend file."""
+
+ families = ["layout"]
+ representations = ["blend"]
+
+ label = "Link Layout"
+ icon = "code-fork"
+ color = "orange"
+
+ def _remove(self, asset_group):
+ objects = list(asset_group.children)
+
+ for obj in objects:
+ if obj.type == 'MESH':
+ for material_slot in list(obj.material_slots):
+ if material_slot.material:
+ bpy.data.materials.remove(material_slot.material)
+ bpy.data.meshes.remove(obj.data)
+ elif obj.type == 'ARMATURE':
+ objects.extend(obj.children)
+ bpy.data.armatures.remove(obj.data)
+ elif obj.type == 'CURVE':
+ bpy.data.curves.remove(obj.data)
+ elif obj.type == 'EMPTY':
+ objects.extend(obj.children)
+ bpy.data.objects.remove(obj)
+
+ def _remove_asset_and_library(self, asset_group):
+ libpath = asset_group.get(AVALON_PROPERTY).get('libpath')
+
+ # Check how many assets use the same library
+ count = 0
+ for obj in bpy.data.collections.get(AVALON_CONTAINERS).all_objects:
+ if obj.get(AVALON_PROPERTY).get('libpath') == libpath:
+ count += 1
+
+ self._remove(asset_group)
+
+ bpy.data.objects.remove(asset_group)
+
+ # If it is the last object to use that library, remove it
+ if count == 1:
+ library = bpy.data.libraries.get(bpy.path.basename(libpath))
+ bpy.data.libraries.remove(library)
+
+ def _process(self, libpath, asset_group, group_name, actions):
+ with bpy.data.libraries.load(
+ libpath, link=True, relative=False
+ ) as (data_from, data_to):
+ data_to.objects = data_from.objects
+
+ parent = bpy.context.scene.collection
+
+ empties = [obj for obj in data_to.objects if obj.type == 'EMPTY']
+
+ container = None
+
+ for empty in empties:
+ if empty.get(AVALON_PROPERTY):
+ container = empty
+ break
+
+ assert container, "No asset group found"
+
+ # Children must be linked before parents,
+ # otherwise the hierarchy will break
+ objects = []
+ nodes = list(container.children)
+
+ for obj in nodes:
+ obj.parent = asset_group
+
+ for obj in nodes:
+ objects.append(obj)
+ nodes.extend(list(obj.children))
+
+ objects.reverse()
+
+ constraints = []
+
+ armatures = [obj for obj in objects if obj.type == 'ARMATURE']
+
+ for armature in armatures:
+ for bone in armature.pose.bones:
+ for constraint in bone.constraints:
+ if hasattr(constraint, 'target'):
+ constraints.append(constraint)
+
+ for obj in objects:
+ parent.objects.link(obj)
+
+ for obj in objects:
+ local_obj = plugin.prepare_data(obj, group_name)
+
+ action = None
+
+ if actions:
+ action = actions.get(local_obj.name, None)
+
+ if local_obj.type == 'MESH':
+ plugin.prepare_data(local_obj.data, group_name)
+
+ if obj != local_obj:
+ for constraint in constraints:
+ if constraint.target == obj:
+ constraint.target = local_obj
+
+ for material_slot in local_obj.material_slots:
+ if material_slot.material:
+ plugin.prepare_data(material_slot.material, group_name)
+ elif local_obj.type == 'ARMATURE':
+ plugin.prepare_data(local_obj.data, group_name)
+
+ if action is not None:
+ local_obj.animation_data.action = action
+ elif local_obj.animation_data.action is not None:
+ plugin.prepare_data(
+ local_obj.animation_data.action, group_name)
+
+ # Set link the drivers to the local object
+ if local_obj.data.animation_data:
+ for d in local_obj.data.animation_data.drivers:
+ for v in d.driver.variables:
+ for t in v.targets:
+ t.id = local_obj
+
+ if not local_obj.get(AVALON_PROPERTY):
+ local_obj[AVALON_PROPERTY] = dict()
+
+ avalon_info = local_obj[AVALON_PROPERTY]
+ avalon_info.update({"container_name": group_name})
+
+ objects.reverse()
+
+ bpy.data.orphans_purge(do_local_ids=False)
+
+ bpy.ops.object.select_all(action='DESELECT')
+
+ return objects
+
+ def process_asset(
+ self, context: dict, name: str, namespace: Optional[str] = None,
+ options: Optional[Dict] = None
+ ) -> Optional[List]:
+ """
+ Arguments:
+ name: Use pre-defined name
+ namespace: Use pre-defined namespace
+ context: Full parenthood of representation to load
+ options: Additional settings dictionary
+ """
+ libpath = self.fname
+ asset = context["asset"]["name"]
+ subset = context["subset"]["name"]
+
+ asset_name = plugin.asset_name(asset, subset)
+ unique_number = plugin.get_unique_number(asset, subset)
+ group_name = plugin.asset_name(asset, subset, unique_number)
+ namespace = namespace or f"{asset}_{unique_number}"
+
+ avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
+ if not avalon_container:
+ avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
+ bpy.context.scene.collection.children.link(avalon_container)
+
+ asset_group = bpy.data.objects.new(group_name, object_data=None)
+ asset_group.empty_display_type = 'SINGLE_ARROW'
+ avalon_container.objects.link(asset_group)
+
+ objects = self._process(libpath, asset_group, group_name, None)
+
+ for child in asset_group.children:
+ if child.get(AVALON_PROPERTY):
+ avalon_container.objects.link(child)
+
+ bpy.context.scene.collection.objects.link(asset_group)
+
+ asset_group[AVALON_PROPERTY] = {
+ "schema": "openpype:container-2.0",
+ "id": AVALON_CONTAINER_ID,
+ "name": name,
+ "namespace": namespace or '',
+ "loader": str(self.__class__.__name__),
+ "representation": str(context["representation"]["_id"]),
+ "libpath": libpath,
+ "asset_name": asset_name,
+ "parent": str(context["representation"]["parent"]),
+ "family": context["representation"]["context"]["family"],
+ "objectName": group_name
+ }
+
+ self[:] = objects
+ return objects
+
+ def update(self, container: Dict, representation: Dict):
+ """Update the loaded asset.
+
+ This will remove all objects of the current collection, load the new
+ ones and add them to the collection.
+ If the objects of the collection are used in another collection they
+ will not be removed, only unlinked. Normally this should not be the
+ case though.
+
+ Warning:
+ No nested collections are supported at the moment!
+ """
+ object_name = container["objectName"]
+ asset_group = bpy.data.objects.get(object_name)
+ libpath = Path(api.get_representation_path(representation))
+ extension = libpath.suffix.lower()
+
+ self.log.info(
+ "Container: %s\nRepresentation: %s",
+ pformat(container, indent=2),
+ pformat(representation, indent=2),
+ )
+
+ assert asset_group, (
+ f"The asset is not loaded: {container['objectName']}"
+ )
+ assert libpath, (
+ "No existing library file found for {container['objectName']}"
+ )
+ assert libpath.is_file(), (
+ f"The file doesn't exist: {libpath}"
+ )
+ assert extension in plugin.VALID_EXTENSIONS, (
+ f"Unsupported file: {libpath}"
+ )
+
+ metadata = asset_group.get(AVALON_PROPERTY)
+ group_libpath = metadata["libpath"]
+
+ normalized_group_libpath = (
+ str(Path(bpy.path.abspath(group_libpath)).resolve())
+ )
+ normalized_libpath = (
+ str(Path(bpy.path.abspath(str(libpath))).resolve())
+ )
+ self.log.debug(
+ "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s",
+ normalized_group_libpath,
+ normalized_libpath,
+ )
+ if normalized_group_libpath == normalized_libpath:
+ self.log.info("Library already loaded, not updating...")
+ return
+
+ actions = {}
+
+ for obj in asset_group.children:
+ obj_meta = obj.get(AVALON_PROPERTY)
+ if obj_meta.get('family') == 'rig':
+ rig = None
+ for child in obj.children:
+ if child.type == 'ARMATURE':
+ rig = child
+ break
+ if not rig:
+ raise Exception("No armature in the rig asset group.")
+ if rig.animation_data and rig.animation_data.action:
+ instance_name = obj_meta.get('instance_name')
+ actions[instance_name] = rig.animation_data.action
+
+ mat = asset_group.matrix_basis.copy()
+
+ # Remove the children of the asset_group first
+ for child in list(asset_group.children):
+ self._remove_asset_and_library(child)
+
+ # Check how many assets use the same library
+ count = 0
+ for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects:
+ if obj.get(AVALON_PROPERTY).get('libpath') == group_libpath:
+ count += 1
+
+ self._remove(asset_group)
+
+ # If it is the last object to use that library, remove it
+ if count == 1:
+ library = bpy.data.libraries.get(bpy.path.basename(group_libpath))
+ bpy.data.libraries.remove(library)
+
+ self._process(str(libpath), asset_group, object_name, actions)
+
+ avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
+ for child in asset_group.children:
+ if child.get(AVALON_PROPERTY):
+ avalon_container.objects.link(child)
+
+ asset_group.matrix_basis = mat
+
+ metadata["libpath"] = str(libpath)
+ metadata["representation"] = str(representation["_id"])
+
+ def exec_remove(self, container: Dict) -> bool:
+ """Remove an existing container from a Blender scene.
+
+ Arguments:
+ container (openpype:container-1.0): Container to remove,
+ from `host.ls()`.
+
+ Returns:
+ bool: Whether the container was deleted.
+
+ Warning:
+ No nested collections are supported at the moment!
+ """
+ object_name = container["objectName"]
+ asset_group = bpy.data.objects.get(object_name)
+
+ if not asset_group:
+ return False
+
+ # Remove the children of the asset_group first
+ for child in list(asset_group.children):
+ self._remove_asset_and_library(child)
+
+ self._remove_asset_and_library(asset_group)
+
+ return True
diff --git a/openpype/hosts/blender/plugins/load/load_layout_json.py b/openpype/hosts/blender/plugins/load/load_layout_json.py
new file mode 100644
index 0000000000..1a4dbbb5cb
--- /dev/null
+++ b/openpype/hosts/blender/plugins/load/load_layout_json.py
@@ -0,0 +1,259 @@
+"""Load a layout in Blender."""
+
+from pathlib import Path
+from pprint import pformat
+from typing import Dict, Optional
+
+import bpy
+import json
+
+from avalon import api
+from avalon.blender.pipeline import AVALON_CONTAINERS
+from avalon.blender.pipeline import AVALON_CONTAINER_ID
+from avalon.blender.pipeline import AVALON_PROPERTY
+from avalon.blender.pipeline import AVALON_INSTANCES
+from openpype.hosts.blender.api import plugin
+
+
+class JsonLayoutLoader(plugin.AssetLoader):
+ """Load layout published from Unreal."""
+
+ families = ["layout"]
+ representations = ["json"]
+
+ label = "Load Layout"
+ icon = "code-fork"
+ color = "orange"
+
+ animation_creator_name = "CreateAnimation"
+
+ def _remove(self, asset_group):
+ objects = list(asset_group.children)
+
+ for obj in objects:
+ api.remove(obj.get(AVALON_PROPERTY))
+
+ def _remove_animation_instances(self, asset_group):
+ instances = bpy.data.collections.get(AVALON_INSTANCES)
+ if instances:
+ for obj in list(asset_group.children):
+ anim_collection = instances.children.get(
+ obj.name + "_animation")
+ if anim_collection:
+ bpy.data.collections.remove(anim_collection)
+
+ def _get_loader(self, loaders, family):
+ name = ""
+ if family == 'rig':
+ name = "BlendRigLoader"
+ elif family == 'model':
+ name = "BlendModelLoader"
+
+ if name == "":
+ return None
+
+ for loader in loaders:
+ if loader.__name__ == name:
+ return loader
+
+ return None
+
+ def _process(self, libpath, asset, asset_group, actions):
+ bpy.ops.object.select_all(action='DESELECT')
+
+ with open(libpath, "r") as fp:
+ data = json.load(fp)
+
+ all_loaders = api.discover(api.Loader)
+
+ for element in data:
+ reference = element.get('reference')
+ family = element.get('family')
+
+ loaders = api.loaders_from_representation(all_loaders, reference)
+ loader = self._get_loader(loaders, family)
+
+ if not loader:
+ continue
+
+ instance_name = element.get('instance_name')
+
+ action = None
+
+ if actions:
+ action = actions.get(instance_name, None)
+
+ options = {
+ 'parent': asset_group,
+ 'transform': element.get('transform'),
+ 'action': action,
+ 'create_animation': True if family == 'rig' else False,
+ 'animation_asset': asset
+ }
+
+ # This should return the loaded asset, but the load call will be
+ # added to the queue to run in the Blender main thread, so
+ # at this time it will not return anything. The assets will be
+ # loaded in the next Blender cycle, so we use the options to
+ # set the transform, parent and assign the action, if there is one.
+ api.load(
+ loader,
+ reference,
+ namespace=instance_name,
+ options=options
+ )
+
+ def process_asset(self,
+ context: dict,
+ name: str,
+ namespace: Optional[str] = None,
+ options: Optional[Dict] = None):
+ """
+ Arguments:
+ name: Use pre-defined name
+ namespace: Use pre-defined namespace
+ context: Full parenthood of representation to load
+ options: Additional settings dictionary
+ """
+ libpath = self.fname
+ asset = context["asset"]["name"]
+ subset = context["subset"]["name"]
+
+ asset_name = plugin.asset_name(asset, subset)
+ unique_number = plugin.get_unique_number(asset, subset)
+ group_name = plugin.asset_name(asset, subset, unique_number)
+ namespace = namespace or f"{asset}_{unique_number}"
+
+ avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
+ if not avalon_container:
+ avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
+ bpy.context.scene.collection.children.link(avalon_container)
+
+ asset_group = bpy.data.objects.new(group_name, object_data=None)
+ asset_group.empty_display_type = 'SINGLE_ARROW'
+ avalon_container.objects.link(asset_group)
+
+ self._process(libpath, asset, asset_group, None)
+
+ bpy.context.scene.collection.objects.link(asset_group)
+
+ asset_group[AVALON_PROPERTY] = {
+ "schema": "openpype:container-2.0",
+ "id": AVALON_CONTAINER_ID,
+ "name": name,
+ "namespace": namespace or '',
+ "loader": str(self.__class__.__name__),
+ "representation": str(context["representation"]["_id"]),
+ "libpath": libpath,
+ "asset_name": asset_name,
+ "parent": str(context["representation"]["parent"]),
+ "family": context["representation"]["context"]["family"],
+ "objectName": group_name
+ }
+
+ self[:] = asset_group.children
+ return asset_group.children
+
+ def exec_update(self, container: Dict, representation: Dict):
+ """Update the loaded asset.
+
+ This will remove all objects of the current collection, load the new
+ ones and add them to the collection.
+ If the objects of the collection are used in another collection they
+ will not be removed, only unlinked. Normally this should not be the
+ case though.
+ """
+ object_name = container["objectName"]
+ asset_group = bpy.data.objects.get(object_name)
+ libpath = Path(api.get_representation_path(representation))
+ extension = libpath.suffix.lower()
+
+ self.log.info(
+ "Container: %s\nRepresentation: %s",
+ pformat(container, indent=2),
+ pformat(representation, indent=2),
+ )
+
+ assert asset_group, (
+ f"The asset is not loaded: {container['objectName']}"
+ )
+ assert libpath, (
+ "No existing library file found for {container['objectName']}"
+ )
+ assert libpath.is_file(), (
+ f"The file doesn't exist: {libpath}"
+ )
+ assert extension in plugin.VALID_EXTENSIONS, (
+ f"Unsupported file: {libpath}"
+ )
+
+ metadata = asset_group.get(AVALON_PROPERTY)
+ group_libpath = metadata["libpath"]
+
+ normalized_group_libpath = (
+ str(Path(bpy.path.abspath(group_libpath)).resolve())
+ )
+ normalized_libpath = (
+ str(Path(bpy.path.abspath(str(libpath))).resolve())
+ )
+ self.log.debug(
+ "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s",
+ normalized_group_libpath,
+ normalized_libpath,
+ )
+ if normalized_group_libpath == normalized_libpath:
+ self.log.info("Library already loaded, not updating...")
+ return
+
+ actions = {}
+
+ for obj in asset_group.children:
+ obj_meta = obj.get(AVALON_PROPERTY)
+ if obj_meta.get('family') == 'rig':
+ rig = None
+ for child in obj.children:
+ if child.type == 'ARMATURE':
+ rig = child
+ break
+ if not rig:
+ raise Exception("No armature in the rig asset group.")
+ if rig.animation_data and rig.animation_data.action:
+ namespace = obj_meta.get('namespace')
+ actions[namespace] = rig.animation_data.action
+
+ mat = asset_group.matrix_basis.copy()
+
+ self._remove_animation_instances(asset_group)
+
+ self._remove(asset_group)
+
+ self._process(str(libpath), asset_group, actions)
+
+ asset_group.matrix_basis = mat
+
+ metadata["libpath"] = str(libpath)
+ metadata["representation"] = str(representation["_id"])
+
+ def exec_remove(self, container: Dict) -> bool:
+ """Remove an existing container from a Blender scene.
+
+ Arguments:
+ container (openpype:container-1.0): Container to remove,
+ from `host.ls()`.
+
+ Returns:
+ bool: Whether the container was deleted.
+ """
+ object_name = container["objectName"]
+ asset_group = bpy.data.objects.get(object_name)
+
+ if not asset_group:
+ return False
+
+ self._remove_animation_instances(asset_group)
+
+ self._remove(asset_group)
+
+ bpy.data.objects.remove(asset_group)
+
+ return True
diff --git a/openpype/hosts/blender/plugins/load/load_model.py b/openpype/hosts/blender/plugins/load/load_model.py
index 35a241b98e..af5591c299 100644
--- a/openpype/hosts/blender/plugins/load/load_model.py
+++ b/openpype/hosts/blender/plugins/load/load_model.py
@@ -1,13 +1,16 @@
"""Load a model asset in Blender."""
-import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
-from avalon import api, blender
import bpy
-import openpype.hosts.blender.api.plugin as plugin
+
+from avalon import api
+from avalon.blender.pipeline import AVALON_CONTAINERS
+from avalon.blender.pipeline import AVALON_CONTAINER_ID
+from avalon.blender.pipeline import AVALON_PROPERTY
+from openpype.hosts.blender.api import plugin
class BlendModelLoader(plugin.AssetLoader):
@@ -24,52 +27,75 @@ class BlendModelLoader(plugin.AssetLoader):
icon = "code-fork"
color = "orange"
- def _remove(self, objects, container):
- for obj in list(objects):
- for material_slot in list(obj.material_slots):
- bpy.data.materials.remove(material_slot.material)
- bpy.data.meshes.remove(obj.data)
+ def _remove(self, asset_group):
+ objects = list(asset_group.children)
- bpy.data.collections.remove(container)
+ for obj in objects:
+ if obj.type == 'MESH':
+ for material_slot in list(obj.material_slots):
+ bpy.data.materials.remove(material_slot.material)
+ bpy.data.meshes.remove(obj.data)
+ elif obj.type == 'EMPTY':
+ objects.extend(obj.children)
+ bpy.data.objects.remove(obj)
- def _process(
- self, libpath, lib_container, container_name,
- parent_collection
- ):
- relative = bpy.context.preferences.filepaths.use_relative_paths
+ def _process(self, libpath, asset_group, group_name):
with bpy.data.libraries.load(
- libpath, link=True, relative=relative
- ) as (_, data_to):
- data_to.collections = [lib_container]
+ libpath, link=True, relative=False
+ ) as (data_from, data_to):
+ data_to.objects = data_from.objects
- parent = parent_collection
+ parent = bpy.context.scene.collection
- if parent is None:
- parent = bpy.context.scene.collection
+ empties = [obj for obj in data_to.objects if obj.type == 'EMPTY']
- parent.children.link(bpy.data.collections[lib_container])
+ container = None
- model_container = parent.children[lib_container].make_local()
- model_container.name = container_name
+ for empty in empties:
+ if empty.get(AVALON_PROPERTY):
+ container = empty
+ break
- for obj in model_container.objects:
- local_obj = plugin.prepare_data(obj, container_name)
- plugin.prepare_data(local_obj.data, container_name)
+ assert container, "No asset group found"
- for material_slot in local_obj.material_slots:
- plugin.prepare_data(material_slot.material, container_name)
+ # Children must be linked before parents,
+ # otherwise the hierarchy will break
+ objects = []
+ nodes = list(container.children)
- if not obj.get(blender.pipeline.AVALON_PROPERTY):
- local_obj[blender.pipeline.AVALON_PROPERTY] = dict()
+ for obj in nodes:
+ obj.parent = asset_group
- avalon_info = local_obj[blender.pipeline.AVALON_PROPERTY]
- avalon_info.update({"container_name": container_name})
+ for obj in nodes:
+ objects.append(obj)
+ nodes.extend(list(obj.children))
- model_container.pop(blender.pipeline.AVALON_PROPERTY)
+ objects.reverse()
+
+ for obj in objects:
+ parent.objects.link(obj)
+
+ for obj in objects:
+ local_obj = plugin.prepare_data(obj, group_name)
+ if local_obj.type != 'EMPTY':
+ plugin.prepare_data(local_obj.data, group_name)
+
+ for material_slot in local_obj.material_slots:
+ plugin.prepare_data(material_slot.material, group_name)
+
+ if not local_obj.get(AVALON_PROPERTY):
+ local_obj[AVALON_PROPERTY] = dict()
+
+ avalon_info = local_obj[AVALON_PROPERTY]
+ avalon_info.update({"container_name": group_name})
+
+ objects.reverse()
+
+ bpy.data.orphans_purge(do_local_ids=False)
bpy.ops.object.select_all(action='DESELECT')
- return model_container
+ return objects
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
@@ -82,54 +108,80 @@ class BlendModelLoader(plugin.AssetLoader):
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
-
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
- lib_container = plugin.asset_name(
- asset, subset
- )
- unique_number = plugin.get_unique_number(
- asset, subset
- )
+ asset_name = plugin.asset_name(asset, subset)
+ unique_number = plugin.get_unique_number(asset, subset)
+ group_name = plugin.asset_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
- container_name = plugin.asset_name(
- asset, subset, unique_number
- )
- container = bpy.data.collections.new(lib_container)
- container.name = container_name
- blender.pipeline.containerise_existing(
- container,
- name,
- namespace,
- context,
- self.__class__.__name__,
- )
+ avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
+ if not avalon_container:
+ avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
+ bpy.context.scene.collection.children.link(avalon_container)
- metadata = container.get(blender.pipeline.AVALON_PROPERTY)
+ asset_group = bpy.data.objects.new(group_name, object_data=None)
+ asset_group.empty_display_type = 'SINGLE_ARROW'
+ avalon_container.objects.link(asset_group)
- metadata["libpath"] = libpath
- metadata["lib_container"] = lib_container
+ bpy.ops.object.select_all(action='DESELECT')
- obj_container = self._process(
- libpath, lib_container, container_name, None)
+ if options is not None:
+ parent = options.get('parent')
+ transform = options.get('transform')
- metadata["obj_container"] = obj_container
+ if parent and transform:
+ location = transform.get('translation')
+ rotation = transform.get('rotation')
+ scale = transform.get('scale')
- # Save the list of objects in the metadata container
- metadata["objects"] = obj_container.all_objects
+ asset_group.location = (
+ location.get('x'),
+ location.get('y'),
+ location.get('z')
+ )
+ asset_group.rotation_euler = (
+ rotation.get('x'),
+ rotation.get('y'),
+ rotation.get('z')
+ )
+ asset_group.scale = (
+ scale.get('x'),
+ scale.get('y'),
+ scale.get('z')
+ )
- metadata["parent"] = str(context["representation"]["parent"])
- metadata["family"] = context["representation"]["context"]["family"]
+ bpy.context.view_layer.objects.active = parent
+ asset_group.select_set(True)
- nodes = list(container.objects)
- nodes.append(container)
- self[:] = nodes
- return nodes
+ bpy.ops.object.parent_set(keep_transform=True)
- def update(self, container: Dict, representation: Dict):
+ bpy.ops.object.select_all(action='DESELECT')
+
+ objects = self._process(libpath, asset_group, group_name)
+
+ bpy.context.scene.collection.objects.link(asset_group)
+
+ asset_group[AVALON_PROPERTY] = {
+ "schema": "openpype:container-2.0",
+ "id": AVALON_CONTAINER_ID,
+ "name": name,
+ "namespace": namespace or '',
+ "loader": str(self.__class__.__name__),
+ "representation": str(context["representation"]["_id"]),
+ "libpath": libpath,
+ "asset_name": asset_name,
+ "parent": str(context["representation"]["parent"]),
+ "family": context["representation"]["context"]["family"],
+ "objectName": group_name
+ }
+
+ self[:] = objects
+ return objects
+
+ def exec_update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
@@ -137,13 +189,9 @@ class BlendModelLoader(plugin.AssetLoader):
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
-
- Warning:
- No nested collections are supported at the moment!
"""
- collection = bpy.data.collections.get(
- container["objectName"]
- )
+ object_name = container["objectName"]
+ asset_group = bpy.data.objects.get(object_name)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
@@ -153,12 +201,9 @@ class BlendModelLoader(plugin.AssetLoader):
pformat(representation, indent=2),
)
- assert collection, (
+ assert asset_group, (
f"The asset is not loaded: {container['objectName']}"
)
- assert not (collection.children), (
- "Nested collections are not supported."
- )
assert libpath, (
"No existing library file found for {container['objectName']}"
)
@@ -169,47 +214,47 @@ class BlendModelLoader(plugin.AssetLoader):
f"Unsupported file: {libpath}"
)
- collection_metadata = collection.get(
- blender.pipeline.AVALON_PROPERTY)
- collection_libpath = collection_metadata["libpath"]
- lib_container = collection_metadata["lib_container"]
+ metadata = asset_group.get(AVALON_PROPERTY)
+ group_libpath = metadata["libpath"]
- obj_container = plugin.get_local_collection_with_name(
- collection_metadata["obj_container"].name
- )
- objects = obj_container.all_objects
-
- container_name = obj_container.name
-
- normalized_collection_libpath = (
- str(Path(bpy.path.abspath(collection_libpath)).resolve())
+ normalized_group_libpath = (
+ str(Path(bpy.path.abspath(group_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
self.log.debug(
- "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
- normalized_collection_libpath,
+ "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s",
+ normalized_group_libpath,
normalized_libpath,
)
- if normalized_collection_libpath == normalized_libpath:
+ if normalized_group_libpath == normalized_libpath:
self.log.info("Library already loaded, not updating...")
return
- parent = plugin.get_parent_collection(obj_container)
+ # Check how many assets use the same library
+ count = 0
+ for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects:
+ if obj.get(AVALON_PROPERTY).get('libpath') == group_libpath:
+ count += 1
- self._remove(objects, obj_container)
+ mat = asset_group.matrix_basis.copy()
- obj_container = self._process(
- str(libpath), lib_container, container_name, parent)
+ self._remove(asset_group)
- # Save the list of objects in the metadata container
- collection_metadata["obj_container"] = obj_container
- collection_metadata["objects"] = obj_container.all_objects
- collection_metadata["libpath"] = str(libpath)
- collection_metadata["representation"] = str(representation["_id"])
+ # If it is the last object to use that library, remove it
+ if count == 1:
+ library = bpy.data.libraries.get(bpy.path.basename(group_libpath))
+ bpy.data.libraries.remove(library)
- def remove(self, container: Dict) -> bool:
+ self._process(str(libpath), asset_group, object_name)
+
+ asset_group.matrix_basis = mat
+
+ metadata["libpath"] = str(libpath)
+ metadata["representation"] = str(representation["_id"])
+
+ def exec_remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
@@ -218,29 +263,27 @@ class BlendModelLoader(plugin.AssetLoader):
Returns:
bool: Whether the container was deleted.
-
- Warning:
- No nested collections are supported at the moment!
"""
- collection = bpy.data.collections.get(
- container["objectName"]
- )
- if not collection:
+ object_name = container["objectName"]
+ asset_group = bpy.data.objects.get(object_name)
+ libpath = asset_group.get(AVALON_PROPERTY).get('libpath')
+
+ # Check how many assets use the same library
+ count = 0
+ for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects:
+ if obj.get(AVALON_PROPERTY).get('libpath') == libpath:
+ count += 1
+
+ if not asset_group:
return False
- assert not (collection.children), (
- "Nested collections are not supported."
- )
- collection_metadata = collection.get(
- blender.pipeline.AVALON_PROPERTY)
+ self._remove(asset_group)
- obj_container = plugin.get_local_collection_with_name(
- collection_metadata["obj_container"].name
- )
- objects = obj_container.all_objects
+ bpy.data.objects.remove(asset_group)
- self._remove(objects, obj_container)
-
- bpy.data.collections.remove(collection)
+ # If it is the last object to use that library, remove it
+ if count == 1:
+ library = bpy.data.libraries.get(bpy.path.basename(libpath))
+ bpy.data.libraries.remove(library)
return True
diff --git a/openpype/hosts/blender/plugins/load/load_rig.py b/openpype/hosts/blender/plugins/load/load_rig.py
index b6be8f4cf6..5573c081e1 100644
--- a/openpype/hosts/blender/plugins/load/load_rig.py
+++ b/openpype/hosts/blender/plugins/load/load_rig.py
@@ -1,21 +1,21 @@
"""Load a rig asset in Blender."""
-import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
-from avalon import api, blender
import bpy
-import openpype.hosts.blender.api.plugin as plugin
+
+from avalon import api
+from avalon.blender.pipeline import AVALON_CONTAINERS
+from avalon.blender.pipeline import AVALON_CONTAINER_ID
+from avalon.blender.pipeline import AVALON_PROPERTY
+from openpype import lib
+from openpype.hosts.blender.api import plugin
class BlendRigLoader(plugin.AssetLoader):
- """Load rigs from a .blend file.
-
- Because they come from a .blend file we can simply link the collection that
- contains the model. There is no further need to 'containerise' it.
- """
+ """Load rigs from a .blend file."""
families = ["rig"]
representations = ["blend"]
@@ -24,105 +24,113 @@ class BlendRigLoader(plugin.AssetLoader):
icon = "code-fork"
color = "orange"
- def _remove(self, objects, obj_container):
- for obj in list(objects):
- if obj.type == 'ARMATURE':
- bpy.data.armatures.remove(obj.data)
- elif obj.type == 'MESH':
+ def _remove(self, asset_group):
+ objects = list(asset_group.children)
+
+ for obj in objects:
+ if obj.type == 'MESH':
+ for material_slot in list(obj.material_slots):
+ if material_slot.material:
+ bpy.data.materials.remove(material_slot.material)
bpy.data.meshes.remove(obj.data)
+ elif obj.type == 'ARMATURE':
+ objects.extend(obj.children)
+ bpy.data.armatures.remove(obj.data)
elif obj.type == 'CURVE':
bpy.data.curves.remove(obj.data)
+ elif obj.type == 'EMPTY':
+ objects.extend(obj.children)
+ bpy.data.objects.remove(obj)
- for child in obj_container.children:
- bpy.data.collections.remove(child)
-
- bpy.data.collections.remove(obj_container)
-
- def make_local_and_metadata(self, obj, collection_name):
- local_obj = plugin.prepare_data(obj, collection_name)
- plugin.prepare_data(local_obj.data, collection_name)
-
- if not local_obj.get(blender.pipeline.AVALON_PROPERTY):
- local_obj[blender.pipeline.AVALON_PROPERTY] = dict()
-
- avalon_info = local_obj[blender.pipeline.AVALON_PROPERTY]
- avalon_info.update({"container_name": collection_name + '_CON'})
-
- return local_obj
-
- def _process(
- self, libpath, lib_container, collection_name,
- action, parent_collection
- ):
- relative = bpy.context.preferences.filepaths.use_relative_paths
+ def _process(self, libpath, asset_group, group_name, action):
with bpy.data.libraries.load(
- libpath, link=True, relative=relative
- ) as (_, data_to):
- data_to.collections = [lib_container]
+ libpath, link=True, relative=False
+ ) as (data_from, data_to):
+ data_to.objects = data_from.objects
- parent = parent_collection
+ parent = bpy.context.scene.collection
- if parent is None:
- parent = bpy.context.scene.collection
+ empties = [obj for obj in data_to.objects if obj.type == 'EMPTY']
- parent.children.link(bpy.data.collections[lib_container])
+ container = None
- rig_container = parent.children[lib_container].make_local()
- rig_container.name = collection_name
+ for empty in empties:
+ if empty.get(AVALON_PROPERTY):
+ container = empty
+ break
+ assert container, "No asset group found"
+
+ # Children must be linked before parents,
+ # otherwise the hierarchy will break
objects = []
- armatures = [
- obj for obj in rig_container.objects
- if obj.type == 'ARMATURE'
- ]
+ nodes = list(container.children)
- for child in rig_container.children:
- local_child = plugin.prepare_data(child, collection_name)
- objects.extend(local_child.objects)
+ for obj in nodes:
+ obj.parent = asset_group
- # for obj in bpy.data.objects:
- # obj.select_set(False)
+ for obj in nodes:
+ objects.append(obj)
+ nodes.extend(list(obj.children))
+
+ objects.reverse()
constraints = []
+ armatures = [obj for obj in objects if obj.type == 'ARMATURE']
+
for armature in armatures:
for bone in armature.pose.bones:
for constraint in bone.constraints:
if hasattr(constraint, 'target'):
constraints.append(constraint)
- # Link armatures after other objects.
- # The armature is unparented for all the non-local meshes,
- # when it is made local.
for obj in objects:
- local_obj = self.make_local_and_metadata(obj, collection_name)
+ parent.objects.link(obj)
- if obj != local_obj:
- for constraint in constraints:
- if constraint.target == obj:
- constraint.target = local_obj
+ for obj in objects:
+ local_obj = plugin.prepare_data(obj, group_name)
- for armature in armatures:
- local_obj = self.make_local_and_metadata(armature, collection_name)
+ if local_obj.type == 'MESH':
+ plugin.prepare_data(local_obj.data, group_name)
- if action is not None:
- local_obj.animation_data.action = action
- elif local_obj.animation_data.action is not None:
- plugin.prepare_data(
- local_obj.animation_data.action, collection_name)
+ if obj != local_obj:
+ for constraint in constraints:
+ if constraint.target == obj:
+ constraint.target = local_obj
- # Set link the drivers to the local object
- if local_obj.data.animation_data:
- for d in local_obj.data.animation_data.drivers:
- for v in d.driver.variables:
- for t in v.targets:
- t.id = local_obj
+ for material_slot in local_obj.material_slots:
+ if material_slot.material:
+ plugin.prepare_data(material_slot.material, group_name)
+ elif local_obj.type == 'ARMATURE':
+ plugin.prepare_data(local_obj.data, group_name)
- rig_container.pop(blender.pipeline.AVALON_PROPERTY)
+ if action is not None:
+ local_obj.animation_data.action = action
+ elif local_obj.animation_data.action is not None:
+ plugin.prepare_data(
+ local_obj.animation_data.action, group_name)
+
+                # Link the drivers to the local object
+ if local_obj.data.animation_data:
+ for d in local_obj.data.animation_data.drivers:
+ for v in d.driver.variables:
+ for t in v.targets:
+ t.id = local_obj
+
+ if not local_obj.get(AVALON_PROPERTY):
+ local_obj[AVALON_PROPERTY] = dict()
+
+ avalon_info = local_obj[AVALON_PROPERTY]
+ avalon_info.update({"container_name": group_name})
+
+ objects.reverse()
+
+ bpy.data.orphans_purge(do_local_ids=False)
bpy.ops.object.select_all(action='DESELECT')
- return rig_container
+ return objects
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
@@ -138,61 +146,111 @@ class BlendRigLoader(plugin.AssetLoader):
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
- lib_container = plugin.asset_name(
- asset, subset
- )
- unique_number = plugin.get_unique_number(
- asset, subset
- )
+
+ asset_name = plugin.asset_name(asset, subset)
+ unique_number = plugin.get_unique_number(asset, subset)
+ group_name = plugin.asset_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
- collection_name = plugin.asset_name(
- asset, subset, unique_number
- )
- container = bpy.data.collections.new(collection_name)
- blender.pipeline.containerise_existing(
- container,
- name,
- namespace,
- context,
- self.__class__.__name__,
- )
+ avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
+ if not avalon_container:
+ avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
+ bpy.context.scene.collection.children.link(avalon_container)
- metadata = container.get(blender.pipeline.AVALON_PROPERTY)
+ asset_group = bpy.data.objects.new(group_name, object_data=None)
+ asset_group.empty_display_type = 'SINGLE_ARROW'
+ avalon_container.objects.link(asset_group)
- metadata["libpath"] = libpath
- metadata["lib_container"] = lib_container
+ action = None
- obj_container = self._process(
- libpath, lib_container, collection_name, None, None)
+ bpy.ops.object.select_all(action='DESELECT')
- metadata["obj_container"] = obj_container
- # Save the list of objects in the metadata container
- metadata["objects"] = obj_container.all_objects
+ create_animation = False
- metadata["parent"] = str(context["representation"]["parent"])
- metadata["family"] = context["representation"]["context"]["family"]
+ if options is not None:
+ parent = options.get('parent')
+ transform = options.get('transform')
+ action = options.get('action')
+ create_animation = options.get('create_animation')
- nodes = list(container.objects)
- nodes.append(container)
- self[:] = nodes
- return nodes
+ if parent and transform:
+ location = transform.get('translation')
+ rotation = transform.get('rotation')
+ scale = transform.get('scale')
- def update(self, container: Dict, representation: Dict):
+ asset_group.location = (
+ location.get('x'),
+ location.get('y'),
+ location.get('z')
+ )
+ asset_group.rotation_euler = (
+ rotation.get('x'),
+ rotation.get('y'),
+ rotation.get('z')
+ )
+ asset_group.scale = (
+ scale.get('x'),
+ scale.get('y'),
+ scale.get('z')
+ )
+
+ bpy.context.view_layer.objects.active = parent
+ asset_group.select_set(True)
+
+ bpy.ops.object.parent_set(keep_transform=True)
+
+ bpy.ops.object.select_all(action='DESELECT')
+
+ objects = self._process(libpath, asset_group, group_name, action)
+
+ if create_animation:
+ creator_plugin = lib.get_creator_by_name("CreateAnimation")
+ if not creator_plugin:
+ raise ValueError("Creator plugin \"CreateAnimation\" was "
+ "not found.")
+
+ asset_group.select_set(True)
+
+ animation_asset = options.get('animation_asset')
+
+ api.create(
+ creator_plugin,
+ name=namespace + "_animation",
+ # name=f"{unique_number}_{subset}_animation",
+ asset=animation_asset,
+ options={"useSelection": False, "asset_group": asset_group},
+ data={"dependencies": str(context["representation"]["_id"])}
+ )
+
+ bpy.ops.object.select_all(action='DESELECT')
+
+ bpy.context.scene.collection.objects.link(asset_group)
+
+ asset_group[AVALON_PROPERTY] = {
+ "schema": "openpype:container-2.0",
+ "id": AVALON_CONTAINER_ID,
+ "name": name,
+ "namespace": namespace or '',
+ "loader": str(self.__class__.__name__),
+ "representation": str(context["representation"]["_id"]),
+ "libpath": libpath,
+ "asset_name": asset_name,
+ "parent": str(context["representation"]["parent"]),
+ "family": context["representation"]["context"]["family"],
+ "objectName": group_name
+ }
+
+ self[:] = objects
+ return objects
+
+ def exec_update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
- This will remove all objects of the current collection, load the new
- ones and add them to the collection.
- If the objects of the collection are used in another collection they
- will not be removed, only unlinked. Normally this should not be the
- case though.
-
- Warning:
- No nested collections are supported at the moment!
+ This will remove all children of the asset group, load the new ones
+ and add them as children of the group.
"""
- collection = bpy.data.collections.get(
- container["objectName"]
- )
+ object_name = container["objectName"]
+ asset_group = bpy.data.objects.get(object_name)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
@@ -202,12 +260,9 @@ class BlendRigLoader(plugin.AssetLoader):
pformat(representation, indent=2),
)
- assert collection, (
+ assert asset_group, (
f"The asset is not loaded: {container['objectName']}"
)
- assert not (collection.children), (
- "Nested collections are not supported."
- )
assert libpath, (
"No existing library file found for {container['objectName']}"
)
@@ -218,89 +273,84 @@ class BlendRigLoader(plugin.AssetLoader):
f"Unsupported file: {libpath}"
)
- collection_metadata = collection.get(
- blender.pipeline.AVALON_PROPERTY)
- collection_libpath = collection_metadata["libpath"]
- lib_container = collection_metadata["lib_container"]
+ metadata = asset_group.get(AVALON_PROPERTY)
+ group_libpath = metadata["libpath"]
- obj_container = plugin.get_local_collection_with_name(
- collection_metadata["obj_container"].name
- )
- objects = obj_container.all_objects
-
- container_name = obj_container.name
-
- normalized_collection_libpath = (
- str(Path(bpy.path.abspath(collection_libpath)).resolve())
+ normalized_group_libpath = (
+ str(Path(bpy.path.abspath(group_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
self.log.debug(
- "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
- normalized_collection_libpath,
+ "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s",
+ normalized_group_libpath,
normalized_libpath,
)
- if normalized_collection_libpath == normalized_libpath:
+ if normalized_group_libpath == normalized_libpath:
self.log.info("Library already loaded, not updating...")
return
+ # Check how many assets use the same library
+ count = 0
+ for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects:
+ if obj.get(AVALON_PROPERTY).get('libpath') == group_libpath:
+ count += 1
+
# Get the armature of the rig
- armatures = [obj for obj in objects if obj.type == 'ARMATURE']
- assert(len(armatures) == 1)
+ objects = asset_group.children
+ armature = [obj for obj in objects if obj.type == 'ARMATURE'][0]
action = None
- if armatures[0].animation_data and armatures[0].animation_data.action:
- action = armatures[0].animation_data.action
+ if armature.animation_data and armature.animation_data.action:
+ action = armature.animation_data.action
- parent = plugin.get_parent_collection(obj_container)
+ mat = asset_group.matrix_basis.copy()
- self._remove(objects, obj_container)
+ self._remove(asset_group)
- obj_container = self._process(
- str(libpath), lib_container, container_name, action, parent)
+ # If it is the last object to use that library, remove it
+ if count == 1:
+ library = bpy.data.libraries.get(bpy.path.basename(group_libpath))
+ bpy.data.libraries.remove(library)
- # Save the list of objects in the metadata container
- collection_metadata["obj_container"] = obj_container
- collection_metadata["objects"] = obj_container.all_objects
- collection_metadata["libpath"] = str(libpath)
- collection_metadata["representation"] = str(representation["_id"])
+ self._process(str(libpath), asset_group, object_name, action)
- bpy.ops.object.select_all(action='DESELECT')
+ asset_group.matrix_basis = mat
- def remove(self, container: Dict) -> bool:
- """Remove an existing container from a Blender scene.
+ metadata["libpath"] = str(libpath)
+ metadata["representation"] = str(representation["_id"])
+
+ def exec_remove(self, container: Dict) -> bool:
+ """Remove an existing asset group from a Blender scene.
Arguments:
container (openpype:container-1.0): Container to remove,
from `host.ls()`.
Returns:
- bool: Whether the container was deleted.
-
- Warning:
- No nested collections are supported at the moment!
+ bool: Whether the asset group was deleted.
"""
+ object_name = container["objectName"]
+ asset_group = bpy.data.objects.get(object_name)
+ libpath = asset_group.get(AVALON_PROPERTY).get('libpath')
- collection = bpy.data.collections.get(
- container["objectName"]
- )
- if not collection:
+ # Check how many assets use the same library
+ count = 0
+ for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects:
+ if obj.get(AVALON_PROPERTY).get('libpath') == libpath:
+ count += 1
+
+ if not asset_group:
return False
- assert not (collection.children), (
- "Nested collections are not supported."
- )
- collection_metadata = collection.get(
- blender.pipeline.AVALON_PROPERTY)
+ self._remove(asset_group)
- obj_container = plugin.get_local_collection_with_name(
- collection_metadata["obj_container"].name
- )
- objects = obj_container.all_objects
+ bpy.data.objects.remove(asset_group)
- self._remove(objects, obj_container)
-
- bpy.data.collections.remove(collection)
+ # If it is the last object to use that library, remove it
+ if count == 1:
+ library = bpy.data.libraries.get(bpy.path.basename(libpath))
+ bpy.data.libraries.remove(library)
return True
diff --git a/openpype/hosts/blender/plugins/publish/collect_instances.py b/openpype/hosts/blender/plugins/publish/collect_instances.py
index 1d3693216d..0d683dace4 100644
--- a/openpype/hosts/blender/plugins/publish/collect_instances.py
+++ b/openpype/hosts/blender/plugins/publish/collect_instances.py
@@ -5,6 +5,7 @@ import json
import pyblish.api
from avalon.blender.pipeline import AVALON_PROPERTY
+from avalon.blender.pipeline import AVALON_INSTANCES
class CollectInstances(pyblish.api.ContextPlugin):
@@ -14,6 +15,20 @@ class CollectInstances(pyblish.api.ContextPlugin):
label = "Collect Instances"
order = pyblish.api.CollectorOrder
+ @staticmethod
+ def get_asset_groups() -> Generator:
+        """Yield all asset groups marked as publish instances.
+
+        Scan the objects of the AVALON_INSTANCES collection and yield
+        those whose avalon property 'id' is 'pyblish.avalon.instance',
+        i.e. asset groups created for publishing.
+        """
+ instances = bpy.data.collections.get(AVALON_INSTANCES)
+ for obj in instances.objects:
+ avalon_prop = obj.get(AVALON_PROPERTY) or dict()
+ if avalon_prop.get('id') == 'pyblish.avalon.instance':
+ yield obj
+
@staticmethod
def get_collections() -> Generator:
"""Return all 'model' collections.
@@ -29,8 +44,35 @@ class CollectInstances(pyblish.api.ContextPlugin):
def process(self, context):
"""Collect the models from the current Blender scene."""
+ asset_groups = self.get_asset_groups()
collections = self.get_collections()
+ for group in asset_groups:
+ avalon_prop = group[AVALON_PROPERTY]
+ asset = avalon_prop['asset']
+ family = avalon_prop['family']
+ subset = avalon_prop['subset']
+ task = avalon_prop['task']
+ name = f"{asset}_{subset}"
+ instance = context.create_instance(
+ name=name,
+ family=family,
+ families=[family],
+ subset=subset,
+ asset=asset,
+ task=task,
+ )
+ objects = list(group.children)
+ members = set()
+ for obj in objects:
+ objects.extend(list(obj.children))
+ members.add(obj)
+ members.add(group)
+ instance[:] = list(members)
+ self.log.debug(json.dumps(instance.data, indent=4))
+ for obj in instance:
+ self.log.debug(obj)
+
for collection in collections:
avalon_prop = collection[AVALON_PROPERTY]
asset = avalon_prop['asset']
@@ -47,6 +89,12 @@ class CollectInstances(pyblish.api.ContextPlugin):
task=task,
)
members = list(collection.objects)
+ if family == "animation":
+ for obj in collection.objects:
+ if obj.type == 'EMPTY' and obj.get(AVALON_PROPERTY):
+ for child in obj.children:
+ if child.type == 'ARMATURE':
+ members.append(child)
members.append(collection)
instance[:] = members
self.log.debug(json.dumps(instance.data, indent=4))
diff --git a/openpype/hosts/blender/plugins/publish/extract_abc.py b/openpype/hosts/blender/plugins/publish/extract_abc.py
index a6315908fc..4696da3db4 100644
--- a/openpype/hosts/blender/plugins/publish/extract_abc.py
+++ b/openpype/hosts/blender/plugins/publish/extract_abc.py
@@ -1,12 +1,13 @@
import os
-import openpype.api
-import openpype.hosts.blender.api.plugin
+from openpype import api
+from openpype.hosts.blender.api import plugin
+from avalon.blender.pipeline import AVALON_PROPERTY
import bpy
-class ExtractABC(openpype.api.Extractor):
+class ExtractABC(api.Extractor):
"""Extract as ABC."""
label = "Extract ABC"
@@ -16,7 +17,6 @@ class ExtractABC(openpype.api.Extractor):
def process(self, instance):
# Define extract output file path
-
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.abc"
filepath = os.path.join(stagingdir, filename)
@@ -28,57 +28,29 @@ class ExtractABC(openpype.api.Extractor):
# Perform extraction
self.log.info("Performing extraction..")
- collections = [
- obj for obj in instance if type(obj) is bpy.types.Collection]
-
- assert len(collections) == 1, "There should be one and only one " \
- "collection collected for this asset"
-
- old_active_layer_collection = view_layer.active_layer_collection
-
- layers = view_layer.layer_collection.children
-
- # Get the layer collection from the collection we need to export.
- # This is needed because in Blender you can only set the active
- # collection with the layer collection, and there is no way to get
- # the layer collection from the collection
- # (but there is the vice versa).
- layer_collections = [
- layer for layer in layers if layer.collection == collections[0]]
-
- assert len(layer_collections) == 1
-
- view_layer.active_layer_collection = layer_collections[0]
-
- old_scale = scene.unit_settings.scale_length
-
bpy.ops.object.select_all(action='DESELECT')
- selected = list()
+ selected = []
+ asset_group = None
for obj in instance:
- try:
- obj.select_set(True)
- selected.append(obj)
- except:
- continue
+ obj.select_set(True)
+ selected.append(obj)
+ if obj.get(AVALON_PROPERTY):
+ asset_group = obj
- new_context = openpype.hosts.blender.api.plugin.create_blender_context(
- active=selected[0], selected=selected)
-
- # We set the scale of the scene for the export
- scene.unit_settings.scale_length = 0.01
+ context = plugin.create_blender_context(
+ active=asset_group, selected=selected)
# We export the abc
bpy.ops.wm.alembic_export(
- new_context,
+ context,
filepath=filepath,
- selected=True
+ selected=True,
+ flatten=False
)
- view_layer.active_layer_collection = old_active_layer_collection
-
- scene.unit_settings.scale_length = old_scale
+ bpy.ops.object.select_all(action='DESELECT')
if "representations" not in instance.data:
instance.data["representations"] = []
diff --git a/openpype/hosts/blender/plugins/publish/extract_blend.py b/openpype/hosts/blender/plugins/publish/extract_blend.py
index 890c8b5ffd..6687c9fe76 100644
--- a/openpype/hosts/blender/plugins/publish/extract_blend.py
+++ b/openpype/hosts/blender/plugins/publish/extract_blend.py
@@ -1,6 +1,8 @@
import os
-import avalon.blender.workio
+import bpy
+
+# import avalon.blender.workio
import openpype.api
@@ -9,7 +11,7 @@ class ExtractBlend(openpype.api.Extractor):
label = "Extract Blend"
hosts = ["blender"]
- families = ["model", "camera", "rig", "action", "layout", "animation"]
+ families = ["model", "camera", "rig", "action", "layout"]
optional = True
def process(self, instance):
@@ -22,15 +24,12 @@ class ExtractBlend(openpype.api.Extractor):
# Perform extraction
self.log.info("Performing extraction..")
- # Just save the file to a temporary location. At least for now it's no
- # problem to have (possibly) extra stuff in the file.
- avalon.blender.workio.save_file(filepath, copy=True)
- #
- # # Store reference for integration
- # if "files" not in instance.data:
- # instance.data["files"] = list()
- #
- # # instance.data["files"].append(filename)
+ data_blocks = set()
+
+ for obj in instance:
+ data_blocks.add(obj)
+
+ bpy.data.libraries.write(filepath, data_blocks)
if "representations" not in instance.data:
instance.data["representations"] = []
diff --git a/openpype/hosts/blender/plugins/publish/extract_blend_animation.py b/openpype/hosts/blender/plugins/publish/extract_blend_animation.py
new file mode 100644
index 0000000000..239ca53f98
--- /dev/null
+++ b/openpype/hosts/blender/plugins/publish/extract_blend_animation.py
@@ -0,0 +1,53 @@
+import os
+
+import bpy
+
+import openpype.api
+
+
+class ExtractBlendAnimation(openpype.api.Extractor):
+ """Extract a blend file."""
+
+ label = "Extract Blend"
+ hosts = ["blender"]
+ families = ["animation"]
+ optional = True
+
+ def process(self, instance):
+ # Define extract output file path
+
+ stagingdir = self.staging_dir(instance)
+ filename = f"{instance.name}.blend"
+ filepath = os.path.join(stagingdir, filename)
+
+ # Perform extraction
+ self.log.info("Performing extraction..")
+
+ data_blocks = set()
+
+ for obj in instance:
+ if isinstance(obj, bpy.types.Object) and obj.type == 'EMPTY':
+ child = obj.children[0]
+ if child and child.type == 'ARMATURE':
+ if not obj.animation_data:
+ obj.animation_data_create()
+ obj.animation_data.action = child.animation_data.action
+ obj.animation_data_clear()
+ data_blocks.add(child.animation_data.action)
+ data_blocks.add(obj)
+
+ bpy.data.libraries.write(filepath, data_blocks)
+
+ if "representations" not in instance.data:
+ instance.data["representations"] = []
+
+ representation = {
+ 'name': 'blend',
+ 'ext': 'blend',
+ 'files': filename,
+ "stagingDir": stagingdir,
+ }
+ instance.data["representations"].append(representation)
+
+ self.log.info("Extracted instance '%s' to: %s",
+ instance.name, representation)
diff --git a/openpype/hosts/blender/plugins/publish/extract_fbx.py b/openpype/hosts/blender/plugins/publish/extract_fbx.py
index 05149eacc1..b91f2a75ef 100644
--- a/openpype/hosts/blender/plugins/publish/extract_fbx.py
+++ b/openpype/hosts/blender/plugins/publish/extract_fbx.py
@@ -1,11 +1,13 @@
import os
-import openpype.api
+from openpype import api
+from openpype.hosts.blender.api import plugin
+from avalon.blender.pipeline import AVALON_PROPERTY
import bpy
-class ExtractFBX(openpype.api.Extractor):
+class ExtractFBX(api.Extractor):
"""Extract as FBX."""
label = "Extract FBX"
@@ -15,71 +17,56 @@ class ExtractFBX(openpype.api.Extractor):
def process(self, instance):
# Define extract output file path
-
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.fbx"
filepath = os.path.join(stagingdir, filename)
- context = bpy.context
- scene = context.scene
- view_layer = context.view_layer
-
# Perform extraction
self.log.info("Performing extraction..")
- collections = [
- obj for obj in instance if type(obj) is bpy.types.Collection]
+ bpy.ops.object.select_all(action='DESELECT')
- assert len(collections) == 1, "There should be one and only one " \
- "collection collected for this asset"
+ selected = []
+ asset_group = None
- old_active_layer_collection = view_layer.active_layer_collection
+ for obj in instance:
+ obj.select_set(True)
+ selected.append(obj)
+ if obj.get(AVALON_PROPERTY):
+ asset_group = obj
- layers = view_layer.layer_collection.children
-
- # Get the layer collection from the collection we need to export.
- # This is needed because in Blender you can only set the active
- # collection with the layer collection, and there is no way to get
- # the layer collection from the collection
- # (but there is the vice versa).
- layer_collections = [
- layer for layer in layers if layer.collection == collections[0]]
-
- assert len(layer_collections) == 1
-
- view_layer.active_layer_collection = layer_collections[0]
-
- old_scale = scene.unit_settings.scale_length
-
- # We set the scale of the scene for the export
- scene.unit_settings.scale_length = 0.01
+ context = plugin.create_blender_context(
+ active=asset_group, selected=selected)
new_materials = []
+ new_materials_objs = []
+ objects = list(asset_group.children)
- for obj in collections[0].all_objects:
- if obj.type == 'MESH':
+ for obj in objects:
+ objects.extend(obj.children)
+ if obj.type == 'MESH' and len(obj.data.materials) == 0:
mat = bpy.data.materials.new(obj.name)
obj.data.materials.append(mat)
new_materials.append(mat)
+ new_materials_objs.append(obj)
# We export the fbx
bpy.ops.export_scene.fbx(
+ context,
filepath=filepath,
- use_active_collection=True,
+ use_active_collection=False,
+ use_selection=True,
mesh_smooth_type='FACE',
add_leaf_bones=False
)
- view_layer.active_layer_collection = old_active_layer_collection
-
- scene.unit_settings.scale_length = old_scale
+ bpy.ops.object.select_all(action='DESELECT')
for mat in new_materials:
bpy.data.materials.remove(mat)
- for obj in collections[0].all_objects:
- if obj.type == 'MESH':
- obj.data.materials.pop()
+ for obj in new_materials_objs:
+ obj.data.materials.pop()
if "representations" not in instance.data:
instance.data["representations"] = []
diff --git a/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py b/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py
index 8312114c7b..16443b760c 100644
--- a/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py
+++ b/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py
@@ -1,14 +1,16 @@
import os
import json
-import openpype.api
-
import bpy
import bpy_extras
import bpy_extras.anim_utils
+from openpype import api
+from openpype.hosts.blender.api import plugin
+from avalon.blender.pipeline import AVALON_PROPERTY
-class ExtractAnimationFBX(openpype.api.Extractor):
+
+class ExtractAnimationFBX(api.Extractor):
"""Extract as animation."""
label = "Extract FBX"
@@ -20,33 +22,26 @@ class ExtractAnimationFBX(openpype.api.Extractor):
# Define extract output file path
stagingdir = self.staging_dir(instance)
- context = bpy.context
- scene = context.scene
-
# Perform extraction
self.log.info("Performing extraction..")
- collections = [
- obj for obj in instance if type(obj) is bpy.types.Collection]
+ # The first collection object in the instance is taken, as there
+ # should be only one that contains the asset group.
+ collection = [
+ obj for obj in instance if type(obj) is bpy.types.Collection][0]
- assert len(collections) == 1, "There should be one and only one " \
- "collection collected for this asset"
+ # Again, the first object in the collection is taken , as there
+ # should be only the asset group in the collection.
+ asset_group = collection.objects[0]
- old_scale = scene.unit_settings.scale_length
+ armature = [
+ obj for obj in asset_group.children if obj.type == 'ARMATURE'][0]
- # We set the scale of the scene for the export
- scene.unit_settings.scale_length = 0.01
-
- armatures = [
- obj for obj in collections[0].objects if obj.type == 'ARMATURE']
-
- assert len(collections) == 1, "There should be one and only one " \
- "armature collected for this asset"
-
- armature = armatures[0]
+ asset_group_name = asset_group.name
+ asset_group.name = asset_group.get(AVALON_PROPERTY).get("asset_name")
armature_name = armature.name
- original_name = armature_name.split(':')[0]
+ original_name = armature_name.split(':')[1]
armature.name = original_name
object_action_pairs = []
@@ -89,27 +84,29 @@ class ExtractAnimationFBX(openpype.api.Extractor):
for obj in bpy.data.objects:
obj.select_set(False)
+ asset_group.select_set(True)
armature.select_set(True)
fbx_filename = f"{instance.name}_{armature.name}.fbx"
filepath = os.path.join(stagingdir, fbx_filename)
- override = bpy.context.copy()
- override['selected_objects'] = [armature]
+ override = plugin.create_blender_context(
+ active=asset_group, selected=[asset_group, armature])
bpy.ops.export_scene.fbx(
override,
filepath=filepath,
+ use_active_collection=False,
use_selection=True,
bake_anim_use_nla_strips=False,
bake_anim_use_all_actions=False,
add_leaf_bones=False,
armature_nodetype='ROOT',
- object_types={'ARMATURE'}
+ object_types={'EMPTY', 'ARMATURE'}
)
armature.name = armature_name
+ asset_group.name = asset_group_name
+ asset_group.select_set(False)
armature.select_set(False)
- scene.unit_settings.scale_length = old_scale
-
# We delete the baked action and set the original one back
for i in range(0, len(object_action_pairs)):
pair = object_action_pairs[i]
@@ -125,18 +122,20 @@ class ExtractAnimationFBX(openpype.api.Extractor):
json_filename = f"{instance.name}.json"
json_path = os.path.join(stagingdir, json_filename)
- json_dict = {}
+ json_dict = {
+ "instance_name": asset_group.get(AVALON_PROPERTY).get("namespace")
+ }
- collection = instance.data.get("name")
- container = None
- for obj in bpy.data.collections[collection].objects:
- if obj.type == "ARMATURE":
- container_name = obj.get("avalon").get("container_name")
- container = bpy.data.collections[container_name]
- if container:
- json_dict = {
- "instance_name": container.get("avalon").get("instance_name")
- }
+ # collection = instance.data.get("name")
+ # container = None
+ # for obj in bpy.data.collections[collection].objects:
+ # if obj.type == "ARMATURE":
+ # container_name = obj.get("avalon").get("container_name")
+ # container = bpy.data.collections[container_name]
+ # if container:
+ # json_dict = {
+ # "instance_name": container.get("avalon").get("instance_name")
+ # }
with open(json_path, "w+") as file:
json.dump(json_dict, fp=file, indent=2)
@@ -159,6 +158,5 @@ class ExtractAnimationFBX(openpype.api.Extractor):
instance.data["representations"].append(fbx_representation)
instance.data["representations"].append(json_representation)
-
self.log.info("Extracted instance '{}' to: {}".format(
instance.name, fbx_representation))
diff --git a/openpype/hosts/blender/plugins/publish/extract_layout.py b/openpype/hosts/blender/plugins/publish/extract_layout.py
index c6c9bf67f5..cd081b4479 100644
--- a/openpype/hosts/blender/plugins/publish/extract_layout.py
+++ b/openpype/hosts/blender/plugins/publish/extract_layout.py
@@ -3,7 +3,8 @@ import json
import bpy
-from avalon import blender, io
+from avalon import io
+from avalon.blender.pipeline import AVALON_PROPERTY
import openpype.api
@@ -24,52 +25,49 @@ class ExtractLayout(openpype.api.Extractor):
json_data = []
- for collection in instance:
- for asset in collection.children:
- collection = bpy.data.collections[asset.name]
- container = bpy.data.collections[asset.name + '_CON']
- metadata = container.get(blender.pipeline.AVALON_PROPERTY)
+ asset_group = bpy.data.objects[str(instance)]
- parent = metadata["parent"]
- family = metadata["family"]
+ for asset in asset_group.children:
+ metadata = asset.get(AVALON_PROPERTY)
- self.log.debug("Parent: {}".format(parent))
- blend = io.find_one(
- {
- "type": "representation",
- "parent": io.ObjectId(parent),
- "name": "blend"
- },
- projection={"_id": True})
- blend_id = blend["_id"]
+ parent = metadata["parent"]
+ family = metadata["family"]
- json_element = {}
- json_element["reference"] = str(blend_id)
- json_element["family"] = family
- json_element["instance_name"] = asset.name
- json_element["asset_name"] = metadata["lib_container"]
- json_element["file_path"] = metadata["libpath"]
+ self.log.debug("Parent: {}".format(parent))
+ blend = io.find_one(
+ {
+ "type": "representation",
+ "parent": io.ObjectId(parent),
+ "name": "blend"
+ },
+ projection={"_id": True})
+ blend_id = blend["_id"]
- obj = collection.objects[0]
+ json_element = {}
+ json_element["reference"] = str(blend_id)
+ json_element["family"] = family
+ json_element["instance_name"] = asset.name
+ json_element["asset_name"] = metadata["asset_name"]
+ json_element["file_path"] = metadata["libpath"]
- json_element["transform"] = {
- "translation": {
- "x": obj.location.x,
- "y": obj.location.y,
- "z": obj.location.z
- },
- "rotation": {
- "x": obj.rotation_euler.x,
- "y": obj.rotation_euler.y,
- "z": obj.rotation_euler.z,
- },
- "scale": {
- "x": obj.scale.x,
- "y": obj.scale.y,
- "z": obj.scale.z
- }
+ json_element["transform"] = {
+ "translation": {
+ "x": asset.location.x,
+ "y": asset.location.y,
+ "z": asset.location.z
+ },
+ "rotation": {
+ "x": asset.rotation_euler.x,
+ "y": asset.rotation_euler.y,
+ "z": asset.rotation_euler.z,
+ },
+ "scale": {
+ "x": asset.scale.x,
+ "y": asset.scale.y,
+ "z": asset.scale.z
}
- json_data.append(json_element)
+ }
+ json_data.append(json_element)
json_filename = "{}.json".format(instance.name)
json_path = os.path.join(stagingdir, json_filename)
diff --git a/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py b/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py
new file mode 100644
index 0000000000..261ff864d5
--- /dev/null
+++ b/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py
@@ -0,0 +1,39 @@
+from typing import List
+
+import pyblish.api
+import openpype.hosts.blender.api.action
+
+
+class ValidateNoColonsInName(pyblish.api.InstancePlugin):
+ """There cannot be colons in names
+
+ Object or bone names cannot include colons. Other software do not
+ handle colons correctly.
+
+ """
+
+ order = openpype.api.ValidateContentsOrder
+ hosts = ["blender"]
+ families = ["model", "rig"]
+ version = (0, 1, 0)
+ label = "No Colons in names"
+ actions = [openpype.hosts.blender.api.action.SelectInvalidAction]
+
+ @classmethod
+ def get_invalid(cls, instance) -> List:
+ invalid = []
+ for obj in [obj for obj in instance]:
+ if ':' in obj.name:
+ invalid.append(obj)
+ if obj.type == 'ARMATURE':
+ for bone in obj.data.bones:
+ if ':' in bone.name:
+ invalid.append(obj)
+ break
+ return invalid
+
+ def process(self, instance):
+ invalid = self.get_invalid(instance)
+ if invalid:
+ raise RuntimeError(
+ f"Objects found with colon in name: {invalid}")
diff --git a/openpype/hosts/blender/plugins/publish/validate_transform_zero.py b/openpype/hosts/blender/plugins/publish/validate_transform_zero.py
new file mode 100644
index 0000000000..7456dbc423
--- /dev/null
+++ b/openpype/hosts/blender/plugins/publish/validate_transform_zero.py
@@ -0,0 +1,40 @@
+from typing import List
+
+import mathutils
+
+import pyblish.api
+import openpype.hosts.blender.api.action
+
+
+class ValidateTransformZero(pyblish.api.InstancePlugin):
+ """Transforms can't have any values
+
+ To solve this issue, try freezing the transforms. So long
+ as the transforms, rotation and scale values are zero,
+ you're all good.
+
+ """
+
+ order = openpype.api.ValidateContentsOrder
+ hosts = ["blender"]
+ families = ["model"]
+ category = "geometry"
+ version = (0, 1, 0)
+ label = "Transform Zero"
+ actions = [openpype.hosts.blender.api.action.SelectInvalidAction]
+
+ _identity = mathutils.Matrix()
+
+ @classmethod
+ def get_invalid(cls, instance) -> List:
+ invalid = []
+ for obj in [obj for obj in instance]:
+ if obj.matrix_basis != cls._identity:
+ invalid.append(obj)
+ return invalid
+
+ def process(self, instance):
+ invalid = self.get_invalid(instance)
+ if invalid:
+ raise RuntimeError(
+ f"Object found in instance is not in Object Mode: {invalid}")
diff --git a/openpype/hosts/houdini/api/__init__.py b/openpype/hosts/houdini/api/__init__.py
index 21f4ae41c3..7328236b97 100644
--- a/openpype/hosts/houdini/api/__init__.py
+++ b/openpype/hosts/houdini/api/__init__.py
@@ -1,17 +1,21 @@
import os
+import sys
import logging
+import contextlib
import hou
from pyblish import api as pyblish
-
from avalon import api as avalon
-from avalon.houdini import pipeline as houdini
import openpype.hosts.houdini
from openpype.hosts.houdini.api import lib
-from openpype.lib import any_outdated
+from openpype.lib import (
+ any_outdated
+)
+
+from .lib import get_asset_fps
log = logging.getLogger("openpype.hosts.houdini")
@@ -22,6 +26,7 @@ LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
+
def install():
pyblish.register_plugin_path(PUBLISH_PATH)
@@ -29,19 +34,28 @@ def install():
avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
log.info("Installing callbacks ... ")
- avalon.on("init", on_init)
+ # avalon.on("init", on_init)
avalon.before("save", before_save)
avalon.on("save", on_save)
avalon.on("open", on_open)
+ avalon.on("new", on_new)
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
log.info("Setting default family states for loader..")
- avalon.data["familiesStateToggled"] = ["imagesequence"]
+ avalon.data["familiesStateToggled"] = [
+ "imagesequence",
+ "review"
+ ]
+ # add houdini vendor packages
+ hou_pythonpath = os.path.join(os.path.dirname(HOST_DIR), "vendor")
-def on_init(*args):
- houdini.on_houdini_initialize()
+ sys.path.append(hou_pythonpath)
+
+ # Set asset FPS for the empty scene directly after launch of Houdini
+ # so it initializes into the correct scene FPS
+ _set_asset_fps()
def before_save(*args):
@@ -59,10 +73,18 @@ def on_save(*args):
def on_open(*args):
+ if not hou.isUIAvailable():
+ log.debug("Batch mode detected, ignoring `on_open` callbacks..")
+ return
+
avalon.logger.info("Running callback on open..")
+ # Validate FPS after update_task_from_path to
+ # ensure it is using correct FPS for the asset
+ lib.validate_fps()
+
if any_outdated():
- from ..widgets import popup
+ from openpype.widgets import popup
log.warning("Scene has outdated content.")
@@ -70,7 +92,7 @@ def on_open(*args):
parent = hou.ui.mainQtWindow()
if parent is None:
log.info("Skipping outdated content pop-up "
- "because Maya window can't be found.")
+ "because Houdini window can't be found.")
else:
# Show outdated pop-up
@@ -79,15 +101,52 @@ def on_open(*args):
tool.show(parent=parent)
dialog = popup.Popup(parent=parent)
- dialog.setWindowTitle("Maya scene has outdated content")
+ dialog.setWindowTitle("Houdini scene has outdated content")
dialog.setMessage("There are outdated containers in "
- "your Maya scene.")
- dialog.on_show.connect(_on_show_inventory)
+ "your Houdini scene.")
+ dialog.on_clicked.connect(_on_show_inventory)
dialog.show()
+def on_new(_):
+ """Set project resolution and fps when create a new file"""
+ avalon.logger.info("Running callback on new..")
+ _set_asset_fps()
+
+
+def _set_asset_fps():
+ """Set Houdini scene FPS to the default required for current asset"""
+
+ # Set new scene fps
+ fps = get_asset_fps()
+ print("Setting scene FPS to %i" % fps)
+ lib.set_scene_fps(fps)
+
+
def on_pyblish_instance_toggled(instance, new_value, old_value):
"""Toggle saver tool passthrough states on instance toggles."""
+ @contextlib.contextmanager
+ def main_take(no_update=True):
+ """Enter root take during context"""
+ original_take = hou.takes.currentTake()
+ original_update_mode = hou.updateModeSetting()
+ root = hou.takes.rootTake()
+ has_changed = False
+ try:
+ if original_take != root:
+ has_changed = True
+ if no_update:
+ hou.setUpdateMode(hou.updateMode.Manual)
+ hou.takes.setCurrentTake(root)
+ yield
+ finally:
+ if has_changed:
+ if no_update:
+ hou.setUpdateMode(original_update_mode)
+ hou.takes.setCurrentTake(original_take)
+
+ if not instance.data.get("_allowToggleBypass", True):
+ return
nodes = instance[:]
if not nodes:
@@ -96,8 +155,20 @@ def on_pyblish_instance_toggled(instance, new_value, old_value):
# Assume instance node is first node
instance_node = nodes[0]
+ if not hasattr(instance_node, "isBypassed"):
+ # Likely not a node that can actually be bypassed
+ log.debug("Can't bypass node: %s", instance_node.path())
+ return
+
if instance_node.isBypassed() != (not old_value):
print("%s old bypass state didn't match old instance state, "
"updating anyway.." % instance_node.path())
- instance_node.bypass(not new_value)
+ try:
+ # Go into the main take, because when in another take changing
+ # the bypass state of a note cannot be done due to it being locked
+ # by default.
+ with main_take(no_update=True):
+ instance_node.bypass(not new_value)
+ except hou.PermissionError as exc:
+ log.warning("%s - %s", instance_node.path(), exc)
diff --git a/openpype/hosts/houdini/api/lib.py b/openpype/hosts/houdini/api/lib.py
index 1f0f90811f..53f0e59ea9 100644
--- a/openpype/hosts/houdini/api/lib.py
+++ b/openpype/hosts/houdini/api/lib.py
@@ -1,14 +1,19 @@
import uuid
-
+import logging
from contextlib import contextmanager
-import hou
-
-from openpype import lib
-
+from openpype.api import get_asset
from avalon import api, io
from avalon.houdini import lib as houdini
+import hou
+
+log = logging.getLogger(__name__)
+
+
+def get_asset_fps():
+ """Return current asset fps."""
+ return get_asset()["data"].get("fps")
def set_id(node, unique_id, overwrite=False):
@@ -171,10 +176,10 @@ def get_output_parameter(node):
node_type = node.type().name()
if node_type == "geometry":
return node.parm("sopoutput")
-
elif node_type == "alembic":
return node.parm("filename")
-
+ elif node_type == "comp":
+ return node.parm("copoutput")
else:
raise TypeError("Node type '%s' not supported" % node_type)
@@ -205,7 +210,7 @@ def validate_fps():
"""
- fps = lib.get_asset()["data"]["fps"]
+ fps = get_asset_fps()
current_fps = hou.fps() # returns float
if current_fps != fps:
@@ -217,18 +222,123 @@ def validate_fps():
if parent is None:
pass
else:
- dialog = popup.Popup2(parent=parent)
+ dialog = popup.Popup(parent=parent)
dialog.setModal(True)
- dialog.setWindowTitle("Houdini scene not in line with project")
- dialog.setMessage("The FPS is out of sync, please fix it")
+ dialog.setWindowTitle("Houdini scene does not match project FPS")
+ dialog.setMessage("Scene %i FPS does not match project %i FPS" %
+ (current_fps, fps))
+ dialog.setButtonText("Fix")
- # Set new text for button (add optional argument for the popup?)
- toggle = dialog.widgets["toggle"]
- toggle.setEnabled(False)
- dialog.on_show.connect(lambda: set_scene_fps(fps))
+ # on_show is the Fix button clicked callback
+ dialog.on_clicked.connect(lambda: set_scene_fps(fps))
dialog.show()
return False
return True
+
+
+def create_remote_publish_node(force=True):
+ """Function to create a remote publish node in /out
+
+ This is a hacked "Shell" node that does *nothing* except for triggering
+ `colorbleed.lib.publish_remote()` as pre-render script.
+
+ All default attributes of the Shell node are hidden to the Artist to
+ avoid confusion.
+
+ Additionally some custom attributes are added that can be collected
+ by a Collector to set specific settings for the publish, e.g. whether
+ to separate the jobs per instance or process in one single job.
+
+ """
+
+ cmd = "import colorbleed.lib; colorbleed.lib.publish_remote()"
+
+ existing = hou.node("/out/REMOTE_PUBLISH")
+ if existing:
+ if force:
+ log.warning("Removing existing '/out/REMOTE_PUBLISH' node..")
+ existing.destroy()
+ else:
+ raise RuntimeError("Node already exists /out/REMOTE_PUBLISH. "
+ "Please remove manually or set `force` to "
+ "True.")
+
+ # Create the shell node
+ out = hou.node("/out")
+ node = out.createNode("shell", node_name="REMOTE_PUBLISH")
+ node.moveToGoodPosition()
+
+ # Set color make it stand out (avalon/pyblish color)
+ node.setColor(hou.Color(0.439, 0.709, 0.933))
+
+ # Set the pre-render script
+ node.setParms({
+ "prerender": cmd,
+ "lprerender": "python" # command language
+ })
+
+ # Lock the attributes to ensure artists won't easily mess things up.
+ node.parm("prerender").lock(True)
+ node.parm("lprerender").lock(True)
+
+ # Lock up the actual shell command
+ command_parm = node.parm("command")
+ command_parm.set("")
+ command_parm.lock(True)
+ shellexec_parm = node.parm("shellexec")
+ shellexec_parm.set(False)
+ shellexec_parm.lock(True)
+
+ # Get the node's parm template group so we can customize it
+ template = node.parmTemplateGroup()
+
+ # Hide default tabs
+ template.hideFolder("Shell", True)
+ template.hideFolder("Scripts", True)
+
+ # Hide default settings
+ template.hide("execute", True)
+ template.hide("renderdialog", True)
+ template.hide("trange", True)
+ template.hide("f", True)
+ template.hide("take", True)
+
+ # Add custom settings to this node.
+ parm_folder = hou.FolderParmTemplate("folder", "Submission Settings")
+
+ # Separate Jobs per Instance
+ parm = hou.ToggleParmTemplate(name="separateJobPerInstance",
+ label="Separate Job per Instance",
+ default_value=False)
+ parm_folder.addParmTemplate(parm)
+
+ # Add our custom Submission Settings folder
+ template.append(parm_folder)
+
+ # Apply template back to the node
+ node.setParmTemplateGroup(template)
+
+
+def render_rop(ropnode):
+ """Render ROP node utility for Publishing.
+
+ This renders a ROP node with the settings we want during Publishing.
+ """
+ # Print verbose when in batch mode without UI
+ verbose = not hou.isUIAvailable()
+
+ # Render
+ try:
+ ropnode.render(verbose=verbose,
+ # Allow Deadline to capture completion percentage
+ output_progress=verbose)
+ except hou.Error as exc:
+ # The hou.Error is not inherited from a Python Exception class,
+ # so we explicitly capture the houdini error, otherwise pyblish
+ # will remain hanging.
+ import traceback
+ traceback.print_exc()
+ raise RuntimeError("Render failed: {0}".format(exc))
diff --git a/openpype/hosts/houdini/api/plugin.py b/openpype/hosts/houdini/api/plugin.py
index 9820ed49c3..efdaa60084 100644
--- a/openpype/hosts/houdini/api/plugin.py
+++ b/openpype/hosts/houdini/api/plugin.py
@@ -1,6 +1,26 @@
+# -*- coding: utf-8 -*-
+"""Houdini specific Avalon/Pyblish plugin definitions."""
+import sys
from avalon import houdini
+import six
+
+import hou
from openpype.api import PypeCreatorMixin
-class Creator(PypeCreatorMixin, houdini.Creator):
+class OpenPypeCreatorError(Exception):
pass
+
+
+class Creator(PypeCreatorMixin, houdini.Creator):
+ def process(self):
+ try:
+ # re-raise as standard Python exception so
+ # Avalon can catch it
+ instance = super(Creator, self).process()
+ self._process(instance)
+ except hou.Error as er:
+ six.reraise(
+ OpenPypeCreatorError,
+ OpenPypeCreatorError("Creator error: {}".format(er)),
+ sys.exc_info()[2])
diff --git a/openpype/hosts/houdini/api/usd.py b/openpype/hosts/houdini/api/usd.py
new file mode 100644
index 0000000000..850ffb60e5
--- /dev/null
+++ b/openpype/hosts/houdini/api/usd.py
@@ -0,0 +1,255 @@
+"""Houdini-specific USD Library functions."""
+
+import contextlib
+
+import logging
+from Qt import QtCore, QtGui
+from avalon.tools.widgets import AssetWidget
+from avalon import style
+
+from pxr import Sdf
+
+
+log = logging.getLogger(__name__)
+
+
+def pick_asset(node):
+ """Show a user interface to select an Asset in the project
+
+ When double clicking an asset it will set the Asset value in the
+ 'asset' parameter.
+
+ """
+
+ pos = QtGui.QCursor.pos()
+
+ parm = node.parm("asset_name")
+ if not parm:
+ log.error("Node has no 'asset' parameter: %s", node)
+ return
+
+ # Construct the AssetWidget as a frameless popup so it automatically
+ # closes when clicked outside of it.
+ global tool
+ tool = AssetWidget(silo_creatable=False)
+ tool.setContentsMargins(5, 5, 5, 5)
+ tool.setWindowTitle("Pick Asset")
+ tool.setStyleSheet(style.load_stylesheet())
+ tool.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.Popup)
+ tool.refresh()
+
+ # Select the current asset if there is any
+ name = parm.eval()
+ if name:
+ from avalon import io
+
+ db_asset = io.find_one({"name": name, "type": "asset"})
+ if db_asset:
+ silo = db_asset.get("silo")
+ if silo:
+ tool.set_silo(silo)
+ tool.select_assets([name], expand=True)
+
+    # Position the tool window near the cursor (anchored at its top-right)
+ tool.resize(250, 400)
+ tool.move(tool.mapFromGlobal(pos) - QtCore.QPoint(tool.width(), 0))
+
+ def set_parameter_callback(index):
+ name = index.data(tool.model.DocumentRole)["name"]
+ parm.set(name)
+ tool.close()
+
+ tool.view.doubleClicked.connect(set_parameter_callback)
+ tool.show()
+
+
+def add_usd_output_processor(ropnode, processor):
+ """Add USD Output Processor to USD Rop node.
+
+ Args:
+ ropnode (hou.RopNode): The USD Rop node.
+ processor (str): The output processor name. This is the basename of
+ the python file that contains the Houdini USD Output Processor.
+
+ """
+
+ import loputils
+
+ loputils.handleOutputProcessorAdd(
+ {
+ "node": ropnode,
+ "parm": ropnode.parm("outputprocessors"),
+ "script_value": processor,
+ }
+ )
+
+
+def remove_usd_output_processor(ropnode, processor):
+ """Removes USD Output Processor from USD Rop node.
+
+ Args:
+ ropnode (hou.RopNode): The USD Rop node.
+ processor (str): The output processor name. This is the basename of
+ the python file that contains the Houdini USD Output Processor.
+
+ """
+ import loputils
+
+ parm = ropnode.parm(processor + "_remove")
+ if not parm:
+ raise RuntimeError(
+ "Output Processor %s does not "
+ "exist on %s" % (processor, ropnode.name())
+ )
+
+ loputils.handleOutputProcessorRemove({"node": ropnode, "parm": parm})
+
+
+@contextlib.contextmanager
+def outputprocessors(ropnode, processors=tuple(), disable_all_others=True):
+ """Context manager to temporarily add Output Processors to USD ROP node.
+
+ Args:
+ ropnode (hou.RopNode): The USD Rop node.
+ processors (tuple or list): The processors to add.
+ disable_all_others (bool, Optional): Whether to disable all
+ output processors currently on the ROP node that are not in the
+ `processors` list passed to this function.
+
+ """
+ # TODO: Add support for forcing the correct Order of the processors
+
+ original = []
+ prefix = "enableoutputprocessor_"
+ processor_parms = ropnode.globParms(prefix + "*")
+ for parm in processor_parms:
+ original.append((parm, parm.eval()))
+
+ if disable_all_others:
+ for parm in processor_parms:
+ parm.set(False)
+
+ added = []
+ for processor in processors:
+
+ parm = ropnode.parm(prefix + processor)
+ if parm:
+ # If processor already exists, just enable it
+ parm.set(True)
+
+ else:
+ # Else add the new processor
+ add_usd_output_processor(ropnode, processor)
+ added.append(processor)
+
+ try:
+ yield
+ finally:
+
+ # Remove newly added processors
+ for processor in added:
+ remove_usd_output_processor(ropnode, processor)
+
+ # Revert to original values
+ for parm, value in original:
+ if parm:
+ parm.set(value)
+
+
+def get_usd_rop_loppath(node):
+
+ # Get sop path
+ node_type = node.type().name()
+ if node_type == "usd":
+ return node.parm("loppath").evalAsNode()
+
+ elif node_type in {"usd_rop", "usdrender_rop"}:
+ # Inside Solaris e.g. /stage (not in ROP context)
+ # When incoming connection is present it takes it directly
+ inputs = node.inputs()
+ if inputs:
+ return inputs[0]
+ else:
+ return node.parm("loppath").evalAsNode()
+
+
+def get_layer_save_path(layer):
+ """Get custom HoudiniLayerInfo->HoudiniSavePath from SdfLayer.
+
+ Args:
+        layer (pxr.Sdf.Layer): The Layer to retrieve the save path data from.
+
+ Returns:
+ str or None: Path to save to when data exists.
+
+ """
+ hou_layer_info = layer.rootPrims.get("HoudiniLayerInfo")
+ if not hou_layer_info:
+ return
+
+ save_path = hou_layer_info.customData.get("HoudiniSavePath", None)
+ if save_path:
+ # Unfortunately this doesn't actually resolve the full absolute path
+ return layer.ComputeAbsolutePath(save_path)
+
+
+def get_referenced_layers(layer):
+ """Return SdfLayers for all external references of the current layer
+
+ Args:
+        layer (pxr.Sdf.Layer): The Layer to get the external references from.
+
+ Returns:
+ list: List of pxr.Sdf.Layer that are external references to this layer
+
+ """
+
+ layers = []
+ for layer_id in layer.GetExternalReferences():
+ layer = Sdf.Layer.Find(layer_id)
+ if not layer:
+ # A file may not be in memory and is
+ # referenced from disk. As such it cannot
+ # be found. We will ignore those layers.
+ continue
+
+ layers.append(layer)
+
+ return layers
+
+
+def iter_layer_recursive(layer):
+ """Recursively iterate all 'external' referenced layers"""
+
+ layers = get_referenced_layers(layer)
+ traversed = set(layers) # Avoid recursion to itself (if even possible)
+ traverse = list(layers)
+ for layer in traverse:
+
+ # Include children layers (recursion)
+ children_layers = get_referenced_layers(layer)
+ children_layers = [x for x in children_layers if x not in traversed]
+ traverse.extend(children_layers)
+ traversed.update(children_layers)
+
+ yield layer
+
+
+def get_configured_save_layers(usd_rop):
+
+ lop_node = get_usd_rop_loppath(usd_rop)
+ stage = lop_node.stage(apply_viewport_overrides=False)
+ if not stage:
+ raise RuntimeError(
+ "No valid USD stage for ROP node: " "%s" % usd_rop.path()
+ )
+
+ root_layer = stage.GetRootLayer()
+
+ save_layers = []
+ for layer in iter_layer_recursive(root_layer):
+ save_path = get_layer_save_path(layer)
+ if save_path is not None:
+ save_layers.append(layer)
+
+ return save_layers
diff --git a/openpype/hosts/houdini/hooks/set_paths.py b/openpype/hosts/houdini/hooks/set_paths.py
new file mode 100644
index 0000000000..cd2f98fb76
--- /dev/null
+++ b/openpype/hosts/houdini/hooks/set_paths.py
@@ -0,0 +1,18 @@
+from openpype.lib import PreLaunchHook
+import os
+
+
+class SetPath(PreLaunchHook):
+ """Set current dir to workdir.
+
+ Hook `GlobalHostDataHook` must be executed before this hook.
+ """
+ app_groups = ["houdini"]
+
+ def execute(self):
+ workdir = self.launch_context.env.get("AVALON_WORKDIR", "")
+ if not workdir:
+ self.log.warning("BUG: Workdir is not filled.")
+ return
+
+ os.chdir(workdir)
diff --git a/openpype/hosts/houdini/plugins/create/create_alembic_camera.py b/openpype/hosts/houdini/plugins/create/create_alembic_camera.py
index adcfb48539..eef86005f5 100644
--- a/openpype/hosts/houdini/plugins/create/create_alembic_camera.py
+++ b/openpype/hosts/houdini/plugins/create/create_alembic_camera.py
@@ -18,28 +18,29 @@ class CreateAlembicCamera(plugin.Creator):
# Set node type to create for output
self.data.update({"node_type": "alembic"})
- def process(self):
- instance = super(CreateAlembicCamera, self).process()
+ def _process(self, instance):
+ """Creator main entry point.
+ Args:
+ instance (hou.Node): Created Houdini instance.
+
+ """
parms = {
"filename": "$HIP/pyblish/%s.abc" % self.name,
- "use_sop_path": False
+ "use_sop_path": False,
}
if self.nodes:
node = self.nodes[0]
path = node.path()
-
# Split the node path into the first root and the remainder
# So we can set the root and objects parameters correctly
_, root, remainder = path.split("/", 2)
- parms.update({
- "root": "/" + root,
- "objects": remainder
- })
+ parms.update({"root": "/" + root, "objects": remainder})
instance.setParms(parms)
# Lock the Use Sop Path setting so the
# user doesn't accidentally enable it.
instance.parm("use_sop_path").lock(True)
+ instance.parm("trange").set(1)
diff --git a/openpype/hosts/houdini/plugins/create/create_composite.py b/openpype/hosts/houdini/plugins/create/create_composite.py
new file mode 100644
index 0000000000..e278708076
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/create/create_composite.py
@@ -0,0 +1,44 @@
+from openpype.hosts.houdini.api import plugin
+
+
+class CreateCompositeSequence(plugin.Creator):
+ """Composite ROP to Image Sequence"""
+
+ label = "Composite (Image Sequence)"
+ family = "imagesequence"
+ icon = "gears"
+
+ def __init__(self, *args, **kwargs):
+ super(CreateCompositeSequence, self).__init__(*args, **kwargs)
+
+ # Remove the active, we are checking the bypass flag of the nodes
+ self.data.pop("active", None)
+
+ # Type of ROP node to create
+ self.data.update({"node_type": "comp"})
+
+ def _process(self, instance):
+ """Creator main entry point.
+
+ Args:
+ instance (hou.Node): Created Houdini instance.
+
+ """
+ parms = {"copoutput": "$HIP/pyblish/%s.$F4.exr" % self.name}
+
+ if self.nodes:
+ node = self.nodes[0]
+ parms.update({"coppath": node.path()})
+
+ instance.setParms(parms)
+
+ # Lock any parameters in this list
+ to_lock = ["prim_to_detail_pattern"]
+ for name in to_lock:
+ try:
+ parm = instance.parm(name)
+ parm.lock(True)
+ except AttributeError:
+ # missing lock pattern
+ self.log.debug(
+ "missing lock pattern {}".format(name))
diff --git a/openpype/hosts/houdini/plugins/create/create_pointcache.py b/openpype/hosts/houdini/plugins/create/create_pointcache.py
index 6be854ac28..feb683edf6 100644
--- a/openpype/hosts/houdini/plugins/create/create_pointcache.py
+++ b/openpype/hosts/houdini/plugins/create/create_pointcache.py
@@ -17,21 +17,29 @@ class CreatePointCache(plugin.Creator):
self.data.update({"node_type": "alembic"})
- def process(self):
- instance = super(CreatePointCache, self).process()
+ def _process(self, instance):
+ """Creator main entry point.
- parms = {"use_sop_path": True, # Export single node from SOP Path
- "build_from_path": True, # Direct path of primitive in output
- "path_attrib": "path", # Pass path attribute for output
- "prim_to_detail_pattern": "cbId",
- "format": 2, # Set format to Ogawa
- "filename": "$HIP/pyblish/%s.abc" % self.name}
+ Args:
+ instance (hou.Node): Created Houdini instance.
+
+ """
+ parms = {
+ "use_sop_path": True, # Export single node from SOP Path
+ "build_from_path": True, # Direct path of primitive in output
+ "path_attrib": "path", # Pass path attribute for output
+ "prim_to_detail_pattern": "cbId",
+ "format": 2, # Set format to Ogawa
+ "facesets": 0, # No face sets (by default exclude them)
+ "filename": "$HIP/pyblish/%s.abc" % self.name,
+ }
if self.nodes:
node = self.nodes[0]
parms.update({"sop_path": node.path()})
instance.setParms(parms)
+ instance.parm("trange").set(1)
# Lock any parameters in this list
to_lock = ["prim_to_detail_pattern"]
diff --git a/openpype/hosts/houdini/plugins/create/create_redshift_rop.py b/openpype/hosts/houdini/plugins/create/create_redshift_rop.py
new file mode 100644
index 0000000000..6949ca169b
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/create/create_redshift_rop.py
@@ -0,0 +1,70 @@
+import hou
+from openpype.hosts.houdini.api import plugin
+
+
+class CreateRedshiftROP(plugin.Creator):
+ """Redshift ROP"""
+
+ label = "Redshift ROP"
+ family = "redshift_rop"
+ icon = "magic"
+ defaults = ["master"]
+
+ def __init__(self, *args, **kwargs):
+ super(CreateRedshiftROP, self).__init__(*args, **kwargs)
+
+ # Clear the family prefix from the subset
+ subset = self.data["subset"]
+ subset_no_prefix = subset[len(self.family):]
+ subset_no_prefix = subset_no_prefix[0].lower() + subset_no_prefix[1:]
+ self.data["subset"] = subset_no_prefix
+
+ # Add chunk size attribute
+ self.data["chunkSize"] = 10
+
+ # Remove the active, we are checking the bypass flag of the nodes
+ self.data.pop("active", None)
+
+ self.data.update({"node_type": "Redshift_ROP"})
+
+ def _process(self, instance):
+ """Creator main entry point.
+
+ Args:
+ instance (hou.Node): Created Houdini instance.
+
+ """
+ basename = instance.name()
+ instance.setName(basename + "_ROP", unique_name=True)
+
+ # Also create the linked Redshift IPR Rop
+ try:
+ ipr_rop = self.parent.createNode(
+ "Redshift_IPR", node_name=basename + "_IPR"
+ )
+ except hou.OperationFailed:
+ raise Exception(("Cannot create Redshift node. Is Redshift "
+ "installed and enabled?"))
+
+ # Move it to directly under the Redshift ROP
+ ipr_rop.setPosition(instance.position() + hou.Vector2(0, -1))
+
+ # Set the linked rop to the Redshift ROP
+ ipr_rop.parm("linked_rop").set(ipr_rop.relativePathTo(instance))
+
+ prefix = '${HIP}/render/${HIPNAME}/`chs("subset")`.${AOV}.$F4.exr'
+ parms = {
+ # Render frame range
+ "trange": 1,
+ # Redshift ROP settings
+ "RS_outputFileNamePrefix": prefix,
+ "RS_outputMultilayerMode": 0, # no multi-layered exr
+ "RS_outputBeautyAOVSuffix": "beauty",
+ }
+ instance.setParms(parms)
+
+ # Lock some Avalon attributes
+ to_lock = ["family", "id"]
+ for name in to_lock:
+ parm = instance.parm(name)
+ parm.lock(True)
diff --git a/openpype/hosts/houdini/plugins/create/create_usd.py b/openpype/hosts/houdini/plugins/create/create_usd.py
new file mode 100644
index 0000000000..5bcb7840c0
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/create/create_usd.py
@@ -0,0 +1,47 @@
+from openpype.hosts.houdini.api import plugin
+
+
+class CreateUSD(plugin.Creator):
+ """Universal Scene Description"""
+
+ label = "USD (experimental)"
+ family = "usd"
+ icon = "gears"
+ enabled = False
+
+ def __init__(self, *args, **kwargs):
+ super(CreateUSD, self).__init__(*args, **kwargs)
+
+ # Remove the active, we are checking the bypass flag of the nodes
+ self.data.pop("active", None)
+
+ self.data.update({"node_type": "usd"})
+
+ def _process(self, instance):
+ """Creator main entry point.
+
+ Args:
+ instance (hou.Node): Created Houdini instance.
+
+ """
+ parms = {
+ "lopoutput": "$HIP/pyblish/%s.usd" % self.name,
+ "enableoutputprocessor_simplerelativepaths": False,
+ }
+
+ if self.nodes:
+ node = self.nodes[0]
+ parms.update({"loppath": node.path()})
+
+ instance.setParms(parms)
+
+ # Lock any parameters in this list
+ to_lock = [
+ "fileperframe",
+ # Lock some Avalon attributes
+ "family",
+ "id",
+ ]
+ for name in to_lock:
+ parm = instance.parm(name)
+ parm.lock(True)
diff --git a/openpype/hosts/houdini/plugins/create/create_usdrender.py b/openpype/hosts/houdini/plugins/create/create_usdrender.py
new file mode 100644
index 0000000000..cb3fe3f02b
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/create/create_usdrender.py
@@ -0,0 +1,42 @@
+import hou
+from openpype.hosts.houdini.api import plugin
+
+
+class CreateUSDRender(plugin.Creator):
+ """USD Render ROP in /stage"""
+
+ label = "USD Render (experimental)"
+ family = "usdrender"
+ icon = "magic"
+
+ def __init__(self, *args, **kwargs):
+ super(CreateUSDRender, self).__init__(*args, **kwargs)
+
+ self.parent = hou.node("/stage")
+
+ # Remove the active, we are checking the bypass flag of the nodes
+ self.data.pop("active", None)
+
+ self.data.update({"node_type": "usdrender"})
+
+ def _process(self, instance):
+ """Creator main entry point.
+
+ Args:
+ instance (hou.Node): Created Houdini instance.
+
+ """
+ parms = {
+ # Render frame range
+ "trange": 1
+ }
+ if self.nodes:
+ node = self.nodes[0]
+ parms.update({"loppath": node.path()})
+ instance.setParms(parms)
+
+ # Lock some Avalon attributes
+ to_lock = ["family", "id"]
+ for name in to_lock:
+ parm = instance.parm(name)
+ parm.lock(True)
diff --git a/openpype/hosts/houdini/plugins/create/create_vbd_cache.py b/openpype/hosts/houdini/plugins/create/create_vbd_cache.py
index f8f3bbf9c3..242c21fc72 100644
--- a/openpype/hosts/houdini/plugins/create/create_vbd_cache.py
+++ b/openpype/hosts/houdini/plugins/create/create_vbd_cache.py
@@ -18,11 +18,18 @@ class CreateVDBCache(plugin.Creator):
# Set node type to create for output
self.data["node_type"] = "geometry"
- def process(self):
- instance = super(CreateVDBCache, self).process()
+ def _process(self, instance):
+ """Creator main entry point.
- parms = {"sopoutput": "$HIP/pyblish/%s.$F4.vdb" % self.name,
- "initsim": True}
+ Args:
+ instance (hou.Node): Created Houdini instance.
+
+ """
+ parms = {
+ "sopoutput": "$HIP/pyblish/%s.$F4.vdb" % self.name,
+ "initsim": True,
+ "trange": 1
+ }
if self.nodes:
node = self.nodes[0]
diff --git a/openpype/hosts/houdini/plugins/load/actions.py b/openpype/hosts/houdini/plugins/load/actions.py
new file mode 100644
index 0000000000..6e9410ff58
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/load/actions.py
@@ -0,0 +1,86 @@
+"""A module containing generic loader actions that will display in the Loader.
+
+"""
+
+from avalon import api
+
+
+class SetFrameRangeLoader(api.Loader):
+ """Set Houdini frame range"""
+
+ families = [
+ "animation",
+ "camera",
+ "pointcache",
+ "vdbcache",
+ "usd",
+ ]
+ representations = ["abc", "vdb", "usd"]
+
+ label = "Set frame range"
+ order = 11
+ icon = "clock-o"
+ color = "white"
+
+ def load(self, context, name, namespace, data):
+
+ import hou
+
+ version = context["version"]
+ version_data = version.get("data", {})
+
+ start = version_data.get("startFrame", None)
+ end = version_data.get("endFrame", None)
+
+ if start is None or end is None:
+ print(
+ "Skipping setting frame range because start or "
+ "end frame data is missing.."
+ )
+ return
+
+ hou.playbar.setFrameRange(start, end)
+ hou.playbar.setPlaybackRange(start, end)
+
+
+class SetFrameRangeWithHandlesLoader(api.Loader):
+ """Set Maya frame range including pre- and post-handles"""
+
+ families = [
+ "animation",
+ "camera",
+ "pointcache",
+ "vdbcache",
+ "usd",
+ ]
+ representations = ["abc", "vdb", "usd"]
+
+ label = "Set frame range (with handles)"
+ order = 12
+ icon = "clock-o"
+ color = "white"
+
+ def load(self, context, name, namespace, data):
+
+ import hou
+
+ version = context["version"]
+ version_data = version.get("data", {})
+
+ start = version_data.get("startFrame", None)
+ end = version_data.get("endFrame", None)
+
+ if start is None or end is None:
+ print(
+ "Skipping setting frame range because start or "
+ "end frame data is missing.."
+ )
+ return
+
+ # Include handles
+ handles = version_data.get("handles", 0)
+ start -= handles
+ end += handles
+
+ hou.playbar.setFrameRange(start, end)
+ hou.playbar.setPlaybackRange(start, end)
diff --git a/openpype/hosts/houdini/plugins/load/load_alembic.py b/openpype/hosts/houdini/plugins/load/load_alembic.py
index 8fc2b6a61a..cd0f0f0d2d 100644
--- a/openpype/hosts/houdini/plugins/load/load_alembic.py
+++ b/openpype/hosts/houdini/plugins/load/load_alembic.py
@@ -6,9 +6,7 @@ from avalon.houdini import pipeline, lib
class AbcLoader(api.Loader):
"""Specific loader of Alembic for the avalon.animation family"""
- families = ["model",
- "animation",
- "pointcache"]
+ families = ["model", "animation", "pointcache", "gpuCache"]
label = "Load Alembic"
representations = ["abc"]
order = -10
@@ -68,8 +66,9 @@ class AbcLoader(api.Loader):
null = container.createNode("null", node_name="OUT".format(name))
null.setInput(0, normal_node)
- # Set display on last node
- null.setDisplayFlag(True)
+ # Ensure display flag is on the Alembic input node and not on the OUT
+ # node to optimize "debug" displaying in the viewport.
+ alembic.setDisplayFlag(True)
# Set new position for unpack node else it gets cluttered
nodes = [container, alembic, unpack, normal_node, null]
@@ -78,18 +77,22 @@ class AbcLoader(api.Loader):
self[:] = nodes
- return pipeline.containerise(node_name,
- namespace,
- nodes,
- context,
- self.__class__.__name__)
+ return pipeline.containerise(
+ node_name,
+ namespace,
+ nodes,
+ context,
+ self.__class__.__name__,
+ suffix="",
+ )
def update(self, container, representation):
node = container["node"]
try:
- alembic_node = next(n for n in node.children() if
- n.type().name() == "alembic")
+ alembic_node = next(
+ n for n in node.children() if n.type().name() == "alembic"
+ )
except StopIteration:
self.log.error("Could not find node of type `alembic`")
return
diff --git a/openpype/hosts/houdini/plugins/load/load_camera.py b/openpype/hosts/houdini/plugins/load/load_camera.py
index a3d67f6e5e..83246b7d97 100644
--- a/openpype/hosts/houdini/plugins/load/load_camera.py
+++ b/openpype/hosts/houdini/plugins/load/load_camera.py
@@ -1,8 +1,79 @@
from avalon import api
-
from avalon.houdini import pipeline, lib
+ARCHIVE_EXPRESSION = ('__import__("_alembic_hom_extensions")'
+ '.alembicGetCameraDict')
+
+
+def transfer_non_default_values(src, dest, ignore=None):
+ """Copy parm from src to dest.
+
+ Because the Alembic Archive rebuilds the entire node
+ hierarchy on triggering "Build Hierarchy" we want to
+ preserve any local tweaks made by the user on the camera
+ for ease of use. That could be a background image, a
+ resolution change or even Redshift camera parameters.
+
+ We try to do so by finding all Parms that exist on both
+ source and destination node, include only those that both
+ are not at their default value, they must be visible,
+ we exclude those that have the special "alembic archive"
+ channel expression and ignore certain Parm types.
+
+ """
+ import hou
+
+ src.updateParmStates()
+
+ for parm in src.allParms():
+
+ if ignore and parm.name() in ignore:
+ continue
+
+ # If destination parm does not exist, ignore..
+ dest_parm = dest.parm(parm.name())
+ if not dest_parm:
+ continue
+
+ # Ignore values that are currently at default
+ if parm.isAtDefault() and dest_parm.isAtDefault():
+ continue
+
+ if not parm.isVisible():
+ # Ignore hidden parameters, assume they
+ # are implementation details
+ continue
+
+ expression = None
+ try:
+ expression = parm.expression()
+ except hou.OperationFailed:
+ # No expression present
+ pass
+
+ if expression is not None and ARCHIVE_EXPRESSION in expression:
+ # Assume it's part of the automated connections that the
+ # Alembic Archive makes on loading of the camera and thus we do
+ # not want to transfer the expression
+ continue
+
+ # Ignore folders, separators, etc.
+ ignore_types = {
+ hou.parmTemplateType.Toggle,
+ hou.parmTemplateType.Menu,
+ hou.parmTemplateType.Button,
+ hou.parmTemplateType.FolderSet,
+ hou.parmTemplateType.Separator,
+ hou.parmTemplateType.Label,
+ }
+ if parm.parmTemplate().type() in ignore_types:
+ continue
+
+ print("Preserving attribute: %s" % parm.name())
+ dest_parm.setFromParm(parm)
+
+
class CameraLoader(api.Loader):
"""Specific loader of Alembic for the avalon.animation family"""
@@ -30,7 +101,7 @@ class CameraLoader(api.Loader):
counter = 1
asset_name = context["asset"]["name"]
- namespace = namespace if namespace else asset_name
+ namespace = namespace or asset_name
formatted = "{}_{}".format(namespace, name) if namespace else name
node_name = "{0}_{1:03d}".format(formatted, counter)
@@ -59,7 +130,8 @@ class CameraLoader(api.Loader):
namespace,
nodes,
context,
- self.__class__.__name__)
+ self.__class__.__name__,
+ suffix="")
def update(self, container, representation):
@@ -73,14 +145,40 @@ class CameraLoader(api.Loader):
node.setParms({"fileName": file_path,
"representation": str(representation["_id"])})
+ # Store the cam temporarily next to the Alembic Archive
+ # so that we can preserve parm values the user set on it
+ # after build hierarchy was triggered.
+ old_camera = self._get_camera(node)
+ temp_camera = old_camera.copyTo(node.parent())
+
# Rebuild
node.parm("buildHierarchy").pressButton()
+ # Apply values to the new camera
+ new_camera = self._get_camera(node)
+ transfer_non_default_values(temp_camera,
+ new_camera,
+ # The hidden uniform scale attribute
+ # gets a default connection to
+ # "icon_scale" just skip that completely
+ ignore={"scale"})
+
+ temp_camera.destroy()
+
def remove(self, container):
node = container["node"]
node.destroy()
+ def _get_camera(self, node):
+ import hou
+ cameras = node.recursiveGlob("*",
+ filter=hou.nodeTypeFilter.ObjCamera,
+ include_subnets=False)
+
+ assert len(cameras) == 1, "Camera instance must have only one camera"
+ return cameras[0]
+
def create_and_connect(self, node, node_type, name=None):
"""Create a node within a node which and connect it to the input
@@ -93,27 +191,10 @@ class CameraLoader(api.Loader):
hou.Node
"""
+ if name:
+ new_node = node.createNode(node_type, node_name=name)
+ else:
+ new_node = node.createNode(node_type)
- import hou
-
- try:
-
- if name:
- new_node = node.createNode(node_type, node_name=name)
- else:
- new_node = node.createNode(node_type)
-
- new_node.moveToGoodPosition()
-
- try:
- input_node = next(i for i in node.allItems() if
- isinstance(i, hou.SubnetIndirectInput))
- except StopIteration:
- return new_node
-
- new_node.setInput(0, input_node)
- return new_node
-
- except Exception:
- raise RuntimeError("Could not created node type `%s` in node `%s`"
- % (node_type, node))
+ new_node.moveToGoodPosition()
+ return new_node
diff --git a/openpype/hosts/houdini/plugins/load/load_image.py b/openpype/hosts/houdini/plugins/load/load_image.py
new file mode 100644
index 0000000000..4ff2777d77
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/load/load_image.py
@@ -0,0 +1,123 @@
+import os
+
+from avalon import api
+from avalon.houdini import pipeline, lib
+
+import hou
+
+
+def get_image_avalon_container():
+ """The COP2 files must be in a COP2 network.
+
+ So we maintain a single entry point within AVALON_CONTAINERS,
+ just for ease of use.
+
+ """
+
+ path = pipeline.AVALON_CONTAINERS
+ avalon_container = hou.node(path)
+ if not avalon_container:
+ # Let's create avalon container secretly
+ # but make sure the pipeline still is built the
+ # way we anticipate it was built, asserting it.
+ assert path == "/obj/AVALON_CONTAINERS"
+
+ parent = hou.node("/obj")
+ avalon_container = parent.createNode(
+ "subnet", node_name="AVALON_CONTAINERS"
+ )
+
+ image_container = hou.node(path + "/IMAGES")
+ if not image_container:
+ image_container = avalon_container.createNode(
+ "cop2net", node_name="IMAGES"
+ )
+ image_container.moveToGoodPosition()
+
+ return image_container
+
+
+class ImageLoader(api.Loader):
+ """Specific loader of Alembic for the avalon.animation family"""
+
+ families = ["colorbleed.imagesequence"]
+ label = "Load Image (COP2)"
+ representations = ["*"]
+ order = -10
+
+ icon = "code-fork"
+ color = "orange"
+
+ def load(self, context, name=None, namespace=None, data=None):
+
+ # Format file name, Houdini only wants forward slashes
+ file_path = os.path.normpath(self.fname)
+ file_path = file_path.replace("\\", "/")
+ file_path = self._get_file_sequence(file_path)
+
+ # Get the root node
+ parent = get_image_avalon_container()
+
+ # Define node name
+ namespace = namespace if namespace else context["asset"]["name"]
+ node_name = "{}_{}".format(namespace, name) if namespace else name
+
+ node = parent.createNode("file", node_name=node_name)
+ node.moveToGoodPosition()
+
+ node.setParms({"filename1": file_path})
+
+ # Imprint it manually
+ data = {
+ "schema": "avalon-core:container-2.0",
+ "id": pipeline.AVALON_CONTAINER_ID,
+ "name": node_name,
+ "namespace": namespace,
+ "loader": str(self.__class__.__name__),
+ "representation": str(context["representation"]["_id"]),
+ }
+
+ # todo: add folder="Avalon"
+ lib.imprint(node, data)
+
+ return node
+
+ def update(self, container, representation):
+
+ node = container["node"]
+
+ # Update the file path
+ file_path = api.get_representation_path(representation)
+ file_path = file_path.replace("\\", "/")
+ file_path = self._get_file_sequence(file_path)
+
+ # Update attributes
+ node.setParms(
+ {
+ "filename1": file_path,
+ "representation": str(representation["_id"]),
+ }
+ )
+
+ def remove(self, container):
+
+ node = container["node"]
+
+ # Let's clean up the IMAGES COP2 network
+ # if it ends up being empty and we deleted
+ # the last file node. Store the parent
+ # before we delete the node.
+ parent = node.parent()
+
+ node.destroy()
+
+ if not parent.children():
+ parent.destroy()
+
+ def _get_file_sequence(self, root):
+ files = sorted(os.listdir(root))
+
+ first_fname = files[0]
+ prefix, padding, suffix = first_fname.rsplit(".", 2)
+ fname = ".".join([prefix, "$F{}".format(len(padding)), suffix])
+ return os.path.join(root, fname).replace("\\", "/")
diff --git a/openpype/hosts/houdini/plugins/load/load_usd_layer.py b/openpype/hosts/houdini/plugins/load/load_usd_layer.py
new file mode 100644
index 0000000000..7483101409
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/load/load_usd_layer.py
@@ -0,0 +1,80 @@
+from avalon import api
+from avalon.houdini import pipeline, lib
+
+
+class USDSublayerLoader(api.Loader):
+ """Sublayer USD file in Solaris"""
+
+ families = [
+ "colorbleed.usd",
+ "colorbleed.pointcache",
+ "colorbleed.animation",
+ "colorbleed.camera",
+ "usdCamera",
+ ]
+ label = "Sublayer USD"
+ representations = ["usd", "usda", "usdlc", "usdnc", "abc"]
+ order = 1
+
+ icon = "code-fork"
+ color = "orange"
+
+ def load(self, context, name=None, namespace=None, data=None):
+
+ import os
+ import hou
+
+ # Format file name, Houdini only wants forward slashes
+ file_path = os.path.normpath(self.fname)
+ file_path = file_path.replace("\\", "/")
+
+ # Get the root node
+ stage = hou.node("/stage")
+
+ # Define node name
+ namespace = namespace if namespace else context["asset"]["name"]
+ node_name = "{}_{}".format(namespace, name) if namespace else name
+
+ # Create USD reference
+ container = stage.createNode("sublayer", node_name=node_name)
+ container.setParms({"filepath1": file_path})
+ container.moveToGoodPosition()
+
+ # Imprint it manually
+ data = {
+ "schema": "avalon-core:container-2.0",
+ "id": pipeline.AVALON_CONTAINER_ID,
+ "name": node_name,
+ "namespace": namespace,
+ "loader": str(self.__class__.__name__),
+ "representation": str(context["representation"]["_id"]),
+ }
+
+ # todo: add folder="Avalon"
+ lib.imprint(container, data)
+
+ return container
+
+ def update(self, container, representation):
+
+ node = container["node"]
+
+ # Update the file path
+ file_path = api.get_representation_path(representation)
+ file_path = file_path.replace("\\", "/")
+
+ # Update attributes
+ node.setParms(
+ {
+ "filepath1": file_path,
+ "representation": str(representation["_id"]),
+ }
+ )
+
+ # Reload files
+ node.parm("reload").pressButton()
+
+ def remove(self, container):
+
+ node = container["node"]
+ node.destroy()
diff --git a/openpype/hosts/houdini/plugins/load/load_usd_reference.py b/openpype/hosts/houdini/plugins/load/load_usd_reference.py
new file mode 100644
index 0000000000..cab3cb5269
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/load/load_usd_reference.py
@@ -0,0 +1,80 @@
+from avalon import api
+from avalon.houdini import pipeline, lib
+
+
+class USDReferenceLoader(api.Loader):
+ """Reference USD file in Solaris"""
+
+ families = [
+ "colorbleed.usd",
+ "colorbleed.pointcache",
+ "colorbleed.animation",
+ "colorbleed.camera",
+ "usdCamera",
+ ]
+ label = "Reference USD"
+ representations = ["usd", "usda", "usdlc", "usdnc", "abc"]
+ order = -8
+
+ icon = "code-fork"
+ color = "orange"
+
+ def load(self, context, name=None, namespace=None, data=None):
+
+ import os
+ import hou
+
+ # Format file name, Houdini only wants forward slashes
+ file_path = os.path.normpath(self.fname)
+ file_path = file_path.replace("\\", "/")
+
+ # Get the root node
+ stage = hou.node("/stage")
+
+ # Define node name
+ namespace = namespace if namespace else context["asset"]["name"]
+ node_name = "{}_{}".format(namespace, name) if namespace else name
+
+ # Create USD reference
+ container = stage.createNode("reference", node_name=node_name)
+ container.setParms({"filepath1": file_path})
+ container.moveToGoodPosition()
+
+ # Imprint it manually
+ data = {
+ "schema": "avalon-core:container-2.0",
+ "id": pipeline.AVALON_CONTAINER_ID,
+ "name": node_name,
+ "namespace": namespace,
+ "loader": str(self.__class__.__name__),
+ "representation": str(context["representation"]["_id"]),
+ }
+
+ # todo: add folder="Avalon"
+ lib.imprint(container, data)
+
+ return container
+
+ def update(self, container, representation):
+
+ node = container["node"]
+
+ # Update the file path
+ file_path = api.get_representation_path(representation)
+ file_path = file_path.replace("\\", "/")
+
+ # Update attributes
+ node.setParms(
+ {
+ "filepath1": file_path,
+ "representation": str(representation["_id"]),
+ }
+ )
+
+ # Reload files
+ node.parm("reload").pressButton()
+
+ def remove(self, container):
+
+ node = container["node"]
+ node.destroy()
diff --git a/openpype/hosts/houdini/plugins/load/load_vdb.py b/openpype/hosts/houdini/plugins/load/load_vdb.py
new file mode 100644
index 0000000000..5f7e400b39
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/load/load_vdb.py
@@ -0,0 +1,110 @@
+import os
+import re
+from avalon import api
+
+from avalon.houdini import pipeline
+
+
+class VdbLoader(api.Loader):
+ """Load VDB caches for the vdbcache family"""
+
+ families = ["vdbcache"]
+ label = "Load VDB"
+ representations = ["vdb"]
+ order = -10
+ icon = "code-fork"
+ color = "orange"
+
+ def load(self, context, name=None, namespace=None, data=None):
+
+ import hou
+
+ # Get the root node
+ obj = hou.node("/obj")
+
+ # Define node name
+ namespace = namespace if namespace else context["asset"]["name"]
+ node_name = "{}_{}".format(namespace, name) if namespace else name
+
+ # Create a new geo node
+ container = obj.createNode("geo", node_name=node_name)
+
+ # Remove the file node, it only loads static meshes
+ # Houdini 17 has removed the file node from the geo node
+ file_node = container.node("file1")
+ if file_node:
+ file_node.destroy()
+
+ # Explicitly create a file node
+ file_node = container.createNode("file", node_name=node_name)
+ file_node.setParms({"file": self.format_path(self.fname)})
+
+ # Set display on last node
+ file_node.setDisplayFlag(True)
+
+ nodes = [container, file_node]
+ self[:] = nodes
+
+ return pipeline.containerise(
+ node_name,
+ namespace,
+ nodes,
+ context,
+ self.__class__.__name__,
+ suffix="",
+ )
+
+ def format_path(self, path):
+ """Format file path correctly for single vdb or vdb sequence."""
+ if not os.path.exists(path):
+ raise RuntimeError("Path does not exist: %s" % path)
+
+ # The path is either a single file or sequence in a folder.
+ is_single_file = os.path.isfile(path)
+ if is_single_file:
+ filename = path
+ else:
+ # The path points to the publish .vdb sequence folder so we
+ # find the first file in there that ends with .vdb
+ files = sorted(os.listdir(path))
+ first = next((x for x in files if x.endswith(".vdb")), None)
+ if first is None:
+ raise RuntimeError(
+ "Couldn't find first .vdb file of "
+ "sequence in: %s" % path
+ )
+
+ # Set .vdb to $F.vdb
+ first = re.sub(r"\.(\d+)\.vdb$", ".$F.vdb", first)
+
+ filename = os.path.join(path, first)
+
+ filename = os.path.normpath(filename)
+ filename = filename.replace("\\", "/")
+
+ return filename
+
+ def update(self, container, representation):
+
+ node = container["node"]
+ try:
+ file_node = next(
+ n for n in node.children() if n.type().name() == "file"
+ )
+ except StopIteration:
+ self.log.error("Could not find node of type `file`")
+ return
+
+ # Update the file path
+ file_path = api.get_representation_path(representation)
+ file_path = self.format_path(file_path)
+
+ file_node.setParms({"file": file_path})
+
+ # Update attribute
+ node.setParms({"representation": str(representation["_id"])})
+
+ def remove(self, container):
+
+ node = container["node"]
+ node.destroy()
diff --git a/openpype/hosts/houdini/plugins/load/show_usdview.py b/openpype/hosts/houdini/plugins/load/show_usdview.py
new file mode 100644
index 0000000000..f23974094e
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/load/show_usdview.py
@@ -0,0 +1,43 @@
+from avalon import api
+
+
+class ShowInUsdview(api.Loader):
+ """Open USD file in usdview"""
+
+ families = ["colorbleed.usd"]
+ label = "Show in usdview"
+ representations = ["usd", "usda", "usdlc", "usdnc"]
+ order = 10
+
+ icon = "code-fork"
+ color = "white"
+
+ def load(self, context, name=None, namespace=None, data=None):
+
+ import os
+ import subprocess
+
+ import avalon.lib as lib
+
+ usdview = lib.which("usdview")
+
+ filepath = os.path.normpath(self.fname)
+ filepath = filepath.replace("\\", "/")
+
+ if not os.path.exists(filepath):
+ self.log.error("File does not exist: %s" % filepath)
+ return
+
+ self.log.info("Start houdini variant of usdview...")
+
+ # For now avoid some pipeline environment variables that initialize
+ # Avalon in Houdini as it is redundant for usdview and slows boot time
+ env = os.environ.copy()
+ env.pop("PYTHONPATH", None)
+ env.pop("HOUDINI_SCRIPT_PATH", None)
+ env.pop("HOUDINI_MENU_PATH", None)
+
+ # Force string to avoid unicode issues
+ env = {str(key): str(value) for key, value in env.items()}
+
+ subprocess.Popen([usdview, filepath, "--renderer", "GL"], env=env)
diff --git a/openpype/hosts/houdini/plugins/publish/collect_active_state.py b/openpype/hosts/houdini/plugins/publish/collect_active_state.py
new file mode 100644
index 0000000000..1193f0cd19
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_active_state.py
@@ -0,0 +1,38 @@
+import pyblish.api
+
+
+class CollectInstanceActiveState(pyblish.api.InstancePlugin):
+ """Collect default active state for instance from its node bypass state.
+
+ This is done at the very end of the CollectorOrder so that any required
+ collecting of data iterating over instances (with InstancePlugin) will
+ actually collect the data for when the user enables the state in the UI.
+ Otherwise potentially required data might not have been collected.
+
+ """
+
+ order = pyblish.api.CollectorOrder + 0.299
+ families = ["*"]
+ hosts = ["houdini"]
+ label = "Instance Active State"
+
+ def process(self, instance):
+
+ # Must have node to check for bypass state
+ if len(instance) == 0:
+ return
+
+ # Check bypass state and reverse
+ node = instance[0]
+ active = not node.isBypassed()
+
+ # Set instance active state
+ instance.data.update(
+ {
+ "active": active,
+ # temporarily translation of `active` to `publish` till
+ # issue has been resolved:
+ # https://github.com/pyblish/pyblish-base/issues/307
+ "publish": active,
+ }
+ )
diff --git a/openpype/hosts/houdini/plugins/publish/collect_current_file.py b/openpype/hosts/houdini/plugins/publish/collect_current_file.py
index b35a943833..c0b987ebbc 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_current_file.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_current_file.py
@@ -9,7 +9,7 @@ class CollectHoudiniCurrentFile(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder - 0.5
label = "Houdini Current File"
- hosts = ['houdini']
+ hosts = ["houdini"]
def process(self, context):
"""Inject the current working file"""
@@ -27,8 +27,10 @@ class CollectHoudiniCurrentFile(pyblish.api.ContextPlugin):
# could have existed already. We will allow it if the file exists,
# but show a warning for this edge case to clarify the potential
# false positive.
- self.log.warning("Current file is 'untitled.hip' and we are "
- "unable to detect whether the current scene is "
- "saved correctly.")
+ self.log.warning(
+ "Current file is 'untitled.hip' and we are "
+ "unable to detect whether the current scene is "
+ "saved correctly."
+ )
- context.data['currentFile'] = filepath
+ context.data["currentFile"] = filepath
diff --git a/openpype/hosts/houdini/plugins/publish/collect_frames.py b/openpype/hosts/houdini/plugins/publish/collect_frames.py
index 1d664aeaeb..ef77c3230b 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_frames.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_frames.py
@@ -6,11 +6,11 @@ from openpype.hosts.houdini.api import lib
class CollectFrames(pyblish.api.InstancePlugin):
- """Collect all frames which would be a resukl"""
+ """Collect all frames which would be saved from the ROP nodes"""
order = pyblish.api.CollectorOrder
label = "Collect Frames"
- families = ["vdbcache"]
+ families = ["vdbcache", "imagesequence"]
def process(self, instance):
@@ -19,10 +19,17 @@ class CollectFrames(pyblish.api.InstancePlugin):
output_parm = lib.get_output_parameter(ropnode)
output = output_parm.eval()
+ _, ext = os.path.splitext(output)
file_name = os.path.basename(output)
- match = re.match("(\w+)\.(\d+)\.vdb", file_name)
result = file_name
+ # Get the filename pattern match from the output
+ # path so we can compute all frames that would
+ # come out from rendering the ROP node if there
+ # is a frame pattern in the name
+ pattern = r"\w+\.(\d+)" + re.escape(ext)
+ match = re.match(pattern, file_name)
+
start_frame = instance.data.get("frameStart", None)
end_frame = instance.data.get("frameEnd", None)
@@ -31,10 +38,12 @@ class CollectFrames(pyblish.api.InstancePlugin):
# Check if frames are bigger than 1 (file collection)
# override the result
if end_frame - start_frame > 1:
- result = self.create_file_list(match,
- int(start_frame),
- int(end_frame))
+ result = self.create_file_list(
+ match, int(start_frame), int(end_frame)
+ )
+ # todo: `frames` currently conflicts with "explicit frames" for a
+ # custom frame list. So this should be refactored.
instance.data.update({"frames": result})
def create_file_list(self, match, start_frame, end_frame):
@@ -50,17 +59,24 @@ class CollectFrames(pyblish.api.InstancePlugin):
"""
+ # Get the padding length
+ frame = match.group(1)
+ padding = len(frame)
+
+ # Get the parts of the filename surrounding the frame number
+ # so we can put our own frame numbers in.
+ span = match.span(1)
+ prefix = match.string[: span[0]]
+ suffix = match.string[span[1]:]
+
+ # Generate filenames for all frames
result = []
+ for i in range(start_frame, end_frame + 1):
- padding = len(match.group(2))
- name = match.group(1)
- padding_format = "{number:0{width}d}"
+ # Format frame number by the padding amount
+ str_frame = "{number:0{width}d}".format(number=i, width=padding)
- count = start_frame
- while count <= end_frame:
- str_count = padding_format.format(number=count, width=padding)
- file_name = "{}.{}.vdb".format(name, str_count)
+ file_name = prefix + str_frame + suffix
result.append(file_name)
- count += 1
return result
diff --git a/openpype/hosts/houdini/plugins/publish/collect_inputs.py b/openpype/hosts/houdini/plugins/publish/collect_inputs.py
new file mode 100644
index 0000000000..39e2737e8c
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_inputs.py
@@ -0,0 +1,120 @@
+import avalon.api as api
+import pyblish.api
+
+
+def collect_input_containers(nodes):
+ """Collect containers that contain any of the node in `nodes`.
+
+ This will return any loaded Avalon container that contains at least one of
+ the nodes. As such, the Avalon container is an input for it. Or in short,
+ there are member nodes of that container.
+
+ Returns:
+ list: Input avalon containers
+
+ """
+
+ # Lookup by node ids
+ lookup = frozenset(nodes)
+
+ containers = []
+ host = api.registered_host()
+ for container in host.ls():
+
+ node = container["node"]
+
+ # Usually the loaded containers don't have any complex references
+ # and the contained children should be all we need. So we disregard
+ # checking for .references() on the nodes.
+ members = set(node.allSubChildren())
+ members.add(node) # include the node itself
+
+ # If there's an intersection
+ if not lookup.isdisjoint(members):
+ containers.append(container)
+
+ return containers
+
+
+def iter_upstream(node):
+ """Yields all upstream inputs for the current node.
+
+ This includes all `node.inputAncestors()` but also traverses through all
+ `node.references()` for the node itself and for any of the upstream nodes.
+ This method has no max-depth and will collect all upstream inputs.
+
+ Yields:
+ hou.Node: The upstream nodes, including references.
+
+ """
+
+ upstream = node.inputAncestors(
+ include_ref_inputs=True, follow_subnets=True
+ )
+
+ # Initialize process queue with the node's ancestors itself
+ queue = list(upstream)
+ collected = set(upstream)
+
+ # Traverse upstream references for all nodes and yield them as we
+ # process the queue.
+ while queue:
+ upstream_node = queue.pop()
+ yield upstream_node
+
+ # Find its references that are not collected yet.
+ references = upstream_node.references()
+ references = [n for n in references if n not in collected]
+
+ queue.extend(references)
+ collected.update(references)
+
+ # Include the references' ancestors that have not been collected yet.
+ for reference in references:
+ ancestors = reference.inputAncestors(
+ include_ref_inputs=True, follow_subnets=True
+ )
+ ancestors = [n for n in ancestors if n not in collected]
+
+ queue.extend(ancestors)
+ collected.update(ancestors)
+
+
+class CollectUpstreamInputs(pyblish.api.InstancePlugin):
+ """Collect source input containers used for this publish.
+
+ This will include `inputs` data of which loaded publishes were used in the
+ generation of this publish. This leaves an upstream trace to what was used
+ as input.
+
+ """
+
+ label = "Collect Inputs"
+ order = pyblish.api.CollectorOrder + 0.4
+ hosts = ["houdini"]
+
+ def process(self, instance):
+ # We can't get the "inputAncestors" directly from the ROP
+ # node, so we find the related output node (set in SOP/COP path)
+ # and include that together with its ancestors
+ output = instance.data.get("output_node")
+
+ if output is None:
+ # If no valid output node is set then ignore it as validation
+ # will be checking those cases.
+ self.log.debug(
+ "No output node found, skipping " "collecting of inputs.."
+ )
+ return
+
+ # Collect all upstream parents
+ nodes = list(iter_upstream(output))
+ nodes.append(output)
+
+ # Collect containers for the given set of nodes
+ containers = collect_input_containers(nodes)
+
+ inputs = [c["representation"] for c in containers]
+ instance.data["inputs"] = inputs
+
+ self.log.info("Collected inputs: %s" % inputs)
diff --git a/openpype/hosts/houdini/plugins/publish/collect_instances.py b/openpype/hosts/houdini/plugins/publish/collect_instances.py
index 2e294face2..1b36526783 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_instances.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_instances.py
@@ -31,6 +31,13 @@ class CollectInstances(pyblish.api.ContextPlugin):
def process(self, context):
nodes = hou.node("/out").children()
+
+ # Include instances in USD stage only when it exists so it
+ # remains backwards compatible with version before houdini 18
+ stage = hou.node("/stage")
+ if stage:
+ nodes += stage.recursiveGlob("*", filter=hou.nodeTypeFilter.Rop)
+
for node in nodes:
if not node.parm("id"):
@@ -55,6 +62,8 @@ class CollectInstances(pyblish.api.ContextPlugin):
# Create nice name if the instance has a frame range.
label = data.get("name", node.name())
+ label += " (%s)" % data["asset"] # include asset in name
+
if "frameStart" in data and "frameEnd" in data:
frames = "[{frameStart} - {frameEnd}]".format(**data)
label = "{} {}".format(label, frames)
diff --git a/openpype/hosts/houdini/plugins/publish/collect_instances_usd_layered.py b/openpype/hosts/houdini/plugins/publish/collect_instances_usd_layered.py
new file mode 100644
index 0000000000..7df5e8b6f2
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_instances_usd_layered.py
@@ -0,0 +1,152 @@
+import hou
+import pyblish.api
+from avalon.houdini import lib
+import openpype.hosts.houdini.api.usd as hou_usdlib
+import openpype.lib.usdlib as usdlib
+
+
+class CollectInstancesUsdLayered(pyblish.api.ContextPlugin):
+ """Collect Instances from a ROP Network and its configured layer paths.
+
+ The output nodes of the ROP node will only be published when *any* of the
+ layers remain set to 'publish' by the user.
+
+ This works differently from most of our Avalon instances in the pipeline.
+ As opposed to storing `pyblish.avalon.instance` as id on the node we store
+ `pyblish.avalon.usdlayered`.
+
+ Additionally this instance has no need for storing family, asset, subset
+ or name on the nodes. Instead all information is retrieved solely from
+ the output filepath, which is an Avalon URI:
+ avalon://{asset}/{subset}.{representation}
+
+ Each final ROP node is considered a dependency for any of the Configured
+ Save Path layers it sets along the way. As such, the instances shown in
+ the Pyblish UI are solely the configured layers. The encapsulating usd
+ files are generated whenever *any* of the dependencies is published.
+
+ These dependency instances are stored in:
+ instance.data["publishDependencies"]
+
+ """
+
+ order = pyblish.api.CollectorOrder - 0.01
+ label = "Collect Instances (USD Configured Layers)"
+ hosts = ["houdini"]
+
+ def process(self, context):
+
+ stage = hou.node("/stage")
+ if not stage:
+ # Likely Houdini version <18
+ return
+
+ nodes = stage.recursiveGlob("*", filter=hou.nodeTypeFilter.Rop)
+ for node in nodes:
+
+ if not node.parm("id"):
+ continue
+
+ if node.evalParm("id") != "pyblish.avalon.usdlayered":
+ continue
+
+ has_family = node.evalParm("family")
+ assert has_family, "'%s' is missing 'family'" % node.name()
+
+ self.process_node(node, context)
+
+ def sort_by_family(instance):
+ """Sort by family"""
+ return instance.data.get("families", instance.data.get("family"))
+
+ # Sort/grouped by family (preserving local index)
+ context[:] = sorted(context, key=sort_by_family)
+
+ return context
+
+ def process_node(self, node, context):
+
+ # Allow a single ROP node or a full ROP network of USD ROP nodes
+ # to be processed as a single entry that should "live together" on
+ # a publish.
+ if node.type().name() == "ropnet":
+ # All rop nodes inside ROP Network
+ ropnodes = node.recursiveGlob("*", filter=hou.nodeTypeFilter.Rop)
+ else:
+ # A single node
+ ropnodes = [node]
+
+ data = lib.read(node)
+
+ # Don't use the explicit "colorbleed.usd.layered" family for publishing
+ # instead use the "colorbleed.usd" family to integrate.
+ data["publishFamilies"] = ["colorbleed.usd"]
+
+ # For now group ALL of them into USD Layer subset group
+ # Allow this subset to be grouped into a USD Layer on creation
+ data["subsetGroup"] = "USD Layer"
+
+ instances = list()
+ dependencies = []
+ for ropnode in ropnodes:
+
+ # Create a dependency instance per ROP Node.
+ lopoutput = ropnode.evalParm("lopoutput")
+ dependency_save_data = self.get_save_data(lopoutput)
+ dependency = context.create_instance(dependency_save_data["name"])
+ dependency.append(ropnode)
+ dependency.data.update(data)
+ dependency.data.update(dependency_save_data)
+ dependency.data["family"] = "colorbleed.usd.dependency"
+ dependency.data["optional"] = False
+ dependencies.append(dependency)
+
+ # Hide the dependency instance from the context
+ context.pop()
+
+ # Get all configured layers for this USD ROP node
+ # and create a Pyblish instance for each one
+ layers = hou_usdlib.get_configured_save_layers(ropnode)
+ for layer in layers:
+ save_path = hou_usdlib.get_layer_save_path(layer)
+ save_data = self.get_save_data(save_path)
+ if not save_data:
+ continue
+ self.log.info(save_path)
+
+ instance = context.create_instance(save_data["name"])
+ instance[:] = [node]
+
+ # Set the instance data
+ instance.data.update(data)
+ instance.data.update(save_data)
+ instance.data["usdLayer"] = layer
+
+ # Don't allow the Pyblish `instanceToggled` we have installed
+ # to set this node to bypass.
+ instance.data["_allowToggleBypass"] = False
+
+ instances.append(instance)
+
+ # Store the collected ROP node dependencies
+ self.log.debug("Collected dependencies: %s" % (dependencies,))
+ for instance in instances:
+ instance.data["publishDependencies"] = dependencies
+
+ def get_save_data(self, save_path):
+
+ # Resolve Avalon URI
+ uri_data = usdlib.parse_avalon_uri(save_path)
+ if not uri_data:
+ self.log.warning("Non Avalon URI Layer Path: %s" % save_path)
+ return {}
+
+ # Collect asset + subset from URI
+ name = "{subset} ({asset})".format(**uri_data)
+ fname = "{asset}_{subset}.{ext}".format(**uri_data)
+
+ data = dict(uri_data)
+ data["usdSavePath"] = save_path
+ data["usdFilename"] = fname
+ data["name"] = name
+ return data
diff --git a/openpype/hosts/houdini/plugins/publish/collect_output_node.py b/openpype/hosts/houdini/plugins/publish/collect_output_node.py
index c0587d5336..938ee81cc3 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_output_node.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_output_node.py
@@ -2,13 +2,20 @@ import pyblish.api
class CollectOutputSOPPath(pyblish.api.InstancePlugin):
- """Collect the out node's SOP Path value."""
+ """Collect the out node's SOP/COP Path value."""
order = pyblish.api.CollectorOrder
- families = ["pointcache",
- "vdbcache"]
+ families = [
+ "pointcache",
+ "camera",
+ "vdbcache",
+ "imagesequence",
+ "usd",
+ "usdrender",
+ ]
+
hosts = ["houdini"]
- label = "Collect Output SOP Path"
+ label = "Collect Output Node Path"
def process(self, instance):
@@ -17,12 +24,44 @@ class CollectOutputSOPPath(pyblish.api.InstancePlugin):
node = instance[0]
# Get sop path
- if node.type().name() == "alembic":
- sop_path_parm = "sop_path"
+ node_type = node.type().name()
+ if node_type == "geometry":
+ out_node = node.parm("soppath").evalAsNode()
+
+ elif node_type == "alembic":
+
+ # Alembic can switch between using SOP Path or object
+ if node.parm("use_sop_path").eval():
+ out_node = node.parm("sop_path").evalAsNode()
+ else:
+ root = node.parm("root").eval()
+ objects = node.parm("objects").eval()
+ path = root + "/" + objects
+ out_node = hou.node(path)
+
+ elif node_type == "comp":
+ out_node = node.parm("coppath").evalAsNode()
+
+ elif node_type == "usd" or node_type == "usdrender":
+ out_node = node.parm("loppath").evalAsNode()
+
+ elif node_type == "usd_rop" or node_type == "usdrender_rop":
+ # Inside Solaris e.g. /stage (not in ROP context)
+ # When incoming connection is present it takes it directly
+ inputs = node.inputs()
+ if inputs:
+ out_node = inputs[0]
+ else:
+ out_node = node.parm("loppath").evalAsNode()
+
else:
- sop_path_parm = "soppath"
+ raise ValueError(
+ "ROP node type '%s' is" " not supported." % node_type
+ )
- sop_path = node.parm(sop_path_parm).eval()
- out_node = hou.node(sop_path)
+ if not out_node:
+ self.log.warning("No output node collected.")
+ return
+ self.log.debug("Output node: %s" % out_node.path())
instance.data["output_node"] = out_node
diff --git a/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py b/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py
new file mode 100644
index 0000000000..72b554b567
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py
@@ -0,0 +1,135 @@
+import re
+import os
+
+import hou
+import pyblish.api
+
+
+def get_top_referenced_parm(parm):
+
+ processed = set() # disallow infinite loop
+ while True:
+ if parm.path() in processed:
+ raise RuntimeError("Parameter references result in cycle.")
+
+ processed.add(parm.path())
+
+ ref = parm.getReferencedParm()
+ if ref.path() == parm.path():
+ # It returns itself when it doesn't reference
+ # another parameter
+ return ref
+ else:
+ parm = ref
+
+
+def evalParmNoFrame(node, parm, pad_character="#"):
+
+ parameter = node.parm(parm)
+ assert parameter, "Parameter does not exist: %s.%s" % (node, parm)
+
+ # If the parameter has a parameter reference, then get that
+ # parameter instead as otherwise `unexpandedString()` fails.
+ parameter = get_top_referenced_parm(parameter)
+
+ # Substitute out the frame numbering with padded characters
+ try:
+ raw = parameter.unexpandedString()
+ except hou.Error as exc:
+ print("Failed: %s" % parameter)
+ raise RuntimeError(exc)
+
+ def replace(match):
+ padding = 1
+ n = match.group(2)
+ if n and int(n):
+ padding = int(n)
+ return pad_character * padding
+
+ expression = re.sub(r"(\$F([0-9]*))", replace, raw)
+
+ with hou.ScriptEvalContext(parameter):
+ return hou.expandStringAtFrame(expression, 0)
+
+
+class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
+ """Collect USD Render Products
+
+ Collects the instance.data["files"] for the render products.
+
+ Provides:
+ instance -> files
+
+ """
+
+ label = "Redshift ROP Render Products"
+ order = pyblish.api.CollectorOrder + 0.4
+ hosts = ["houdini"]
+ families = ["redshift_rop"]
+
+ def process(self, instance):
+
+ rop = instance[0]
+
+ # Collect chunkSize
+ chunk_size_parm = rop.parm("chunkSize")
+ if chunk_size_parm:
+ chunk_size = int(chunk_size_parm.eval())
+ instance.data["chunkSize"] = chunk_size
+ self.log.debug("Chunk Size: %s" % chunk_size)
+
+ default_prefix = evalParmNoFrame(rop, "RS_outputFileNamePrefix")
+ beauty_suffix = rop.evalParm("RS_outputBeautyAOVSuffix")
+ render_products = []
+
+ # Default beauty AOV
+ beauty_product = self.get_render_product_name(
+ prefix=default_prefix, suffix=beauty_suffix
+ )
+ render_products.append(beauty_product)
+
+ num_aovs = rop.evalParm("RS_aov")
+ for index in range(num_aovs):
+ i = index + 1
+
+ # Skip disabled AOVs
+ if not rop.evalParm("RS_aovEnable_%s" % i):
+ continue
+
+ aov_suffix = rop.evalParm("RS_aovSuffix_%s" % i)
+ aov_prefix = evalParmNoFrame(rop, "RS_aovCustomPrefix_%s" % i)
+ if not aov_prefix:
+ aov_prefix = default_prefix
+
+ aov_product = self.get_render_product_name(aov_prefix, aov_suffix)
+ render_products.append(aov_product)
+
+ for product in render_products:
+ self.log.debug("Found render product: %s" % product)
+
+ filenames = list(render_products)
+ instance.data["files"] = filenames
+
+ def get_render_product_name(self, prefix, suffix):
+ """Return the output filename using the AOV prefix and suffix"""
+
+ # When AOV is explicitly defined in prefix we just swap it out
+ # directly with the AOV suffix to embed it.
+ # Note: ${AOV} seems to be evaluated in the parameter as %AOV%
+ has_aov_in_prefix = "%AOV%" in prefix
+ if has_aov_in_prefix:
+ # It seems that when some special separator characters are present
+ # before the %AOV% token that Redshift will secretly remove it if
+ # there is no suffix for the current product, for example:
+ # foo_%AOV% -> foo.exr
+ pattern = "%AOV%" if suffix else "[._-]?%AOV%"
+ product_name = re.sub(pattern, suffix, prefix, flags=re.IGNORECASE)
+ else:
+ if suffix:
+ # Add ".{suffix}" before the extension
+ prefix_base, ext = os.path.splitext(prefix)
+ product_name = prefix_base + "." + suffix + ext
+ else:
+ product_name = prefix
+
+ return product_name
diff --git a/openpype/hosts/houdini/plugins/publish/collect_remote_publish.py b/openpype/hosts/houdini/plugins/publish/collect_remote_publish.py
new file mode 100644
index 0000000000..3ae16efe56
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_remote_publish.py
@@ -0,0 +1,30 @@
+import pyblish.api
+import openpype.api
+
+import hou
+from avalon.houdini import lib
+
+
+class CollectRemotePublishSettings(pyblish.api.ContextPlugin):
+ """Collect custom settings of the Remote Publish node."""
+
+ order = pyblish.api.CollectorOrder
+ families = ["*"]
+ hosts = ["houdini"]
+ targets = ["deadline"]
+ label = "Remote Publish Submission Settings"
+ actions = [openpype.api.RepairAction]
+
+ def process(self, context):
+
+ node = hou.node("/out/REMOTE_PUBLISH")
+ if not node:
+ return
+
+ attributes = lib.read(node)
+
+ # Debug the settings we have collected
+ for key, value in sorted(attributes.items()):
+ self.log.debug("Collected %s: %s" % (key, value))
+
+ context.data.update(attributes)
diff --git a/openpype/hosts/houdini/plugins/publish/collect_render_products.py b/openpype/hosts/houdini/plugins/publish/collect_render_products.py
new file mode 100644
index 0000000000..d7163b43c0
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_render_products.py
@@ -0,0 +1,133 @@
+import re
+import os
+
+import hou
+import pxr.UsdRender
+
+import pyblish.api
+
+
+def get_var_changed(variable=None):
+ """Return changed variables and operators that use it.
+
+ Note: `varchange` hscript states that it forces a recook of the nodes
+ that use Variables. That was tested in Houdini
+ 18.0.391.
+
+ Args:
+ variable (str, Optional): A specific variable to query the operators
+ for. When None is provided it will return all variables that have
+ had recent changes and require a recook. Defaults to None.
+
+ Returns:
+ dict: Variable that changed with the operators that use it.
+
+ """
+ cmd = "varchange -V"
+ if variable:
+ cmd += " {0}".format(variable)
+ output, _ = hou.hscript(cmd)
+
+ changed = {}
+ for line in output.split("Variable: "):
+ if not line.strip():
+ continue
+
+ split = line.split()
+ var = split[0]
+ operators = split[1:]
+ changed[var] = operators
+
+ return changed
+
+
+class CollectRenderProducts(pyblish.api.InstancePlugin):
+ """Collect USD Render Products."""
+
+ label = "Collect Render Products"
+ order = pyblish.api.CollectorOrder + 0.4
+ hosts = ["houdini"]
+ families = ["usdrender"]
+
+ def process(self, instance):
+
+ node = instance.data.get("output_node")
+ if not node:
+ rop_path = instance[0].path()
+ raise RuntimeError(
+ "No output node found. Make sure to connect an "
+ "input to the USD ROP: %s" % rop_path
+ )
+
+ # Workaround Houdini 18.0.391 bug where $HIPNAME doesn't automatically
+ # update after scene save.
+ if hou.applicationVersion() == (18, 0, 391):
+ self.log.debug(
+ "Checking for recook to workaround " "$HIPNAME refresh bug..."
+ )
+ changed = get_var_changed("HIPNAME").get("HIPNAME")
+ if changed:
+ self.log.debug("Recooking for $HIPNAME refresh bug...")
+ for operator in changed:
+ hou.node(operator).cook(force=True)
+
+ # Make sure to recook any 'cache' nodes in the history chain
+ chain = [node]
+ chain.extend(node.inputAncestors())
+ for input_node in chain:
+ if input_node.type().name() == "cache":
+ input_node.cook(force=True)
+
+ stage = node.stage()
+
+ filenames = []
+ for prim in stage.Traverse():
+
+ if not prim.IsA(pxr.UsdRender.Product):
+ continue
+
+ # Get Render Product Name
+ product = pxr.UsdRender.Product(prim)
+
+ # We force taking it from any random time sample as opposed to
+ # "default" that the USD Api falls back to since that won't return
+ # time sampled values if they were set per time sample.
+ name = product.GetProductNameAttr().Get(time=0)
+ dirname = os.path.dirname(name)
+ basename = os.path.basename(name)
+
+ dollarf_regex = r"(\$F([0-9]?))"
+ frame_regex = r"^(.+\.)([0-9]+)(\.[a-zA-Z]+)$"
+ if re.match(dollarf_regex, basename):
+ # TODO: Confirm this actually is allowed USD stages and HUSK
+ # Substitute $F
+ def replace(match):
+ """Replace $F4 with padded #."""
+ padding = int(match.group(2)) if match.group(2) else 1
+ return "#" * padding
+
+ filename_base = re.sub(dollarf_regex, replace, basename)
+ filename = os.path.join(dirname, filename_base)
+ else:
+ # Substitute basename.0001.ext
+ def replace(match):
+ prefix, frame, ext = match.groups()
+ padding = "#" * len(frame)
+ return prefix + padding + ext
+
+ filename_base = re.sub(frame_regex, replace, basename)
+ filename = os.path.join(dirname, filename_base)
+ filename = filename.replace("\\", "/")
+
+ assert "#" in filename, (
+ "Couldn't resolve render product name "
+ "with frame number: %s" % name
+ )
+
+ filenames.append(filename)
+
+ prim_path = str(prim.GetPath())
+ self.log.info("Collected %s name: %s" % (prim_path, filename))
+
+ # Filenames for Deadline
+ instance.data["files"] = filenames
diff --git a/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py b/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py
new file mode 100644
index 0000000000..66dfba64df
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py
@@ -0,0 +1,110 @@
+import pyblish.api
+
+from avalon import io
+import openpype.lib.usdlib as usdlib
+
+
+class CollectUsdBootstrap(pyblish.api.InstancePlugin):
+ """Collect special Asset/Shot bootstrap instances if those are needed.
+
+ Some specific subsets are intended to be part of the default structure
+ of an "Asset" or "Shot" in our USD pipeline. For example, for an Asset
+ we layer a Model and Shade USD file over each other and expose that in
+ an Asset USD file, ready to use.
+
+ On the first publish of any of the components of an Asset or Shot the
+ missing pieces are bootstrapped and generated in the pipeline too. This
+ means that on the very first publish of your model the Asset USD file
+ will exist too.
+
+ """
+
+ order = pyblish.api.CollectorOrder + 0.35
+ label = "Collect USD Bootstrap"
+ hosts = ["houdini"]
+ families = ["usd", "usd.layered"]
+
+ def process(self, instance):
+
+ # Detect whether the current subset is a subset in a pipeline
+ def get_bootstrap(instance):
+ instance_subset = instance.data["subset"]
+ for name, layers in usdlib.PIPELINE.items():
+ if instance_subset in set(layers):
+ return name # e.g. "asset"
+ break
+ else:
+ return
+
+ bootstrap = get_bootstrap(instance)
+ if bootstrap:
+ self.add_bootstrap(instance, bootstrap)
+
+ # Check if any of the dependencies requires a bootstrap
+ for dependency in instance.data.get("publishDependencies", list()):
+ bootstrap = get_bootstrap(dependency)
+ if bootstrap:
+ self.add_bootstrap(dependency, bootstrap)
+
+ def add_bootstrap(self, instance, bootstrap):
+
+ self.log.debug("Add bootstrap for: %s" % bootstrap)
+
+ asset = io.find_one({"name": instance.data["asset"], "type": "asset"})
+ assert asset, "Asset must exist: %s" % asset
+
+ # Check which are not about to be created and don't exist yet
+ required = {"shot": ["usdShot"], "asset": ["usdAsset"]}.get(bootstrap)
+
+ require_all_layers = instance.data.get("requireAllLayers", False)
+ if require_all_layers:
+ # USD files load fine in usdview and Houdini even when layered or
+ # referenced files do not exist. So by default we don't require
+ # the layers to exist.
+ layers = usdlib.PIPELINE.get(bootstrap)
+ if layers:
+ required += list(layers)
+
+ self.log.debug("Checking required bootstrap: %s" % required)
+ for subset in required:
+ if self._subset_exists(instance, subset, asset):
+ continue
+
+ self.log.debug(
+ "Creating {0} USD bootstrap: {1} {2}".format(
+ bootstrap, asset["name"], subset
+ )
+ )
+
+ new = instance.context.create_instance(subset)
+ new.data["subset"] = subset
+ new.data["label"] = "{0} ({1})".format(subset, asset["name"])
+ new.data["family"] = "usd.bootstrap"
+ new.data["comment"] = "Automated bootstrap USD file."
+ new.data["publishFamilies"] = ["usd"]
+
+ # Do not allow the user to toggle this instance
+ new.data["optional"] = False
+
+ # Copy some data from the instance for which we bootstrap
+ for key in ["asset"]:
+ new.data[key] = instance.data[key]
+
+ def _subset_exists(self, instance, subset, asset):
+ """Return whether subset exists in current context or in database."""
+ # Allow it to be created during this publish session
+ context = instance.context
+ for inst in context:
+ if (
+ inst.data["subset"] == subset
+ and inst.data["asset"] == asset["name"]
+ ):
+ return True
+
+ # Or, if they already exist in the database we can
+ # skip them too.
+ return bool(
+ io.find_one(
+ {"name": subset, "type": "subset", "parent": asset["_id"]}
+ )
+ )
diff --git a/openpype/hosts/houdini/plugins/publish/collect_usd_layers.py b/openpype/hosts/houdini/plugins/publish/collect_usd_layers.py
new file mode 100644
index 0000000000..8be6ead1b1
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_usd_layers.py
@@ -0,0 +1,61 @@
+import os
+
+import pyblish.api
+import openpype.hosts.houdini.api.usd as usdlib
+
+
+class CollectUsdLayers(pyblish.api.InstancePlugin):
+ """Collect the USD Layers that have configured save paths."""
+
+ order = pyblish.api.CollectorOrder + 0.35
+ label = "Collect USD Layers"
+ hosts = ["houdini"]
+ families = ["usd"]
+
+ def process(self, instance):
+
+ output = instance.data.get("output_node")
+ if not output:
+ self.log.debug("No output node found..")
+ return
+
+ rop_node = instance[0]
+
+ save_layers = []
+ for layer in usdlib.get_configured_save_layers(rop_node):
+
+ info = layer.rootPrims.get("HoudiniLayerInfo")
+ save_path = info.customData.get("HoudiniSavePath")
+ creator = info.customData.get("HoudiniCreatorNode")
+
+ self.log.debug("Found configured save path: "
+ "%s -> %s" % (layer, save_path))
+
+ # Log node that configured this save path
+ if creator:
+ self.log.debug("Created by: %s" % creator)
+
+ save_layers.append((layer, save_path))
+
+ # Store on the instance
+ instance.data["usdConfiguredSavePaths"] = save_layers
+
+ # Create configured layer instances so User can disable updating
+ # specific configured layers for publishing.
+ context = instance.context
+ for layer, save_path in save_layers:
+ name = os.path.basename(save_path)
+ label = "{0} -> {1}".format(instance.data["name"], name)
+ layer_inst = context.create_instance(name)
+
+ family = "colorbleed.usdlayer"
+ layer_inst.data["family"] = family
+ layer_inst.data["families"] = [family]
+ layer_inst.data["subset"] = "__stub__"
+ layer_inst.data["label"] = label
+ layer_inst.data["asset"] = instance.data["asset"]
+ layer_inst.append(instance[0]) # include same USD ROP
+ layer_inst.append((layer, save_path)) # include layer data
+
+ # Allow this subset to be grouped into a USD Layer on creation
+ layer_inst.data["subsetGroup"] = "USD Layer"
diff --git a/openpype/hosts/houdini/plugins/publish/collect_workscene_fps.py b/openpype/hosts/houdini/plugins/publish/collect_workscene_fps.py
index c145eea519..6f6cc978cd 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_workscene_fps.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_workscene_fps.py
@@ -3,7 +3,7 @@ import hou
class CollectWorksceneFPS(pyblish.api.ContextPlugin):
- """Get the FPS of the work scene"""
+ """Get the FPS of the work scene."""
label = "Workscene FPS"
order = pyblish.api.CollectorOrder
diff --git a/openpype/hosts/houdini/plugins/publish/extract_alembic.py b/openpype/hosts/houdini/plugins/publish/extract_alembic.py
index b251ebdc90..83b790407f 100644
--- a/openpype/hosts/houdini/plugins/publish/extract_alembic.py
+++ b/openpype/hosts/houdini/plugins/publish/extract_alembic.py
@@ -2,6 +2,7 @@ import os
import pyblish.api
import openpype.api
+from openpype.hosts.houdini.api.lib import render_rop
class ExtractAlembic(openpype.api.Extractor):
@@ -13,29 +14,20 @@ class ExtractAlembic(openpype.api.Extractor):
def process(self, instance):
- import hou
-
ropnode = instance[0]
# Get the filename from the filename parameter
output = ropnode.evalParm("filename")
staging_dir = os.path.dirname(output)
- # instance.data["stagingDir"] = staging_dir
+ instance.data["stagingDir"] = staging_dir
file_name = os.path.basename(output)
# We run the render
self.log.info("Writing alembic '%s' to '%s'" % (file_name,
staging_dir))
- try:
- ropnode.render()
- except hou.Error as exc:
- # The hou.Error is not inherited from a Python Exception class,
- # so we explicitly capture the houdini error, otherwise pyblish
- # will remain hanging.
- import traceback
- traceback.print_exc()
- raise RuntimeError("Render failed: {0}".format(exc))
+
+ render_rop(ropnode)
if "representations" not in instance.data:
instance.data["representations"] = []
diff --git a/openpype/hosts/houdini/plugins/publish/extract_composite.py b/openpype/hosts/houdini/plugins/publish/extract_composite.py
new file mode 100644
index 0000000000..f300b6d28d
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/extract_composite.py
@@ -0,0 +1,35 @@
+import os
+
+import pyblish.api
+import openpype.api
+
+from openpype.hosts.houdini.api.lib import render_rop
+
+
+class ExtractComposite(openpype.api.Extractor):
+
+ order = pyblish.api.ExtractorOrder
+ label = "Extract Composite (Image Sequence)"
+ hosts = ["houdini"]
+ families = ["imagesequence"]
+
+ def process(self, instance):
+
+ ropnode = instance[0]
+
+ # Get the filename from the copoutput parameter
+ # `.evalParm(parameter)` will make sure all tokens are resolved
+ output = ropnode.evalParm("copoutput")
+ staging_dir = os.path.dirname(output)
+ instance.data["stagingDir"] = staging_dir
+ file_name = os.path.basename(output)
+
+ self.log.info("Writing comp '%s' to '%s'" % (file_name, staging_dir))
+
+ render_rop(ropnode)
+
+ if "files" not in instance.data:
+ instance.data["files"] = []
+
+ frames = instance.data["frames"]
+ instance.data["files"].append(frames)
diff --git a/openpype/hosts/houdini/plugins/publish/extract_usd.py b/openpype/hosts/houdini/plugins/publish/extract_usd.py
new file mode 100644
index 0000000000..0fc26900fb
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/extract_usd.py
@@ -0,0 +1,42 @@
+import os
+
+import pyblish.api
+import openpype.api
+from openpype.hosts.houdini.api.lib import render_rop
+
+
+class ExtractUSD(openpype.api.Extractor):
+
+ order = pyblish.api.ExtractorOrder
+ label = "Extract USD"
+ hosts = ["houdini"]
+ families = ["usd",
+ "usdModel",
+ "usdSetDress"]
+
+ def process(self, instance):
+
+ ropnode = instance[0]
+
+ # Get the filename from the filename parameter
+ output = ropnode.evalParm("lopoutput")
+ staging_dir = os.path.dirname(output)
+ instance.data["stagingDir"] = staging_dir
+ file_name = os.path.basename(output)
+
+ self.log.info("Writing USD '%s' to '%s'" % (file_name, staging_dir))
+
+ render_rop(ropnode)
+
+ assert os.path.exists(output), "Output does not exist: %s" % output
+
+ if "representations" not in instance.data:
+ instance.data["representations"] = []
+
+ representation = {
+ 'name': 'usd',
+ 'ext': 'usd',
+ 'files': file_name,
+ "stagingDir": staging_dir,
+ }
+ instance.data["representations"].append(representation)
diff --git a/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py b/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py
new file mode 100644
index 0000000000..645bd05d4b
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py
@@ -0,0 +1,315 @@
+import os
+import contextlib
+import hou
+import sys
+from collections import deque
+
+import pyblish.api
+import openpype.api
+
+import openpype.hosts.houdini.api.usd as hou_usdlib
+from openpype.hosts.houdini.api.lib import render_rop
+
+
+class ExitStack(object):
+ """Context manager for dynamic management of a stack of exit callbacks.
+
+ For example:
+
+ with ExitStack() as stack:
+ files = [stack.enter_context(open(fname)) for fname in filenames]
+ # All opened files will automatically be closed at the end of
+ # the with statement, even if attempts to open files later
+ # in the list raise an exception
+
+ """
+
+ def __init__(self):
+ self._exit_callbacks = deque()
+
+ def pop_all(self):
+ """Preserve the context stack by transferring it to a new instance"""
+ new_stack = type(self)()
+ new_stack._exit_callbacks = self._exit_callbacks
+ self._exit_callbacks = deque()
+ return new_stack
+
+ def _push_cm_exit(self, cm, cm_exit):
+ """Helper to correctly register callbacks to __exit__ methods"""
+
+ def _exit_wrapper(*exc_details):
+ return cm_exit(cm, *exc_details)
+
+ _exit_wrapper.__self__ = cm
+ self.push(_exit_wrapper)
+
+ def push(self, exit):
+ """Registers a callback with the standard __exit__ method signature.
+
+ Can suppress exceptions the same way __exit__ methods can.
+
+ Also accepts any object with an __exit__ method (registering a call
+ to the method instead of the object itself)
+
+ """
+ # We use an unbound method rather than a bound method to follow
+ # the standard lookup behaviour for special methods
+ _cb_type = type(exit)
+ try:
+ exit_method = _cb_type.__exit__
+ except AttributeError:
+ # Not a context manager, so assume it's a callable
+ self._exit_callbacks.append(exit)
+ else:
+ self._push_cm_exit(exit, exit_method)
+ return exit # Allow use as a decorator
+
+ def callback(self, callback, *args, **kwds):
+ """Registers an arbitrary callback and arguments.
+
+ Cannot suppress exceptions.
+ """
+
+ def _exit_wrapper(exc_type, exc, tb):
+ callback(*args, **kwds)
+
+ # We changed the signature, so using @wraps is not appropriate, but
+ # setting __wrapped__ may still help with introspection
+ _exit_wrapper.__wrapped__ = callback
+ self.push(_exit_wrapper)
+ return callback # Allow use as a decorator
+
+ def enter_context(self, cm):
+ """Enters the supplied context manager
+
+ If successful, also pushes its __exit__ method as a callback and
+ returns the result of the __enter__ method.
+ """
+ # We look up the special methods on the type to match the with
+ # statement
+ _cm_type = type(cm)
+ _exit = _cm_type.__exit__
+ result = _cm_type.__enter__(cm)
+ self._push_cm_exit(cm, _exit)
+ return result
+
+ def close(self):
+ """Immediately unwind the context stack"""
+ self.__exit__(None, None, None)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *exc_details):
+ # We manipulate the exception state so it behaves as though
+ # we were actually nesting multiple with statements
+ frame_exc = sys.exc_info()[1]
+
+ def _fix_exception_context(new_exc, old_exc):
+ while 1:
+ exc_context = new_exc.__context__
+ if exc_context in (None, frame_exc):
+ break
+ new_exc = exc_context
+ new_exc.__context__ = old_exc
+
+ # Callbacks are invoked in LIFO order to match the behaviour of
+ # nested context managers
+ suppressed_exc = False
+ while self._exit_callbacks:
+ cb = self._exit_callbacks.pop()
+ try:
+ if cb(*exc_details):
+ suppressed_exc = True
+ exc_details = (None, None, None)
+ except Exception:
+ new_exc_details = sys.exc_info()
+ # simulate the stack of exceptions by setting the context
+ _fix_exception_context(new_exc_details[1], exc_details[1])
+ if not self._exit_callbacks:
+ raise
+ exc_details = new_exc_details
+ return suppressed_exc
+
+
+@contextlib.contextmanager
+def parm_values(overrides):
+ """Override Parameter values during the context."""
+
+ originals = []
+ try:
+ for parm, value in overrides:
+ originals.append((parm, parm.eval()))
+ parm.set(value)
+ yield
+ finally:
+ for parm, value in originals:
+ # Parameter might not exist anymore so first
+ # check whether it's still valid
+ if hou.parm(parm.path()):
+ parm.set(value)
+
+
+class ExtractUSDLayered(openpype.api.Extractor):
+
+ order = pyblish.api.ExtractorOrder
+ label = "Extract Layered USD"
+ hosts = ["houdini"]
+ families = ["usdLayered", "usdShade"]
+
+ # Force Output Processors so it will always save any file
+ # into our unique staging directory with processed Avalon paths
+ output_processors = ["avalon_uri_processor", "stagingdir_processor"]
+
+ def process(self, instance):
+
+ self.log.info("Extracting: %s" % instance)
+
+ staging_dir = self.staging_dir(instance)
+ fname = instance.data.get("usdFilename")
+
+ # The individual rop nodes are collected as "publishDependencies"
+ dependencies = instance.data["publishDependencies"]
+ ropnodes = [dependency[0] for dependency in dependencies]
+ assert all(
+ node.type().name() in {"usd", "usd_rop"} for node in ropnodes
+ )
+
+ # Main ROP node, either a USD Rop or ROP network with
+ # multiple USD ROPs
+ node = instance[0]
+
+ # Collect any output dependencies that have not been processed yet
+ # during extraction of other instances
+ outputs = [fname]
+ active_dependencies = [
+ dep
+ for dep in dependencies
+ if dep.data.get("publish", True)
+ and not dep.data.get("_isExtracted", False)
+ ]
+ for dependency in active_dependencies:
+ outputs.append(dependency.data["usdFilename"])
+
+ pattern = r"*[/\]{0} {0}"
+ save_pattern = " ".join(pattern.format(fname) for fname in outputs)
+
+ # Run a stack of context managers before we start the render to
+ # temporarily adjust USD ROP settings for our publish output.
+ rop_overrides = {
+ # This sets staging directory on the processor to force our
+ # output files to end up in the Staging Directory.
+ "stagingdiroutputprocessor_stagingDir": staging_dir,
+ # Force the Avalon URI Output Processor to refactor paths for
+ # references, payloads and layers to published paths.
+ "avalonurioutputprocessor_use_publish_paths": True,
+ # Only write out specific USD files based on our outputs
+ "savepattern": save_pattern,
+ }
+ overrides = list()
+ with ExitStack() as stack:
+
+ for ropnode in ropnodes:
+ manager = hou_usdlib.outputprocessors(
+ ropnode,
+ processors=self.output_processors,
+ disable_all_others=True,
+ )
+ stack.enter_context(manager)
+
+ # Some of these must be added after we enter the output
+ # processor context manager because those parameters only
+ # exist when the Output Processor is added to the ROP node.
+ for name, value in rop_overrides.items():
+ parm = ropnode.parm(name)
+ assert parm, "Parm not found: %s.%s" % (
+ ropnode.path(),
+ name,
+ )
+ overrides.append((parm, value))
+
+ stack.enter_context(parm_values(overrides))
+
+ # Render the single ROP node or the full ROP network
+ render_rop(node)
+
+ # Assert all output files in the Staging Directory
+ for output_fname in outputs:
+ path = os.path.join(staging_dir, output_fname)
+ assert os.path.exists(path), "Output file must exist: %s" % path
+
+ # Set up the dependency for publish if they have new content
+ # compared to previous publishes
+ for dependency in active_dependencies:
+ dependency_fname = dependency.data["usdFilename"]
+
+ filepath = os.path.join(staging_dir, dependency_fname)
+ similar = self._compare_with_latest_publish(dependency, filepath)
+ if similar:
+ # Deactivate this dependency
+ self.log.debug(
+ "Dependency matches previous publish version,"
+ " deactivating %s for publish" % dependency
+ )
+ dependency.data["publish"] = False
+ else:
+ self.log.debug("Extracted dependency: %s" % dependency)
+ # This dependency should be published
+ dependency.data["files"] = [dependency_fname]
+ dependency.data["stagingDir"] = staging_dir
+ dependency.data["_isExtracted"] = True
+
+ # Store the created files on the instance
+ if "files" not in instance.data:
+ instance.data["files"] = []
+ instance.data["files"].append(fname)
+
+ def _compare_with_latest_publish(self, dependency, new_file):
+
+ from avalon import api, io
+ import filecmp
+
+ _, ext = os.path.splitext(new_file)
+
+ # Compare this dependency with the latest published version
+ # to detect whether we should make this into a new publish
+ # version. If not, skip it.
+ asset = io.find_one(
+ {"name": dependency.data["asset"], "type": "asset"}
+ )
+ subset = io.find_one(
+ {
+ "name": dependency.data["subset"],
+ "type": "subset",
+ "parent": asset["_id"],
+ }
+ )
+ if not subset:
+ # Subset doesn't exist yet. Definitely new file
+ self.log.debug("No existing subset..")
+ return False
+
+ version = io.find_one(
+ {"type": "version", "parent": subset["_id"], },
+ sort=[("name", -1)]
+ )
+ if not version:
+ self.log.debug("No existing version..")
+ return False
+
+ representation = io.find_one(
+ {
+ "name": ext.lstrip("."),
+ "type": "representation",
+ "parent": version["_id"],
+ }
+ )
+ if not representation:
+ self.log.debug("No existing representation..")
+ return False
+
+ old_file = api.get_representation_path(representation)
+ if not os.path.exists(old_file):
+ return False
+
+ return filecmp.cmp(old_file, new_file)
diff --git a/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py b/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py
index f480fe6236..78794acc97 100644
--- a/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py
+++ b/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py
@@ -2,6 +2,7 @@ import os
import pyblish.api
import openpype.api
+from openpype.hosts.houdini.api.lib import render_rop
class ExtractVDBCache(openpype.api.Extractor):
@@ -13,8 +14,6 @@ class ExtractVDBCache(openpype.api.Extractor):
def process(self, instance):
- import hou
-
ropnode = instance[0]
# Get the filename from the filename parameter
@@ -25,15 +24,8 @@ class ExtractVDBCache(openpype.api.Extractor):
file_name = os.path.basename(sop_output)
self.log.info("Writing VDB '%s' to '%s'" % (file_name, staging_dir))
- try:
- ropnode.render()
- except hou.Error as exc:
- # The hou.Error is not inherited from a Python Exception class,
- # so we explicitly capture the houdini error, otherwise pyblish
- # will remain hanging.
- import traceback
- traceback.print_exc()
- raise RuntimeError("Render failed: {0}".format(exc))
+
+ render_rop(ropnode)
output = instance.data["frames"]
@@ -41,9 +33,9 @@ class ExtractVDBCache(openpype.api.Extractor):
instance.data["representations"] = []
representation = {
- 'name': 'mov',
- 'ext': 'mov',
- 'files': output,
+ "name": "vdb",
+ "ext": "vdb",
+ "files": output,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)
diff --git a/openpype/hosts/houdini/plugins/publish/increment_current_file.py b/openpype/hosts/houdini/plugins/publish/increment_current_file.py
new file mode 100644
index 0000000000..31c2954ee7
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/increment_current_file.py
@@ -0,0 +1,51 @@
+import pyblish.api
+import avalon.api
+
+from openpype.api import version_up
+from openpype.action import get_errored_plugins_from_data
+
+
+class IncrementCurrentFile(pyblish.api.InstancePlugin):
+ """Increment the current file.
+
+ Saves the current scene with an increased version number.
+
+ """
+
+ label = "Increment current file"
+ order = pyblish.api.IntegratorOrder + 9.0
+ hosts = ["houdini"]
+ families = ["colorbleed.usdrender", "redshift_rop"]
+ targets = ["local"]
+
+ def process(self, instance):
+
+ # This should be a ContextPlugin, but this is a workaround
+ # for a bug in pyblish to run once for a family: issue #250
+ context = instance.context
+ key = "__hasRun{}".format(self.__class__.__name__)
+ if context.data.get(key, False):
+ return
+ else:
+ context.data[key] = True
+
+ context = instance.context
+ errored_plugins = get_errored_plugins_from_data(context)
+ if any(
+ plugin.__name__ == "HoudiniSubmitPublishDeadline"
+ for plugin in errored_plugins
+ ):
+ raise RuntimeError(
+ "Skipping incrementing current file because "
+ "submission to deadline failed."
+ )
+
+ # Filename must not have changed since collecting
+ host = avalon.api.registered_host()
+ current_file = host.current_file()
+ assert (
+ context.data["currentFile"] == current_file
+ ), "Collected filename from current scene name."
+
+ new_filepath = version_up(current_file)
+ host.save(new_filepath)
diff --git a/openpype/hosts/houdini/plugins/publish/increment_current_file_deadline.py b/openpype/hosts/houdini/plugins/publish/increment_current_file_deadline.py
new file mode 100644
index 0000000000..faa015f739
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/increment_current_file_deadline.py
@@ -0,0 +1,35 @@
+import pyblish.api
+
+import hou
+from openpype.api import version_up
+from openpype.action import get_errored_plugins_from_data
+
+
+class IncrementCurrentFileDeadline(pyblish.api.ContextPlugin):
+ """Increment the current file.
+
+ Saves the current scene with an increased version number.
+
+ """
+
+ label = "Increment current file"
+ order = pyblish.api.IntegratorOrder + 9.0
+ hosts = ["houdini"]
+ targets = ["deadline"]
+
+ def process(self, context):
+
+ errored_plugins = get_errored_plugins_from_data(context)
+ if any(
+ plugin.__name__ == "HoudiniSubmitPublishDeadline"
+ for plugin in errored_plugins
+ ):
+ raise RuntimeError(
+ "Skipping incrementing current file because "
+ "submission to deadline failed."
+ )
+
+ current_filepath = context.data["currentFile"]
+ new_filepath = version_up(current_filepath)
+
+ hou.hipFile.save(file_name=new_filepath, save_to_recent_files=True)
diff --git a/openpype/hosts/houdini/plugins/publish/save_scene.py b/openpype/hosts/houdini/plugins/publish/save_scene.py
new file mode 100644
index 0000000000..1b12efa603
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/save_scene.py
@@ -0,0 +1,37 @@
+import pyblish.api
+import avalon.api
+
+
+class SaveCurrentScene(pyblish.api.InstancePlugin):
+ """Save current scene"""
+
+ label = "Save current file"
+ order = pyblish.api.IntegratorOrder - 0.49
+ hosts = ["houdini"]
+ families = ["usdrender",
+ "redshift_rop"]
+ targets = ["local"]
+
+ def process(self, instance):
+
+ # This should be a ContextPlugin, but this is a workaround
+ # for a bug in pyblish to run once for a family: issue #250
+ context = instance.context
+ key = "__hasRun{}".format(self.__class__.__name__)
+ if context.data.get(key, False):
+ return
+ else:
+ context.data[key] = True
+
+ # Filename must not have changed since collecting
+ host = avalon.api.registered_host()
+ current_file = host.current_file()
+ assert context.data['currentFile'] == current_file, (
+ "Collected filename from current scene name."
+ )
+
+ if host.has_unsaved_changes():
+ self.log.info("Saving current file..")
+ host.save_file(current_file)
+ else:
+ self.log.debug("No unsaved changes, skipping file save..")
diff --git a/openpype/hosts/houdini/plugins/publish/save_scene_deadline.py b/openpype/hosts/houdini/plugins/publish/save_scene_deadline.py
new file mode 100644
index 0000000000..a0efd0610c
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/save_scene_deadline.py
@@ -0,0 +1,23 @@
+import pyblish.api
+
+
+class SaveCurrentSceneDeadline(pyblish.api.ContextPlugin):
+ """Save current scene"""
+
+ label = "Save current file"
+ order = pyblish.api.IntegratorOrder - 0.49
+ hosts = ["houdini"]
+ targets = ["deadline"]
+
+ def process(self, context):
+ import hou
+
+ assert (
+ context.data["currentFile"] == hou.hipFile.path()
+ ), "Collected filename from current scene name."
+
+ if hou.hipFile.hasUnsavedChanges():
+ self.log.info("Saving current file..")
+ hou.hipFile.save(save_to_recent_files=True)
+ else:
+ self.log.debug("No unsaved changes, skipping file save..")
diff --git a/openpype/hosts/houdini/plugins/publish/valiate_vdb_input_node.py b/openpype/hosts/houdini/plugins/publish/valiate_vdb_input_node.py
index 7b23d73ac7..0ae1bc94eb 100644
--- a/openpype/hosts/houdini/plugins/publish/valiate_vdb_input_node.py
+++ b/openpype/hosts/houdini/plugins/publish/valiate_vdb_input_node.py
@@ -3,7 +3,7 @@ import openpype.api
class ValidateVDBInputNode(pyblish.api.InstancePlugin):
- """Validate that the node connected to the output node is of type VDB
+ """Validate that the node connected to the output node is of type VDB.
Regardless of the amount of VDBs create the output will need to have an
equal amount of VDBs, points, primitives and vertices
@@ -24,8 +24,9 @@ class ValidateVDBInputNode(pyblish.api.InstancePlugin):
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
- raise RuntimeError("Node connected to the output node is not"
- "of type VDB!")
+ raise RuntimeError(
+ "Node connected to the output node is not" "of type VDB!"
+ )
@classmethod
def get_invalid(cls, instance):
diff --git a/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py b/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py
new file mode 100644
index 0000000000..8fe1b44b7a
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py
@@ -0,0 +1,132 @@
+import pyblish.api
+import openpype.api
+
+from collections import defaultdict
+
+
+class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin):
+ """Validate Alembic ROP Primitive to Detail attribute is consistent.
+
+ The Alembic ROP crashes Houdini whenever an attribute in the "Primitive to
+ Detail" parameter exists on only a part of the primitives that belong to
+ the same hierarchy path. Whenever it encounters inconsistent values,
+ specifically where some are empty as opposed to others then Houdini
+ crashes. (Tested in Houdini 17.5.229)
+
+ """
+
+ order = openpype.api.ValidateContentsOrder + 0.1
+ families = ["pointcache"]
+ hosts = ["houdini"]
+ label = "Validate Primitive to Detail (Abc)"
+
+ def process(self, instance):
+ invalid = self.get_invalid(instance)
+ if invalid:
+ raise RuntimeError(
+ "Primitives found with inconsistent primitive "
+ "to detail attributes. See log."
+ )
+
+ @classmethod
+ def get_invalid(cls, instance):
+
+ output = instance.data["output_node"]
+
+ rop = instance[0]
+ pattern = rop.parm("prim_to_detail_pattern").eval().strip()
+ if not pattern:
+ cls.log.debug(
+ "Alembic ROP has no 'Primitive to Detail' pattern. "
+ "Validation is ignored.."
+ )
+ return
+
+ build_from_path = rop.parm("build_from_path").eval()
+ if not build_from_path:
+ cls.log.debug(
+ "Alembic ROP has 'Build from Path' disabled. "
+ "Validation is ignored.."
+ )
+ return
+
+ path_attr = rop.parm("path_attrib").eval()
+ if not path_attr:
+ cls.log.error(
+ "The Alembic ROP node has no Path Attribute"
+ "value set, but 'Build Hierarchy from Attribute'"
+ "is enabled."
+ )
+ return [rop.path()]
+
+ # Let's assume each attribute is explicitly named for now and has no
+ # wildcards for Primitive to Detail. This simplifies the check.
+ cls.log.debug("Checking Primitive to Detail pattern: %s" % pattern)
+ cls.log.debug("Checking with path attribute: %s" % path_attr)
+
+ # Check if the primitive attribute exists
+ frame = instance.data.get("startFrame", 0)
+ geo = output.geometryAtFrame(frame)
+
+ # If there are no primitives on the start frame then it might be
+ # something that is emitted over time. As such we can't actually
+ # validate whether the attributes exist, because they won't exist
+ # yet. In that case, just warn the user and allow it.
+ if len(geo.iterPrims()) == 0:
+ cls.log.warning(
+ "No primitives found on current frame. Validation"
+ " for Primitive to Detail will be skipped."
+ )
+ return
+
+ attrib = geo.findPrimAttrib(path_attr)
+ if not attrib:
+ cls.log.info(
+ "Geometry Primitives are missing "
+ "path attribute: `%s`" % path_attr
+ )
+ return [output.path()]
+
+ # Ensure at least a single string value is present
+ if not attrib.strings():
+ cls.log.info(
+ "Primitive path attribute has no "
+ "string values: %s" % path_attr
+ )
+ return [output.path()]
+
+ paths = None
+ for attr in pattern.split(" "):
+ if not attr.strip():
+ # Ignore empty values
+ continue
+
+ # Check if the primitive attribute exists
+ attrib = geo.findPrimAttrib(attr)
+ if not attrib:
+ # It is allowed to not have the attribute at all
+ continue
+
+ # The issue can only happen if at least one string attribute is
+ # present. So we ignore cases with no values whatsoever.
+ if not attrib.strings():
+ continue
+
+ check = defaultdict(set)
+ values = geo.primStringAttribValues(attr)
+ if paths is None:
+ paths = geo.primStringAttribValues(path_attr)
+
+ for path, value in zip(paths, values):
+ check[path].add(value)
+
+ for path, values in check.items():
+ # Whenever a single path has multiple values for the
+ # Primitive to Detail attribute then we consider it
+ # inconsistent and invalidate the ROP node's content.
+ if len(values) > 1:
+ cls.log.warning(
+ "Path has multiple values: %s (path: %s)"
+ % (list(values), path)
+ )
+ return [output.path()]
diff --git a/openpype/hosts/houdini/plugins/publish/validate_alembic_face_sets.py b/openpype/hosts/houdini/plugins/publish/validate_alembic_face_sets.py
new file mode 100644
index 0000000000..e9126ffef0
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_alembic_face_sets.py
@@ -0,0 +1,37 @@
+import pyblish.api
+import openpype.api
+
+
+class ValidateAlembicROPFaceSets(pyblish.api.InstancePlugin):
+ """Validate Face Sets are disabled for extraction to pointcache.
+
+ When groups are saved as Face Sets with the Alembic these show up
+ as shadingEngine connections in Maya - however, with animated groups
+ these connections in Maya won't work as expected, it won't update per
+ frame. Additionally, it can break shader assignments in some cases
+ where it requires to first break this connection to allow a shader to
+ be assigned.
+
+ It is allowed to include Face Sets, so only an issue is logged to
+ identify that it could introduce issues down the pipeline.
+
+ """
+
+ order = openpype.api.ValidateContentsOrder + 0.1
+ families = ["pointcache"]
+ hosts = ["houdini"]
+ label = "Validate Alembic ROP Face Sets"
+
+ def process(self, instance):
+
+ rop = instance[0]
+ facesets = rop.parm("facesets").eval()
+
+ # 0 = No Face Sets
+ # 1 = Save Non-Empty Groups as Face Sets
+ # 2 = Save All Groups As Face Sets
+ if facesets != 0:
+ self.log.warning(
+ "Alembic ROP saves 'Face Sets' for Geometry. "
+ "Are you sure you want this?"
+ )
diff --git a/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py b/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py
index e8596b739d..17c9da837a 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py
@@ -1,9 +1,9 @@
import pyblish.api
-import openpype.api
+import openpype.api
class ValidateAlembicInputNode(pyblish.api.InstancePlugin):
- """Validate that the node connected to the output is correct
+ """Validate that the node connected to the output is correct.
The connected node cannot be of the following types for Alembic:
- VDB
@@ -11,7 +11,7 @@ class ValidateAlembicInputNode(pyblish.api.InstancePlugin):
"""
- order = openpype.api.ValidateContentsOrder + 0.1
+ order = openpype.api.ValidateContentsOrder + 0.1
families = ["pointcache"]
hosts = ["houdini"]
label = "Validate Input Node (Abc)"
@@ -19,19 +19,35 @@ class ValidateAlembicInputNode(pyblish.api.InstancePlugin):
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
- raise RuntimeError("Node connected to the output node incorrect")
+ raise RuntimeError(
+ "Primitive types found that are not supported "
+ "for Alembic output."
+ )
@classmethod
def get_invalid(cls, instance):
- invalid_nodes = ["VDB", "Volume"]
+ invalid_prim_types = ["VDB", "Volume"]
node = instance.data["output_node"]
- prims = node.geometry().prims()
+ if not hasattr(node, "geometry"):
+ # In the case someone has explicitly set an Object
+ # node instead of a SOP node in Geometry context
+ # then for now we ignore - this allows us to also
+ # export object transforms.
+ cls.log.warning("No geometry output node found, skipping check..")
+ return
- for prim in prims:
- prim_type = prim.type().name()
- if prim_type in invalid_nodes:
- cls.log.error("Found a primitive which is of type '%s' !"
- % prim_type)
- return [instance]
+ frame = instance.data.get("startFrame", 0)
+ geo = node.geometryAtFrame(frame)
+
+ invalid = False
+ for prim_type in invalid_prim_types:
+ if geo.countPrimType(prim_type) > 0:
+ cls.log.error(
+ "Found a primitive which is of type '%s' !" % prim_type
+ )
+ invalid = True
+
+ if invalid:
+ return [instance]
diff --git a/openpype/hosts/houdini/plugins/publish/validate_animation_settings.py b/openpype/hosts/houdini/plugins/publish/validate_animation_settings.py
index a42c3696da..5eb8f93d03 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_animation_settings.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_animation_settings.py
@@ -29,8 +29,9 @@ class ValidateAnimationSettings(pyblish.api.InstancePlugin):
invalid = self.get_invalid(instance)
if invalid:
- raise RuntimeError("Output settings do no match for '%s'" %
- instance)
+ raise RuntimeError(
+ "Output settings do not match for '%s'" % instance
+ )
@classmethod
def get_invalid(cls, instance):
diff --git a/openpype/hosts/houdini/plugins/publish/validate_bypass.py b/openpype/hosts/houdini/plugins/publish/validate_bypass.py
index 9118ae0e8c..79c67c3008 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_bypass.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_bypass.py
@@ -18,12 +18,17 @@ class ValidateBypassed(pyblish.api.InstancePlugin):
def process(self, instance):
+ if len(instance) == 0:
+ # Ignore instances without any nodes
+ # e.g. in memory bootstrap instances
+ return
+
invalid = self.get_invalid(instance)
if invalid:
rop = invalid[0]
raise RuntimeError(
- "ROP node %s is set to bypass, publishing cannot continue.." %
- rop.path()
+ "ROP node %s is set to bypass, publishing cannot continue.."
+ % rop.path()
)
@classmethod
diff --git a/openpype/hosts/houdini/plugins/publish/validate_camera_rop.py b/openpype/hosts/houdini/plugins/publish/validate_camera_rop.py
index ca75579267..a0919e1323 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_camera_rop.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_camera_rop.py
@@ -6,9 +6,9 @@ class ValidateCameraROP(pyblish.api.InstancePlugin):
"""Validate Camera ROP settings."""
order = openpype.api.ValidateContentsOrder
- families = ['camera']
- hosts = ['houdini']
- label = 'Camera ROP'
+ families = ["camera"]
+ hosts = ["houdini"]
+ label = "Camera ROP"
def process(self, instance):
@@ -16,8 +16,10 @@ class ValidateCameraROP(pyblish.api.InstancePlugin):
node = instance[0]
if node.parm("use_sop_path").eval():
- raise RuntimeError("Alembic ROP for Camera export should not be "
- "set to 'Use Sop Path'. Please disable.")
+ raise RuntimeError(
+ "Alembic ROP for Camera export should not be "
+ "set to 'Use Sop Path'. Please disable."
+ )
# Get the root and objects parameter of the Alembic ROP node
root = node.parm("root").eval()
@@ -34,8 +36,8 @@ class ValidateCameraROP(pyblish.api.InstancePlugin):
if not camera:
raise ValueError("Camera path does not exist: %s" % path)
- if not camera.type().name() == "cam":
- raise ValueError("Object set in Alembic ROP is not a camera: "
- "%s (type: %s)" % (camera, camera.type().name()))
-
-
+ if camera.type().name() != "cam":
+ raise ValueError(
+ "Object set in Alembic ROP is not a camera: "
+ "%s (type: %s)" % (camera, camera.type().name())
+ )
diff --git a/openpype/hosts/houdini/plugins/publish/validate_cop_output_node.py b/openpype/hosts/houdini/plugins/publish/validate_cop_output_node.py
new file mode 100644
index 0000000000..543539ffe3
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_cop_output_node.py
@@ -0,0 +1,60 @@
+import pyblish.api
+
+
+class ValidateCopOutputNode(pyblish.api.InstancePlugin):
+ """Validate the instance COP Output Node.
+
+ This will ensure:
+ - The COP Path is set.
+ - The COP Path refers to an existing object.
+ - The COP Path node is a COP node.
+
+ """
+
+ order = pyblish.api.ValidatorOrder
+ families = ["imagesequence"]
+ hosts = ["houdini"]
+ label = "Validate COP Output Node"
+
+ def process(self, instance):
+
+ invalid = self.get_invalid(instance)
+ if invalid:
+ raise RuntimeError(
+ "Output node(s) `%s` are incorrect. "
+ "See plug-in log for details." % invalid
+ )
+
+ @classmethod
+ def get_invalid(cls, instance):
+
+ import hou
+
+ output_node = instance.data["output_node"]
+
+ if output_node is None:
+ node = instance[0]
+ cls.log.error(
+ "COP Output node in '%s' does not exist. "
+ "Ensure a valid COP output path is set." % node.path()
+ )
+
+ return [node.path()]
+
+ # Output node must be a Sop node.
+ if not isinstance(output_node, hou.CopNode):
+ cls.log.error(
+ "Output node %s is not a COP node. "
+ "COP Path must point to a COP node, "
+ "instead found category type: %s"
+ % (output_node.path(), output_node.type().category().name())
+ )
+ return [output_node.path()]
+
+ # For the sake of completeness also assert the category type
+ # is Cop2 to avoid potential edge case scenarios even though
+ # the isinstance check above should be stricter than this category
+ assert output_node.type().category().name() == "Cop2", (
+ "Output node %s is not of category Cop2. This is a bug.."
+ % output_node.path()
+ )
diff --git a/openpype/hosts/houdini/plugins/publish/validate_file_extension.py b/openpype/hosts/houdini/plugins/publish/validate_file_extension.py
new file mode 100644
index 0000000000..b26d28a1e7
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_file_extension.py
@@ -0,0 +1,59 @@
+import os
+import pyblish.api
+
+from openpype.hosts.houdini.api import lib
+
+
+class ValidateFileExtension(pyblish.api.InstancePlugin):
+ """Validate the output file extension fits the output family.
+
+ File extensions:
+ - Pointcache must be .abc
+ - Camera must be .abc
+ - VDB must be .vdb
+
+ """
+
+ order = pyblish.api.ValidatorOrder
+ families = ["pointcache", "camera", "vdbcache"]
+ hosts = ["houdini"]
+ label = "Output File Extension"
+
+ family_extensions = {
+ "pointcache": ".abc",
+ "camera": ".abc",
+ "vdbcache": ".vdb",
+ }
+
+ def process(self, instance):
+
+ invalid = self.get_invalid(instance)
+ if invalid:
+ raise RuntimeError(
+ "ROP node has incorrect " "file extension: %s" % invalid
+ )
+
+ @classmethod
+ def get_invalid(cls, instance):
+
+ # Get ROP node from instance
+ node = instance[0]
+
+ # Create lookup for current family in instance
+ families = []
+ family = instance.data.get("family", None)
+ if family:
+ families.append(family)
+ families = set(families)
+
+ # Perform extension check
+ output = lib.get_output_parameter(node).eval()
+ _, output_extension = os.path.splitext(output)
+
+ for family in families:
+ extension = cls.family_extensions.get(family, None)
+ if extension is None:
+ raise RuntimeError("Unsupported family: %s" % family)
+
+ if output_extension != extension:
+ return [node.path()]
diff --git a/openpype/hosts/houdini/plugins/publish/validate_frame_token.py b/openpype/hosts/houdini/plugins/publish/validate_frame_token.py
new file mode 100644
index 0000000000..76b5910576
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_frame_token.py
@@ -0,0 +1,51 @@
+import pyblish.api
+
+from openpype.hosts.houdini.api import lib
+
+
+class ValidateFrameToken(pyblish.api.InstancePlugin):
+ """Validate if the unexpanded string contains the frame ('$F') token.
+
+ This validator will *only* check the output parameter of the node if
+ the Valid Frame Range is not set to 'Render Current Frame'
+
+ Rules:
+ If you render out a frame range it is mandatory to have the
+ frame token - '$F4' or similar - to ensure that each frame gets
+ written. If this is not the case you will override the same file
+ every time a frame is written out.
+
+ Examples:
+ Good: 'my_vdb_cache.$F4.vdb'
+ Bad: 'my_vdb_cache.vdb'
+
+ """
+
+ order = pyblish.api.ValidatorOrder
+ label = "Validate Frame Token"
+ families = ["vdbcache"]
+
+ def process(self, instance):
+
+ invalid = self.get_invalid(instance)
+ if invalid:
+ raise RuntimeError(
+ "Output settings do not match for '%s'" % instance
+ )
+
+ @classmethod
+ def get_invalid(cls, instance):
+
+ node = instance[0]
+
+ # Check trange parm, 0 means Render Current Frame
+ frame_range = node.evalParm("trange")
+ if frame_range == 0:
+ return []
+
+ output_parm = lib.get_output_parameter(node)
+ unexpanded_str = output_parm.unexpandedString()
+
+ if "$F" not in unexpanded_str:
+ cls.log.error("No frame token found in '%s'" % node.path())
+ return [instance]
diff --git a/openpype/hosts/houdini/plugins/publish/validate_houdini_license_category.py b/openpype/hosts/houdini/plugins/publish/validate_houdini_license_category.py
new file mode 100644
index 0000000000..f5f03aa844
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_houdini_license_category.py
@@ -0,0 +1,30 @@
+import pyblish.api
+
+
+class ValidateHoudiniCommercialLicense(pyblish.api.InstancePlugin):
+ """Validate the Houdini instance runs a Commercial license.
+
+ When extracting USD files from a non-commercial Houdini license, even with
+ Houdini Indie license, the resulting files will get "scrambled" with
+ a license protection and get a special .usdnc or .usdlc suffix.
+
+ This currently breaks the Subset/representation pipeline so we disallow
+ any publish with those licenses. Only the commercial license is valid.
+
+ """
+
+ order = pyblish.api.ValidatorOrder
+ families = ["usd"]
+ hosts = ["houdini"]
+ label = "Houdini Commercial License"
+
+ def process(self, instance):
+
+ import hou
+
+ license = hou.licenseCategory()
+ if license != hou.licenseCategoryType.Commercial:
+ raise RuntimeError(
+ "USD Publishing requires a full Commercial "
+ "license. You are on: %s" % license
+ )
diff --git a/openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py b/openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py
index a735f4b64b..cd72877949 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py
@@ -6,18 +6,18 @@ class ValidateIntermediateDirectoriesChecked(pyblish.api.InstancePlugin):
"""Validate Create Intermediate Directories is enabled on ROP node."""
order = openpype.api.ValidateContentsOrder
- families = ['pointcache',
- 'camera',
- 'vdbcache']
- hosts = ['houdini']
- label = 'Create Intermediate Directories Checked'
+ families = ["pointcache", "camera", "vdbcache"]
+ hosts = ["houdini"]
+ label = "Create Intermediate Directories Checked"
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
- raise RuntimeError("Found ROP node with Create Intermediate "
- "Directories turned off: %s" % invalid)
+ raise RuntimeError(
+ "Found ROP node with Create Intermediate "
+ "Directories turned off: %s" % invalid
+ )
@classmethod
def get_invalid(cls, instance):
diff --git a/openpype/hosts/houdini/plugins/publish/validate_no_errors.py b/openpype/hosts/houdini/plugins/publish/validate_no_errors.py
new file mode 100644
index 0000000000..f58e5f8d7d
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_no_errors.py
@@ -0,0 +1,65 @@
+import pyblish.api
+import openpype.api
+import hou
+
+
+def cook_in_range(node, start, end):
+ current = hou.intFrame()
+ if start <= current <= end:
+ # Allow cooking current frame since we're in frame range
+ node.cook(force=False)
+ else:
+ node.cook(force=False, frame_range=(start, start))
+
+
+def get_errors(node):
+ """Get cooking errors.
+
+ If node already has errors check whether it needs to recook
+ If so, then recook first to see if that solves it.
+
+ """
+ if node.errors() and node.needsToCook():
+ node.cook()
+
+ return node.errors()
+
+
+class ValidateNoErrors(pyblish.api.InstancePlugin):
+ """Validate the Instance has no current cooking errors."""
+
+ order = openpype.api.ValidateContentsOrder
+ hosts = ["houdini"]
+ label = "Validate no errors"
+
+ def process(self, instance):
+
+ validate_nodes = []
+
+ if len(instance) > 0:
+ validate_nodes.append(instance[0])
+ output_node = instance.data.get("output_node")
+ if output_node:
+ validate_nodes.append(output_node)
+
+ for node in validate_nodes:
+ self.log.debug("Validating for errors: %s" % node.path())
+ errors = get_errors(node)
+
+ if errors:
+ # If there are current errors, then try an unforced cook
+ # to see whether the error will disappear.
+ self.log.debug(
+ "Recooking to revalidate error "
+ "is up to date for: %s" % node.path()
+ )
+ current_frame = hou.intFrame()
+ start = instance.data.get("frameStart", current_frame)
+ end = instance.data.get("frameEnd", current_frame)
+ cook_in_range(node, start=start, end=end)
+
+ # Check for errors again after the forced recook
+ errors = get_errors(node)
+ if errors:
+ self.log.error(errors)
+ raise RuntimeError("Node has errors: %s" % node.path())
diff --git a/openpype/hosts/houdini/plugins/publish/validate_outnode_exists.py b/openpype/hosts/houdini/plugins/publish/validate_outnode_exists.py
deleted file mode 100644
index bfa2d38f1a..0000000000
--- a/openpype/hosts/houdini/plugins/publish/validate_outnode_exists.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import pyblish.api
-import openpype.api
-
-
-class ValidatOutputNodeExists(pyblish.api.InstancePlugin):
- """Validate if node attribute Create intermediate Directories is turned on
-
- Rules:
- * The node must have Create intermediate Directories turned on to
- ensure the output file will be created
-
- """
-
- order = openpype.api.ValidateContentsOrder
- families = ["*"]
- hosts = ['houdini']
- label = "Output Node Exists"
-
- def process(self, instance):
- invalid = self.get_invalid(instance)
- if invalid:
- raise RuntimeError("Could not find output node(s)!")
-
- @classmethod
- def get_invalid(cls, instance):
-
- import hou
-
- result = set()
-
- node = instance[0]
- if node.type().name() == "alembic":
- soppath_parm = "sop_path"
- else:
- # Fall back to geometry node
- soppath_parm = "soppath"
-
- sop_path = node.parm(soppath_parm).eval()
- output_node = hou.node(sop_path)
-
- if output_node is None:
- cls.log.error("Node at '%s' does not exist" % sop_path)
- result.add(node.path())
-
- # Added cam as this is a legit output type (cameras can't
- if output_node.type().name() not in ["output", "cam"]:
- cls.log.error("SOP Path does not end path at output node")
- result.add(node.path())
-
- return result
diff --git a/openpype/hosts/houdini/plugins/publish/validate_output_node.py b/openpype/hosts/houdini/plugins/publish/validate_output_node.py
index 5e20ee40d6..0b60ab5c48 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_output_node.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_output_node.py
@@ -14,8 +14,7 @@ class ValidateOutputNode(pyblish.api.InstancePlugin):
"""
order = pyblish.api.ValidatorOrder
- families = ["pointcache",
- "vdbcache"]
+ families = ["pointcache", "vdbcache"]
hosts = ["houdini"]
label = "Validate Output Node"
@@ -23,8 +22,10 @@ class ValidateOutputNode(pyblish.api.InstancePlugin):
invalid = self.get_invalid(instance)
if invalid:
- raise RuntimeError("Output node(s) `%s` are incorrect. "
- "See plug-in log for details." % invalid)
+ raise RuntimeError(
+ "Output node(s) `%s` are incorrect. "
+ "See plug-in log for details." % invalid
+ )
@classmethod
def get_invalid(cls, instance):
@@ -35,39 +36,42 @@ class ValidateOutputNode(pyblish.api.InstancePlugin):
if output_node is None:
node = instance[0]
- cls.log.error("SOP Output node in '%s' does not exist. "
- "Ensure a valid SOP output path is set."
- % node.path())
+ cls.log.error(
+ "SOP Output node in '%s' does not exist. "
+ "Ensure a valid SOP output path is set." % node.path()
+ )
return [node.path()]
# Output node must be a Sop node.
if not isinstance(output_node, hou.SopNode):
- cls.log.error("Output node %s is not a SOP node. "
- "SOP Path must point to a SOP node, "
- "instead found category type: %s" % (
- output_node.path(),
- output_node.type().category().name()
- )
- )
+ cls.log.error(
+ "Output node %s is not a SOP node. "
+ "SOP Path must point to a SOP node, "
+ "instead found category type: %s"
+ % (output_node.path(), output_node.type().category().name())
+ )
return [output_node.path()]
# For the sake of completeness also assert the category type
# is Sop to avoid potential edge case scenarios even though
# the isinstance check above should be stricter than this category
assert output_node.type().category().name() == "Sop", (
- "Output node %s is not of category Sop. This is a bug.." %
- output_node.path()
+ "Output node %s is not of category Sop. This is a bug.."
+ % output_node.path()
)
# Check if output node has incoming connections
if not output_node.inputConnections():
- cls.log.error("Output node `%s` has no incoming connections"
- % output_node.path())
+ cls.log.error(
+ "Output node `%s` has no incoming connections"
+ % output_node.path()
+ )
return [output_node.path()]
# Ensure the output node has at least Geometry data
if not output_node.geometry():
- cls.log.error("Output node `%s` has no geometry data."
- % output_node.path())
+ cls.log.error(
+ "Output node `%s` has no geometry data." % output_node.path()
+ )
return [output_node.path()]
diff --git a/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py b/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py
index 608e236198..3c15532be8 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py
@@ -19,8 +19,9 @@ class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin):
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
- raise RuntimeError("See log for details. "
- "Invalid nodes: {0}".format(invalid))
+ raise RuntimeError(
+ "See log for details. " "Invalid nodes: {0}".format(invalid)
+ )
@classmethod
def get_invalid(cls, instance):
@@ -28,48 +29,68 @@ class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin):
import hou
output = instance.data["output_node"]
- prims = output.geometry().prims()
rop = instance[0]
build_from_path = rop.parm("build_from_path").eval()
if not build_from_path:
- cls.log.debug("Alembic ROP has 'Build from Path' disabled. "
- "Validation is ignored..")
+ cls.log.debug(
+ "Alembic ROP has 'Build from Path' disabled. "
+ "Validation is ignored.."
+ )
return
path_attr = rop.parm("path_attrib").eval()
if not path_attr:
- cls.log.error("The Alembic ROP node has no Path Attribute"
- "value set, but 'Build Hierarchy from Attribute'"
- "is enabled.")
+ cls.log.error(
+ "The Alembic ROP node has no Path Attribute "
+ "value set, but 'Build Hierarchy from Attribute' "
+ "is enabled."
+ )
return [rop.path()]
cls.log.debug("Checking for attribute: %s" % path_attr)
- missing_attr = []
- invalid_attr = []
- for prim in prims:
+ # Check if the primitive attribute exists
+ frame = instance.data.get("startFrame", 0)
+ geo = output.geometryAtFrame(frame)
- try:
- path = prim.stringAttribValue(path_attr)
- except hou.OperationFailed:
- # Attribute does not exist.
- missing_attr.append(prim)
- continue
+ # If there are no primitives on the current frame then we can't
+ # check whether the path names are correct. So we'll just issue a
+ # warning that the check can't be done consistently and skip
+ # validation.
+ if len(geo.iterPrims()) == 0:
+ cls.log.warning(
+ "No primitives found on current frame. Validation"
+ " for primitive hierarchy paths will be skipped,"
+ " thus can't be validated."
+ )
+ return
- if not path:
- # Empty path value is invalid.
- invalid_attr.append(prim)
- continue
-
- if missing_attr:
- cls.log.info("Prims are missing attribute `%s`" % path_attr)
-
- if invalid_attr:
- cls.log.info("Prims have no value for attribute `%s` "
- "(%s of %s prims)" % (path_attr,
- len(invalid_attr),
- len(prims)))
-
- if missing_attr or invalid_attr:
+ # Check if there are any values for the primitives
+ attrib = geo.findPrimAttrib(path_attr)
+ if not attrib:
+ cls.log.info(
+ "Geometry Primitives are missing "
+ "path attribute: `%s`" % path_attr
+ )
+ return [output.path()]
+
+ # Ensure at least a single string value is present
+ if not attrib.strings():
+ cls.log.info(
+ "Primitive path attribute has no "
+ "string values: %s" % path_attr
+ )
+ return [output.path()]
+
+ paths = geo.primStringAttribValues(path_attr)
+ # Ensure all primitives are set to a valid path
+ # Collect all invalid primitive numbers
+ invalid_prims = [i for i, path in enumerate(paths) if not path]
+ if invalid_prims:
+ num_prims = len(geo.iterPrims()) # faster than len(geo.prims())
+ cls.log.info(
+ "Prims have no value for attribute `%s` "
+ "(%s of %s prims)" % (path_attr, len(invalid_prims), num_prims)
+ )
return [output.path()]
diff --git a/openpype/hosts/houdini/plugins/publish/validate_remote_publish.py b/openpype/hosts/houdini/plugins/publish/validate_remote_publish.py
new file mode 100644
index 0000000000..95c66edff0
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_remote_publish.py
@@ -0,0 +1,43 @@
+import pyblish.api
+import openpype.api
+
+from openpype.hosts.houdini.api import lib
+
+import hou
+
+
+class ValidateRemotePublishOutNode(pyblish.api.ContextPlugin):
+ """Validate the remote publish out node exists for Deadline to trigger."""
+
+ order = pyblish.api.ValidatorOrder - 0.4
+ families = ["*"]
+ hosts = ["houdini"]
+ targets = ["deadline"]
+ label = "Remote Publish ROP node"
+ actions = [openpype.api.RepairContextAction]
+
+ def process(self, context):
+
+ cmd = "import openpype.hosts.houdini.api.lib; openpype.hosts.houdini.api.lib.publish_remote()"
+
+ node = hou.node("/out/REMOTE_PUBLISH")
+ if not node:
+ raise RuntimeError("Missing REMOTE_PUBLISH node.")
+
+ # We ensure it's a shell node and that it has the pre-render script
+ # set correctly. Plus the shell script it will trigger should be
+ # completely empty (doing nothing)
+ assert node.type().name() == "shell", "Must be shell ROP node"
+ assert node.parm("command").eval() == "", "Must have no command"
+ assert not node.parm("shellexec").eval(), "Must not execute in shell"
+ assert (
+ node.parm("prerender").eval() == cmd
+ ), "REMOTE_PUBLISH node does not have correct prerender script."
+ assert (
+ node.parm("lprerender").eval() == "python"
+ ), "REMOTE_PUBLISH node prerender script type not set to 'python'"
+
+ @classmethod
+ def repair(cls, context):
+ """(Re)create the node if it fails to pass validation."""
+ lib.create_remote_publish_node(force=True)
diff --git a/openpype/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py b/openpype/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py
new file mode 100644
index 0000000000..b681fd0ee1
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py
@@ -0,0 +1,35 @@
+import pyblish.api
+import openpype.api
+
+import hou
+
+
+class ValidateRemotePublishEnabled(pyblish.api.ContextPlugin):
+ """Validate the remote publish node is *not* bypassed."""
+
+ order = pyblish.api.ValidatorOrder - 0.39
+ families = ["*"]
+ hosts = ["houdini"]
+ targets = ["deadline"]
+ label = "Remote Publish ROP enabled"
+ actions = [openpype.api.RepairContextAction]
+
+ def process(self, context):
+
+ node = hou.node("/out/REMOTE_PUBLISH")
+ if not node:
+ raise RuntimeError("Missing REMOTE_PUBLISH node.")
+
+ if node.isBypassed():
+ raise RuntimeError("REMOTE_PUBLISH must not be bypassed.")
+
+ @classmethod
+ def repair(cls, context):
+ """(Re)create the node if it fails to pass validation."""
+
+ node = hou.node("/out/REMOTE_PUBLISH")
+ if not node:
+ raise RuntimeError("Missing REMOTE_PUBLISH node.")
+
+ cls.log.info("Disabling bypass on /out/REMOTE_PUBLISH")
+ node.bypass(False)
diff --git a/openpype/hosts/houdini/plugins/publish/validate_sop_output_node.py b/openpype/hosts/houdini/plugins/publish/validate_sop_output_node.py
new file mode 100644
index 0000000000..a5a07b1b1a
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_sop_output_node.py
@@ -0,0 +1,80 @@
+import pyblish.api
+
+
+class ValidateSopOutputNode(pyblish.api.InstancePlugin):
+ """Validate the instance SOP Output Node.
+
+ This will ensure:
+ - The SOP Path is set.
+ - The SOP Path refers to an existing object.
+ - The SOP Path node is a SOP node.
+ - The SOP Path node has at least one input connection (has an input)
+ - The SOP Path has geometry data.
+
+ """
+
+ order = pyblish.api.ValidatorOrder
+ families = ["pointcache", "vdbcache"]
+ hosts = ["houdini"]
+ label = "Validate Output Node"
+
+ def process(self, instance):
+
+ invalid = self.get_invalid(instance)
+ if invalid:
+ raise RuntimeError(
+ "Output node(s) `%s` are incorrect. "
+ "See plug-in log for details." % invalid
+ )
+
+ @classmethod
+ def get_invalid(cls, instance):
+
+ import hou
+
+ output_node = instance.data["output_node"]
+
+ if output_node is None:
+ node = instance[0]
+ cls.log.error(
+ "SOP Output node in '%s' does not exist. "
+ "Ensure a valid SOP output path is set." % node.path()
+ )
+
+ return [node.path()]
+
+ # Output node must be a Sop node.
+ if not isinstance(output_node, hou.SopNode):
+ cls.log.error(
+ "Output node %s is not a SOP node. "
+ "SOP Path must point to a SOP node, "
+ "instead found category type: %s"
+ % (output_node.path(), output_node.type().category().name())
+ )
+ return [output_node.path()]
+
+ # For the sake of completeness also assert the category type
+ # is Sop to avoid potential edge case scenarios even though
+ # the isinstance check above should be stricter than this category
+ assert output_node.type().category().name() == "Sop", (
+ "Output node %s is not of category Sop. This is a bug.."
+ % output_node.path()
+ )
+
+ # Ensure the node is cooked and succeeds to cook so we can correctly
+ # check for its geometry data.
+ if output_node.needsToCook():
+ cls.log.debug("Cooking node: %s" % output_node.path())
+ try:
+ output_node.cook()
+ except hou.Error as exc:
+ cls.log.error("Cook failed: %s" % exc)
+ cls.log.error(output_node.errors()[0])
+ return [output_node.path()]
+
+ # Ensure the output node has at least Geometry data
+ if not output_node.geometry():
+ cls.log.error(
+ "Output node `%s` has no geometry data." % output_node.path()
+ )
+ return [output_node.path()]
diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_layer_path_backslashes.py b/openpype/hosts/houdini/plugins/publish/validate_usd_layer_path_backslashes.py
new file mode 100644
index 0000000000..ac0181aed2
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_usd_layer_path_backslashes.py
@@ -0,0 +1,50 @@
+import pyblish.api
+
+import openpype.hosts.houdini.api.usd as hou_usdlib
+
+
+class ValidateUSDLayerPathBackslashes(pyblish.api.InstancePlugin):
+ """Validate USD loaded paths have no backslashes.
+
+ This is a crucial validation for HUSK USD rendering as Houdini's
+ USD Render ROP will fail to write out a .usd file for rendering that
+ correctly preserves the backslashes, e.g. it will incorrectly convert a
+ '\\t' to a TAB character disallowing HUSK to find those specific files.
+
+ This validation is redundant for usdModel since that flattens the model
+ before write. As such it will never have any used layers with a path.
+
+ """
+
+ order = pyblish.api.ValidatorOrder
+ families = ["usdSetDress", "usdShade", "usd", "usdrender"]
+ hosts = ["houdini"]
+ label = "USD Layer path backslashes"
+ optional = True
+
+ def process(self, instance):
+
+ rop = instance[0]
+ lop_path = hou_usdlib.get_usd_rop_loppath(rop)
+ stage = lop_path.stage(apply_viewport_overrides=False)
+
+ invalid = []
+ for layer in stage.GetUsedLayers():
+ references = layer.externalReferences
+
+ for ref in references:
+
+ # Ignore anonymous layers
+ if ref.startswith("anon:"):
+ continue
+
+ # If any backslashes in the path consider it invalid
+ if "\\" in ref:
+ self.log.error("Found invalid path: %s" % ref)
+ invalid.append(layer)
+
+ if invalid:
+ raise RuntimeError(
+ "Loaded layers have backslashes. "
+ "This is invalid for HUSK USD rendering."
+ )
diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_model_and_shade.py b/openpype/hosts/houdini/plugins/publish/validate_usd_model_and_shade.py
new file mode 100644
index 0000000000..2fd2f5eb9f
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_usd_model_and_shade.py
@@ -0,0 +1,76 @@
+import pyblish.api
+
+import openpype.hosts.houdini.api.usd as hou_usdlib
+
+
+from pxr import UsdShade, UsdRender, UsdLux
+
+
+def fullname(o):
+ """Get fully qualified class name"""
+ module = o.__module__
+ if module is None or module == str.__module__:
+ return o.__name__
+ return module + "." + o.__name__
+
+
+class ValidateUsdModel(pyblish.api.InstancePlugin):
+ """Validate USD Model.
+
+ Disallow Shaders, Render settings, products and vars and Lux lights.
+
+ """
+
+ order = pyblish.api.ValidatorOrder
+ families = ["usdModel"]
+ hosts = ["houdini"]
+ label = "Validate USD Model"
+ optional = True
+
+ disallowed = [
+ UsdShade.Shader,
+ UsdRender.Settings,
+ UsdRender.Product,
+ UsdRender.Var,
+ UsdLux.Light,
+ ]
+
+ def process(self, instance):
+
+ rop = instance[0]
+ lop_path = hou_usdlib.get_usd_rop_loppath(rop)
+ stage = lop_path.stage(apply_viewport_overrides=False)
+
+ invalid = []
+ for prim in stage.Traverse():
+
+ for klass in self.disallowed:
+ if klass(prim):
+ # Get full class name without pxr. prefix
+ name = fullname(klass).split("pxr.", 1)[-1]
+ path = str(prim.GetPath())
+ self.log.warning("Disallowed %s: %s" % (name, path))
+
+ invalid.append(prim)
+
+ if invalid:
+ prim_paths = sorted([str(prim.GetPath()) for prim in invalid])
+ raise RuntimeError("Found invalid primitives: %s" % prim_paths)
+
+
+class ValidateUsdShade(ValidateUsdModel):
+ """Validate usdShade.
+
+ Disallow Render settings, products, vars and Lux lights.
+
+ """
+
+ families = ["usdShade"]
+ label = "Validate USD Shade"
+
+ disallowed = [
+ UsdRender.Settings,
+ UsdRender.Product,
+ UsdRender.Var,
+ UsdLux.Light,
+ ]
diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_output_node.py b/openpype/hosts/houdini/plugins/publish/validate_usd_output_node.py
new file mode 100644
index 0000000000..1f10fafdf4
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_usd_output_node.py
@@ -0,0 +1,52 @@
+import pyblish.api
+
+
+class ValidateUSDOutputNode(pyblish.api.InstancePlugin):
+ """Validate the instance USD LOPs Output Node.
+
+ This will ensure:
+ - The LOP Path is set.
+ - The LOP Path refers to an existing object.
+ - The LOP Path node is a LOP node.
+
+ """
+
+ order = pyblish.api.ValidatorOrder
+ families = ["usd"]
+ hosts = ["houdini"]
+ label = "Validate Output Node (USD)"
+
+ def process(self, instance):
+
+ invalid = self.get_invalid(instance)
+ if invalid:
+ raise RuntimeError(
+ "Output node(s) `%s` are incorrect. "
+ "See plug-in log for details." % invalid
+ )
+
+ @classmethod
+ def get_invalid(cls, instance):
+
+ import hou
+
+ output_node = instance.data["output_node"]
+
+ if output_node is None:
+ node = instance[0]
+ cls.log.error(
+ "USD node '%s' LOP path does not exist. "
+ "Ensure a valid LOP path is set." % node.path()
+ )
+
+ return [node.path()]
+
+        # Output node must be a LOP node.
+ if not isinstance(output_node, hou.LopNode):
+ cls.log.error(
+ "Output node %s is not a LOP node. "
+ "LOP Path must point to a LOP node, "
+ "instead found category type: %s"
+ % (output_node.path(), output_node.type().category().name())
+ )
+ return [output_node.path()]
diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_render_product_names.py b/openpype/hosts/houdini/plugins/publish/validate_usd_render_product_names.py
new file mode 100644
index 0000000000..36336a03ae
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_usd_render_product_names.py
@@ -0,0 +1,31 @@
+import pyblish.api
+
+import os
+
+
+class ValidateUSDRenderProductNames(pyblish.api.InstancePlugin):
+ """Validate USD Render Product names are correctly set absolute paths."""
+
+ order = pyblish.api.ValidatorOrder
+ families = ["usdrender"]
+ hosts = ["houdini"]
+ label = "Validate USD Render Product Names"
+ optional = True
+
+ def process(self, instance):
+
+ invalid = []
+ for filepath in instance.data["files"]:
+
+ if not filepath:
+ invalid.append("Detected empty output filepath.")
+
+ if not os.path.isabs(filepath):
+ invalid.append(
+ "Output file path is not " "absolute path: %s" % filepath
+ )
+
+ if invalid:
+ for message in invalid:
+ self.log.error(message)
+ raise RuntimeError("USD Render Paths are invalid.")
diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_setdress.py b/openpype/hosts/houdini/plugins/publish/validate_usd_setdress.py
new file mode 100644
index 0000000000..fb1094e6b5
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_usd_setdress.py
@@ -0,0 +1,54 @@
+import pyblish.api
+
+import openpype.hosts.houdini.api.usd as hou_usdlib
+
+
+class ValidateUsdSetDress(pyblish.api.InstancePlugin):
+ """Validate USD Set Dress.
+
+ Must only have references or payloads. May not generate new mesh or
+ flattened meshes.
+
+ """
+
+ order = pyblish.api.ValidatorOrder
+ families = ["usdSetDress"]
+ hosts = ["houdini"]
+ label = "Validate USD Set Dress"
+ optional = True
+
+ def process(self, instance):
+
+ from pxr import UsdGeom
+
+ rop = instance[0]
+ lop_path = hou_usdlib.get_usd_rop_loppath(rop)
+ stage = lop_path.stage(apply_viewport_overrides=False)
+
+ invalid = []
+ for node in stage.Traverse():
+
+ if UsdGeom.Mesh(node):
+ # This solely checks whether there is any USD involved
+ # in this Prim's Stack and doesn't accurately tell us
+ # whether it was generated locally or not.
+ # TODO: More accurately track whether the Prim was created
+ # in the local scene
+ stack = node.GetPrimStack()
+ for sdf in stack:
+ path = sdf.layer.realPath
+ if path:
+ break
+ else:
+ prim_path = node.GetPath()
+ self.log.error(
+ "%s is not referenced geometry." % prim_path
+ )
+ invalid.append(node)
+
+ if invalid:
+ raise RuntimeError(
+ "SetDress contains local geometry. "
+ "This is not allowed, it must be an assembly "
+ "of referenced assets."
+ )
diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py
new file mode 100644
index 0000000000..fcfbf6b22d
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py
@@ -0,0 +1,41 @@
+import re
+
+import pyblish.api
+import openpype.api
+
+from avalon import io
+
+
+class ValidateUSDShadeModelExists(pyblish.api.InstancePlugin):
+ """Validate the Instance has no current cooking errors."""
+
+ order = openpype.api.ValidateContentsOrder
+ hosts = ["houdini"]
+ families = ["usdShade"]
+ label = "USD Shade model exists"
+
+ def process(self, instance):
+
+ asset = instance.data["asset"]
+ subset = instance.data["subset"]
+
+ # Assume shading variation starts after a dot separator
+ shade_subset = subset.split(".", 1)[0]
+ model_subset = re.sub("^usdShade", "usdModel", shade_subset)
+
+ asset_doc = io.find_one({"name": asset, "type": "asset"})
+ if not asset_doc:
+ raise RuntimeError("Asset does not exist: %s" % asset)
+
+ subset_doc = io.find_one(
+ {
+ "name": model_subset,
+ "type": "subset",
+ "parent": asset_doc["_id"],
+ }
+ )
+ if not subset_doc:
+ raise RuntimeError(
+ "USD Model subset not found: "
+ "%s (%s)" % (model_subset, asset)
+ )
diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py
new file mode 100644
index 0000000000..a77ca2f3cb
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py
@@ -0,0 +1,63 @@
+import pyblish.api
+import openpype.api
+
+import hou
+
+
+class ValidateUsdShadeWorkspace(pyblish.api.InstancePlugin):
+ """Validate USD Shading Workspace is correct version.
+
+ There have been some issues with outdated/erroneous Shading Workspaces
+ so this is to confirm everything is set as it should.
+
+ """
+
+ order = openpype.api.ValidateContentsOrder
+ hosts = ["houdini"]
+ families = ["usdShade"]
+ label = "USD Shade Workspace"
+
+ def process(self, instance):
+
+ rop = instance[0]
+ workspace = rop.parent()
+
+ definition = workspace.type().definition()
+ name = definition.nodeType().name()
+ library = definition.libraryFilePath()
+
+ all_definitions = hou.hda.definitionsInFile(library)
+ node_type, version = name.rsplit(":", 1)
+ version = float(version)
+
+ highest = version
+ for other_definition in all_definitions:
+ other_name = other_definition.nodeType().name()
+ other_node_type, other_version = other_name.rsplit(":", 1)
+ other_version = float(other_version)
+
+ if node_type != other_node_type:
+ continue
+
+ # Get highest version
+ highest = max(highest, other_version)
+
+ if version != highest:
+ raise RuntimeError(
+ "Shading Workspace is not the latest version."
+ " Found %s. Latest is %s." % (version, highest)
+ )
+
+ # There were some issues with the editable node not having the right
+        # configured path. So for now let's ensure that is correct too.
+ value = (
+ 'avalon://`chs("../asset_name")`/'
+ 'usdShade`chs("../model_variantname1")`.usd'
+ )
+ rop_value = rop.parm("lopoutput").rawValue()
+ if rop_value != value:
+ raise RuntimeError(
+ "Shading Workspace has invalid 'lopoutput'"
+ " parameter value. The Shading Workspace"
+ " needs to be reset to its default values."
+ )
diff --git a/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py b/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py
index 7b23d73ac7..0ae1bc94eb 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py
@@ -3,7 +3,7 @@ import openpype.api
class ValidateVDBInputNode(pyblish.api.InstancePlugin):
- """Validate that the node connected to the output node is of type VDB
+ """Validate that the node connected to the output node is of type VDB.
Regardless of the amount of VDBs create the output will need to have an
equal amount of VDBs, points, primitives and vertices
@@ -24,8 +24,9 @@ class ValidateVDBInputNode(pyblish.api.InstancePlugin):
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
- raise RuntimeError("Node connected to the output node is not"
- "of type VDB!")
+ raise RuntimeError(
+ "Node connected to the output node is not" "of type VDB!"
+ )
@classmethod
def get_invalid(cls, instance):
diff --git a/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py b/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py
new file mode 100644
index 0000000000..1ba840b71d
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py
@@ -0,0 +1,73 @@
+import pyblish.api
+import openpype.api
+import hou
+
+
+class ValidateVDBOutputNode(pyblish.api.InstancePlugin):
+ """Validate that the node connected to the output node is of type VDB.
+
+ Regardless of the amount of VDBs create the output will need to have an
+ equal amount of VDBs, points, primitives and vertices
+
+ A VDB is an inherited type of Prim, holds the following data:
+ - Primitives: 1
+ - Points: 1
+ - Vertices: 1
+ - VDBs: 1
+
+ """
+
+ order = openpype.api.ValidateContentsOrder + 0.1
+ families = ["vdbcache"]
+ hosts = ["houdini"]
+ label = "Validate Output Node (VDB)"
+
+ def process(self, instance):
+ invalid = self.get_invalid(instance)
+ if invalid:
+ raise RuntimeError(
+ "Node connected to the output node is not" " of type VDB!"
+ )
+
+ @classmethod
+ def get_invalid(cls, instance):
+
+ node = instance.data["output_node"]
+ if node is None:
+ cls.log.error(
+ "SOP path is not correctly set on "
+ "ROP node '%s'." % instance[0].path()
+ )
+ return [instance]
+
+ frame = instance.data.get("frameStart", 0)
+ geometry = node.geometryAtFrame(frame)
+ if geometry is None:
+ # No geometry data on this node, maybe the node hasn't cooked?
+ cls.log.error(
+ "SOP node has no geometry data. "
+ "Is it cooked? %s" % node.path()
+ )
+ return [node]
+
+ prims = geometry.prims()
+ nr_of_prims = len(prims)
+
+ # All primitives must be hou.VDB
+ invalid_prim = False
+ for prim in prims:
+ if not isinstance(prim, hou.VDB):
+ cls.log.error("Found non-VDB primitive: %s" % prim)
+ invalid_prim = True
+ if invalid_prim:
+ return [instance]
+
+ nr_of_points = len(geometry.points())
+ if nr_of_points != nr_of_prims:
+ cls.log.error("The number of primitives and points do not match")
+ return [instance]
+
+ for prim in prims:
+ if prim.numVertices() != 1:
+ cls.log.error("Found primitive with more than 1 vertex!")
+ return [instance]
diff --git a/openpype/hosts/houdini/startup/scripts/123.py b/openpype/hosts/houdini/startup/scripts/123.py
index 6d90b8352e..4233d68c15 100644
--- a/openpype/hosts/houdini/startup/scripts/123.py
+++ b/openpype/hosts/houdini/startup/scripts/123.py
@@ -1,5 +1,4 @@
from avalon import api, houdini
-import hou
def main():
diff --git a/openpype/modules/ftrack/python2_vendor/backports.functools_lru_cache/backports/__init__.py b/openpype/hosts/houdini/vendor/husdoutputprocessors/__init__.py
similarity index 100%
rename from openpype/modules/ftrack/python2_vendor/backports.functools_lru_cache/backports/__init__.py
rename to openpype/hosts/houdini/vendor/husdoutputprocessors/__init__.py
diff --git a/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py b/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py
new file mode 100644
index 0000000000..4071eb3e0c
--- /dev/null
+++ b/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py
@@ -0,0 +1,168 @@
+import hou
+import husdoutputprocessors.base as base
+import os
+import re
+import logging
+
+import colorbleed.usdlib as usdlib
+
+
+def _get_project_publish_template():
+ """Return publish template from database for current project"""
+ from avalon import io
+ project = io.find_one({"type": "project"},
+ projection={"config.template.publish": True})
+ return project["config"]["template"]["publish"]
+
+
+class AvalonURIOutputProcessor(base.OutputProcessorBase):
+ """Process Avalon URIs into their full path equivalents.
+
+ """
+
+ _parameters = None
+ _param_prefix = 'avalonurioutputprocessor_'
+ _parms = {
+ "use_publish_paths": _param_prefix + "use_publish_paths"
+ }
+
+ def __init__(self):
+ """ There is only one object of each output processor class that is
+ ever created in a Houdini session. Therefore be very careful
+ about what data gets put in this object.
+ """
+ self._template = None
+ self._use_publish_paths = False
+ self._cache = dict()
+
+ def displayName(self):
+ return 'Avalon URI Output Processor'
+
+ def parameters(self):
+
+ if not self._parameters:
+ parameters = hou.ParmTemplateGroup()
+ use_publish_path = hou.ToggleParmTemplate(
+ name=self._parms["use_publish_paths"],
+ label='Resolve Reference paths to publish paths',
+ default_value=False,
+ help=("When enabled any paths for Layers, References or "
+ "Payloads are resolved to published master versions.\n"
+ "This is usually only used by the publishing pipeline, "
+ "but can be used for testing too."))
+ parameters.append(use_publish_path)
+ self._parameters = parameters.asDialogScript()
+
+ return self._parameters
+
+ def beginSave(self, config_node, t):
+ self._template = _get_project_publish_template()
+
+ parm = self._parms["use_publish_paths"]
+ self._use_publish_paths = config_node.parm(parm).evalAtTime(t)
+ self._cache.clear()
+
+ def endSave(self):
+ self._template = None
+ self._use_publish_paths = None
+ self._cache.clear()
+
+ def processAsset(self,
+ asset_path,
+ asset_path_for_save,
+ referencing_layer_path,
+ asset_is_layer,
+ for_save):
+ """
+ Args:
+ asset_path (str): The incoming file path you want to alter or not.
+ asset_path_for_save (bool): Whether the current path is a
+ referenced path in the USD file. When True, return the path
+ you want inside USD file.
+ referencing_layer_path (str): ???
+ asset_is_layer (bool): Whether this asset is a USD layer file.
+ If this is False, the asset is something else (for example,
+ a texture or volume file).
+ for_save (bool): Whether the asset path is for a file to be saved
+ out. If so, then return actual written filepath.
+
+ Returns:
+ The refactored asset path.
+
+ """
+
+ # Retrieve from cache if this query occurred before (optimization)
+ cache_key = (asset_path, asset_path_for_save, asset_is_layer, for_save)
+ if cache_key in self._cache:
+ return self._cache[cache_key]
+
+ relative_template = "{asset}_{subset}.{ext}"
+ uri_data = usdlib.parse_avalon_uri(asset_path)
+ if uri_data:
+
+ if for_save:
+ # Set save output path to a relative path so other
+ # processors can potentially manage it easily?
+ path = relative_template.format(**uri_data)
+
+ print("Avalon URI Resolver: %s -> %s" % (asset_path, path))
+ self._cache[cache_key] = path
+ return path
+
+ if self._use_publish_paths:
+ # Resolve to an Avalon published asset for embedded paths
+ path = self._get_usd_master_path(**uri_data)
+ else:
+ path = relative_template.format(**uri_data)
+
+ print("Avalon URI Resolver: %s -> %s" % (asset_path, path))
+ self._cache[cache_key] = path
+ return path
+
+ self._cache[cache_key] = asset_path
+ return asset_path
+
+ def _get_usd_master_path(self,
+ asset,
+ subset,
+ ext):
+ """Get the filepath for a .usd file of a subset.
+
+ This will return the path to an unversioned master file generated by
+ `usd_master_file.py`.
+
+ """
+
+ from avalon import api, io
+
+ PROJECT = api.Session["AVALON_PROJECT"]
+ asset_doc = io.find_one({"name": asset,
+ "type": "asset"})
+ if not asset_doc:
+ raise RuntimeError("Invalid asset name: '%s'" % asset)
+
+ root = api.registered_root()
+ path = self._template.format(**{
+ "root": root,
+ "project": PROJECT,
+ "silo": asset_doc["silo"],
+ "asset": asset_doc["name"],
+ "subset": subset,
+ "representation": ext,
+ "version": 0 # stub version zero
+ })
+
+ # Remove the version folder
+ subset_folder = os.path.dirname(os.path.dirname(path))
+ master_folder = os.path.join(subset_folder, "master")
+ fname = "{0}.{1}".format(subset, ext)
+
+ return os.path.join(master_folder, fname).replace("\\", "/")
+
+
+output_processor = AvalonURIOutputProcessor()
+
+
+def usdOutputProcessor():
+ return output_processor
+
diff --git a/openpype/hosts/houdini/vendor/husdoutputprocessors/stagingdir_processor.py b/openpype/hosts/houdini/vendor/husdoutputprocessors/stagingdir_processor.py
new file mode 100644
index 0000000000..d8e36d5aa8
--- /dev/null
+++ b/openpype/hosts/houdini/vendor/husdoutputprocessors/stagingdir_processor.py
@@ -0,0 +1,90 @@
+import hou
+import husdoutputprocessors.base as base
+import os
+
+
+class StagingDirOutputProcessor(base.OutputProcessorBase):
+ """Output all USD Rop file nodes into the Staging Directory
+
+ Ignore any folders and paths set in the Configured Layers
+ and USD Rop node, just take the filename and save into a
+ single directory.
+
+ """
+ theParameters = None
+ parameter_prefix = "stagingdiroutputprocessor_"
+ stagingdir_parm_name = parameter_prefix + "stagingDir"
+
+ def __init__(self):
+ self.staging_dir = None
+
+ def displayName(self):
+ return 'StagingDir Output Processor'
+
+ def parameters(self):
+ if not self.theParameters:
+ parameters = hou.ParmTemplateGroup()
+ rootdirparm = hou.StringParmTemplate(
+ self.stagingdir_parm_name,
+ 'Staging Directory', 1,
+ string_type=hou.stringParmType.FileReference,
+ file_type=hou.fileType.Directory
+ )
+ parameters.append(rootdirparm)
+ self.theParameters = parameters.asDialogScript()
+ return self.theParameters
+
+ def beginSave(self, config_node, t):
+
+ # Use the Root Directory parameter if it is set.
+ root_dir_parm = config_node.parm(self.stagingdir_parm_name)
+ if root_dir_parm:
+ self.staging_dir = root_dir_parm.evalAtTime(t)
+
+ if not self.staging_dir:
+ out_file_parm = config_node.parm('lopoutput')
+ if out_file_parm:
+ self.staging_dir = out_file_parm.evalAtTime(t)
+ if self.staging_dir:
+ (self.staging_dir, filename) = os.path.split(self.staging_dir)
+
+ def endSave(self):
+ self.staging_dir = None
+
+ def processAsset(self, asset_path,
+ asset_path_for_save,
+ referencing_layer_path,
+ asset_is_layer,
+ for_save):
+ """
+ Args:
+ asset_path (str): The incoming file path you want to alter or not.
+ asset_path_for_save (bool): Whether the current path is a
+ referenced path in the USD file. When True, return the path
+ you want inside USD file.
+ referencing_layer_path (str): ???
+ asset_is_layer (bool): Whether this asset is a USD layer file.
+ If this is False, the asset is something else (for example,
+ a texture or volume file).
+ for_save (bool): Whether the asset path is for a file to be saved
+ out. If so, then return actual written filepath.
+
+ Returns:
+ The refactored asset path.
+
+ """
+
+ # Treat save paths as being relative to the output path.
+ if for_save and self.staging_dir:
+ # Whenever we're processing a Save Path make sure to
+ # resolve it to the Staging Directory
+ filename = os.path.basename(asset_path)
+ return os.path.join(self.staging_dir, filename)
+
+ return asset_path
+
+
+output_processor = StagingDirOutputProcessor()
+def usdOutputProcessor():
+ return output_processor
+
diff --git a/openpype/hosts/maya/api/expected_files.py b/openpype/hosts/maya/api/expected_files.py
deleted file mode 100644
index 15e0dc598c..0000000000
--- a/openpype/hosts/maya/api/expected_files.py
+++ /dev/null
@@ -1,945 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Module handling expected render output from Maya.
-
-This module is used in :mod:`collect_render` and :mod:`collect_vray_scene`.
-
-Note:
- To implement new renderer, just create new class inheriting from
- :class:`AExpectedFiles` and add it to :func:`ExpectedFiles.get()`.
-
-Attributes:
- R_SINGLE_FRAME (:class:`re.Pattern`): Find single frame number.
- R_FRAME_RANGE (:class:`re.Pattern`): Find frame range.
- R_FRAME_NUMBER (:class:`re.Pattern`): Find frame number in string.
- R_LAYER_TOKEN (:class:`re.Pattern`): Find layer token in image prefixes.
- R_AOV_TOKEN (:class:`re.Pattern`): Find AOV token in image prefixes.
- R_SUBSTITUTE_AOV_TOKEN (:class:`re.Pattern`): Find and substitute AOV token
- in image prefixes.
- R_REMOVE_AOV_TOKEN (:class:`re.Pattern`): Find and remove AOV token in
- image prefixes.
- R_CLEAN_FRAME_TOKEN (:class:`re.Pattern`): Find and remove unfilled
- Renderman frame token in image prefix.
- R_CLEAN_EXT_TOKEN (:class:`re.Pattern`): Find and remove unfilled Renderman
- extension token in image prefix.
- R_SUBSTITUTE_LAYER_TOKEN (:class:`re.Pattern`): Find and substitute render
- layer token in image prefixes.
- R_SUBSTITUTE_SCENE_TOKEN (:class:`re.Pattern`): Find and substitute scene
- token in image prefixes.
- R_SUBSTITUTE_CAMERA_TOKEN (:class:`re.Pattern`): Find and substitute camera
- token in image prefixes.
- RENDERER_NAMES (dict): Renderer names mapping between reported name and
- *human readable* name.
- IMAGE_PREFIXES (dict): Mapping between renderers and their respective
- image prefix attribute names.
-
-Todo:
- Determine `multipart` from render instance.
-
-"""
-
-import types
-import re
-import os
-from abc import ABCMeta, abstractmethod
-
-import six
-import attr
-
-import openpype.hosts.maya.api.lib as lib
-
-from maya import cmds
-import maya.app.renderSetup.model.renderSetup as renderSetup
-
-
-R_SINGLE_FRAME = re.compile(r"^(-?)\d+$")
-R_FRAME_RANGE = re.compile(r"^(?P(-?)\d+)-(?P(-?)\d+)$")
-R_FRAME_NUMBER = re.compile(r".+\.(?P[0-9]+)\..+")
-R_LAYER_TOKEN = re.compile(
- r".*((?:%l)|(?:)|(?:)).*", re.IGNORECASE
-)
-R_AOV_TOKEN = re.compile(r".*%a.*|.*.*|.*.*", re.IGNORECASE)
-R_SUBSTITUTE_AOV_TOKEN = re.compile(r"%a||", re.IGNORECASE)
-R_REMOVE_AOV_TOKEN = re.compile(
- r"_%a|\.%a|_|\.|_|\.", re.IGNORECASE)
-# to remove unused renderman tokens
-R_CLEAN_FRAME_TOKEN = re.compile(r"\.?\.?", re.IGNORECASE)
-R_CLEAN_EXT_TOKEN = re.compile(r"\.?\.?", re.IGNORECASE)
-
-R_SUBSTITUTE_LAYER_TOKEN = re.compile(
- r"%l||", re.IGNORECASE
-)
-R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r"%c|", re.IGNORECASE)
-R_SUBSTITUTE_SCENE_TOKEN = re.compile(r"%s|", re.IGNORECASE)
-
-RENDERER_NAMES = {
- "mentalray": "MentalRay",
- "vray": "V-Ray",
- "arnold": "Arnold",
- "renderman": "Renderman",
- "redshift": "Redshift",
-}
-
-# not sure about the renderman image prefix
-IMAGE_PREFIXES = {
- "mentalray": "defaultRenderGlobals.imageFilePrefix",
- "vray": "vraySettings.fileNamePrefix",
- "arnold": "defaultRenderGlobals.imageFilePrefix",
- "renderman": "rmanGlobals.imageFileFormat",
- "redshift": "defaultRenderGlobals.imageFilePrefix",
-}
-
-
-@attr.s
-class LayerMetadata(object):
- """Data class for Render Layer metadata."""
- frameStart = attr.ib()
- frameEnd = attr.ib()
- cameras = attr.ib()
- sceneName = attr.ib()
- layerName = attr.ib()
- renderer = attr.ib()
- defaultExt = attr.ib()
- filePrefix = attr.ib()
- enabledAOVs = attr.ib()
- frameStep = attr.ib(default=1)
- padding = attr.ib(default=4)
-
-
-class ExpectedFiles:
- """Class grouping functionality for all supported renderers.
-
- Attributes:
- multipart (bool): Flag if multipart exrs are used.
-
- """
- multipart = False
-
- def __init__(self, render_instance):
- """Constructor."""
- self._render_instance = render_instance
-
- def get(self, renderer, layer):
- """Get expected files for given renderer and render layer.
-
- Args:
- renderer (str): Name of renderer
- layer (str): Name of render layer
-
- Returns:
- dict: Expected rendered files by AOV
-
- Raises:
- :exc:`UnsupportedRendererException`: If requested renderer
- is not supported. It needs to be implemented by extending
- :class:`AExpectedFiles` and added to this methods ``if``
- statement.
-
- """
- renderSetup.instance().switchToLayerUsingLegacyName(layer)
-
- if renderer.lower() == "arnold":
- return self._get_files(ExpectedFilesArnold(layer,
- self._render_instance))
- if renderer.lower() == "vray":
- return self._get_files(ExpectedFilesVray(
- layer, self._render_instance))
- if renderer.lower() == "redshift":
- return self._get_files(ExpectedFilesRedshift(
- layer, self._render_instance))
- if renderer.lower() == "mentalray":
- return self._get_files(ExpectedFilesMentalray(
- layer, self._render_instance))
- if renderer.lower() == "renderman":
- return self._get_files(ExpectedFilesRenderman(
- layer, self._render_instance))
-
- raise UnsupportedRendererException(
- "unsupported {}".format(renderer)
- )
-
- def _get_files(self, renderer):
- # type: (AExpectedFiles) -> list
- files = renderer.get_files()
- self.multipart = renderer.multipart
- return files
-
-
-@six.add_metaclass(ABCMeta)
-class AExpectedFiles:
- """Abstract class with common code for all renderers.
-
- Attributes:
- renderer (str): name of renderer.
- layer (str): name of render layer.
- multipart (bool): flag for multipart exrs.
-
- """
-
- renderer = None
- layer = None
- multipart = False
-
- def __init__(self, layer, render_instance):
- """Constructor."""
- self.layer = layer
- self.render_instance = render_instance
-
- @abstractmethod
- def get_aovs(self):
- """To be implemented by renderer class."""
-
- @staticmethod
- def sanitize_camera_name(camera):
- """Sanitize camera name.
-
- Remove Maya illegal characters from camera name.
-
- Args:
- camera (str): Maya camera name.
-
- Returns:
- (str): sanitized camera name
-
- Example:
- >>> AExpectedFiles.sanizite_camera_name('test:camera_01')
- test_camera_01
-
- """
- return re.sub('[^0-9a-zA-Z_]+', '_', camera)
-
- def get_renderer_prefix(self):
- """Return prefix for specific renderer.
-
- This is for most renderers the same and can be overridden if needed.
-
- Returns:
- str: String with image prefix containing tokens
-
- Raises:
- :exc:`UnsupportedRendererException`: If we requested image
- prefix for renderer we know nothing about.
- See :data:`IMAGE_PREFIXES` for mapping of renderers and
- image prefixes.
-
- """
- try:
- file_prefix = cmds.getAttr(IMAGE_PREFIXES[self.renderer])
- except KeyError:
- raise UnsupportedRendererException(
- "Unsupported renderer {}".format(self.renderer)
- )
- return file_prefix
-
- def _get_layer_data(self):
- # type: () -> LayerMetadata
- # ______________________________________________
- # ____________________/ ____________________________________________/
- # 1 - get scene name /__________________/
- # ____________________/
- _, scene_basename = os.path.split(cmds.file(q=True, loc=True))
- scene_name, _ = os.path.splitext(scene_basename)
-
- file_prefix = self.get_renderer_prefix()
-
- if not file_prefix:
- raise RuntimeError("Image prefix not set")
-
- layer_name = self.layer
- if self.layer.startswith("rs_"):
- layer_name = self.layer[3:]
-
- return LayerMetadata(
- frameStart=int(self.get_render_attribute("startFrame")),
- frameEnd=int(self.get_render_attribute("endFrame")),
- frameStep=int(self.get_render_attribute("byFrameStep")),
- padding=int(self.get_render_attribute("extensionPadding")),
- # if we have token in prefix path we'll expect output for
- # every renderable camera in layer.
- cameras=self.get_renderable_cameras(),
- sceneName=scene_name,
- layerName=layer_name,
- renderer=self.renderer,
- defaultExt=cmds.getAttr("defaultRenderGlobals.imfPluginKey"),
- filePrefix=file_prefix,
- enabledAOVs=self.get_aovs()
- )
-
- def _generate_single_file_sequence(
- self, layer_data, force_aov_name=None):
- # type: (LayerMetadata, str) -> list
- expected_files = []
- for cam in layer_data.cameras:
- file_prefix = layer_data.filePrefix
- mappings = (
- (R_SUBSTITUTE_SCENE_TOKEN, layer_data.sceneName),
- (R_SUBSTITUTE_LAYER_TOKEN, layer_data.layerName),
- (R_SUBSTITUTE_CAMERA_TOKEN, self.sanitize_camera_name(cam)),
- # this is required to remove unfilled aov token, for example
- # in Redshift
- (R_REMOVE_AOV_TOKEN, "") if not force_aov_name \
- else (R_SUBSTITUTE_AOV_TOKEN, force_aov_name),
-
- (R_CLEAN_FRAME_TOKEN, ""),
- (R_CLEAN_EXT_TOKEN, ""),
- )
-
- for regex, value in mappings:
- file_prefix = re.sub(regex, value, file_prefix)
-
- for frame in range(
- int(layer_data.frameStart),
- int(layer_data.frameEnd) + 1,
- int(layer_data.frameStep),
- ):
- expected_files.append(
- "{}.{}.{}".format(
- file_prefix,
- str(frame).rjust(layer_data.padding, "0"),
- layer_data.defaultExt,
- )
- )
- return expected_files
-
- def _generate_aov_file_sequences(self, layer_data):
- # type: (LayerMetadata) -> list
- expected_files = []
- aov_file_list = {}
- for aov in layer_data.enabledAOVs:
- for cam in layer_data.cameras:
- file_prefix = layer_data.filePrefix
-
- mappings = (
- (R_SUBSTITUTE_SCENE_TOKEN, layer_data.sceneName),
- (R_SUBSTITUTE_LAYER_TOKEN, layer_data.layerName),
- (R_SUBSTITUTE_CAMERA_TOKEN,
- self.sanitize_camera_name(cam)),
- (R_SUBSTITUTE_AOV_TOKEN, aov[0]),
- (R_CLEAN_FRAME_TOKEN, ""),
- (R_CLEAN_EXT_TOKEN, ""),
- )
-
- for regex, value in mappings:
- file_prefix = re.sub(regex, value, file_prefix)
-
- aov_files = []
- for frame in range(
- int(layer_data.frameStart),
- int(layer_data.frameEnd) + 1,
- int(layer_data.frameStep),
- ):
- aov_files.append(
- "{}.{}.{}".format(
- file_prefix,
- str(frame).rjust(layer_data.padding, "0"),
- aov[1],
- )
- )
-
- # if we have more then one renderable camera, append
- # camera name to AOV to allow per camera AOVs.
- aov_name = aov[0]
- if len(layer_data.cameras) > 1:
- aov_name = "{}_{}".format(aov[0],
- self.sanitize_camera_name(cam))
-
- aov_file_list[aov_name] = aov_files
- file_prefix = layer_data.filePrefix
-
- expected_files.append(aov_file_list)
- return expected_files
-
- def get_files(self):
- """Return list of expected files.
-
- It will translate render token strings ('', etc.) to
- their values. This task is tricky as every renderer deals with this
- differently. It depends on `get_aovs()` abstract method implemented
- for every supported renderer.
-
- """
- layer_data = self._get_layer_data()
-
- expected_files = []
- if layer_data.enabledAOVs:
- return self._generate_aov_file_sequences(layer_data)
- else:
- return self._generate_single_file_sequence(layer_data)
-
- def get_renderable_cameras(self):
- # type: () -> list
- """Get all renderable cameras.
-
- Returns:
- list: list of renderable cameras.
-
- """
- cam_parents = [
- cmds.listRelatives(x, ap=True)[-1] for x in cmds.ls(cameras=True)
- ]
-
- return [
- cam
- for cam in cam_parents
- if self.maya_is_true(cmds.getAttr("{}.renderable".format(cam)))
- ]
-
- @staticmethod
- def maya_is_true(attr_val):
- """Whether a Maya attr evaluates to True.
-
- When querying an attribute value from an ambiguous object the
- Maya API will return a list of values, which need to be properly
- handled to evaluate properly.
-
- Args:
- attr_val (mixed): Maya attribute to be evaluated as bool.
-
- Returns:
- bool: cast Maya attribute to Pythons boolean value.
-
- """
- if isinstance(attr_val, types.BooleanType):
- return attr_val
- if isinstance(attr_val, (types.ListType, types.GeneratorType)):
- return any(attr_val)
-
- return bool(attr_val)
-
- @staticmethod
- def get_layer_overrides(attribute):
- """Get overrides for attribute on current render layer.
-
- Args:
- attribute (str): Maya attribute name.
-
- Returns:
- Value of attribute override.
-
- """
- connections = cmds.listConnections(attribute, plugs=True)
- if connections:
- for connection in connections:
- if connection:
- # node_name = connection.split(".")[0]
-
- attr_name = "%s.value" % ".".join(
- connection.split(".")[:-1]
- )
- yield cmds.getAttr(attr_name)
-
- def get_render_attribute(self, attribute):
- """Get attribute from render options.
-
- Args:
- attribute (str): name of attribute to be looked up.
-
- Returns:
- Attribute value
-
- """
- return lib.get_attr_in_layer(
- "defaultRenderGlobals.{}".format(attribute), layer=self.layer
- )
-
-
-class ExpectedFilesArnold(AExpectedFiles):
- """Expected files for Arnold renderer.
-
- Attributes:
- aiDriverExtension (dict): Arnold AOV driver extension mapping.
- Is there a better way?
- renderer (str): name of renderer.
-
- """
-
- aiDriverExtension = {
- "jpeg": "jpg",
- "exr": "exr",
- "deepexr": "exr",
- "png": "png",
- "tiff": "tif",
- "mtoa_shaders": "ass", # TODO: research what those last two should be
- "maya": "",
- }
-
- def __init__(self, layer, render_instance):
- """Constructor."""
- super(ExpectedFilesArnold, self).__init__(layer, render_instance)
- self.renderer = "arnold"
-
- def get_aovs(self):
- """Get all AOVs.
-
- See Also:
- :func:`AExpectedFiles.get_aovs()`
-
- Raises:
- :class:`AOVError`: If AOV cannot be determined.
-
- """
- enabled_aovs = []
- try:
- if not (
- cmds.getAttr("defaultArnoldRenderOptions.aovMode")
- and not cmds.getAttr("defaultArnoldDriver.mergeAOVs") # noqa: W503, E501
- ):
- # AOVs are merged in mutli-channel file
- self.multipart = True
- return enabled_aovs
- except ValueError:
- # this occurs when Render Setting windows was not opened yet. In
- # such case there are no Arnold options created so query for AOVs
- # will fail. We terminate here as there are no AOVs specified then.
- # This state will most probably fail later on some Validator
- # anyway.
- return enabled_aovs
-
- # AOVs are set to be rendered separately. We should expect
- # token in path.
-
- # handle aovs from references
- use_ref_aovs = self.render_instance.data.get(
- "useReferencedAovs", False) or False
-
- ai_aovs = cmds.ls(type="aiAOV")
- if not use_ref_aovs:
- ref_aovs = cmds.ls(type="aiAOV", referencedNodes=True)
- ai_aovs = list(set(ai_aovs) - set(ref_aovs))
-
- for aov in ai_aovs:
- enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
- ai_driver = cmds.listConnections("{}.outputs".format(aov))[0]
- ai_translator = cmds.getAttr("{}.aiTranslator".format(ai_driver))
- try:
- aov_ext = self.aiDriverExtension[ai_translator]
- except KeyError:
- msg = (
- "Unrecognized arnold " "driver format for AOV - {}"
- ).format(cmds.getAttr("{}.name".format(aov)))
- raise AOVError(msg)
-
- for override in self.get_layer_overrides(
- "{}.enabled".format(aov)
- ):
- enabled = self.maya_is_true(override)
- if enabled:
- # If aov RGBA is selected, arnold will translate it to `beauty`
- aov_name = cmds.getAttr("%s.name" % aov)
- if aov_name == "RGBA":
- aov_name = "beauty"
- enabled_aovs.append((aov_name, aov_ext))
- # Append 'beauty' as this is arnolds
- # default. If token is specified and no AOVs are
- # defined, this will be used.
- enabled_aovs.append(
- (u"beauty", cmds.getAttr("defaultRenderGlobals.imfPluginKey"))
- )
- return enabled_aovs
-
-
-class ExpectedFilesVray(AExpectedFiles):
- """Expected files for V-Ray renderer."""
-
- def __init__(self, layer, render_instance):
- """Constructor."""
- super(ExpectedFilesVray, self).__init__(layer, render_instance)
- self.renderer = "vray"
-
- def get_renderer_prefix(self):
- """Get image prefix for V-Ray.
-
- This overrides :func:`AExpectedFiles.get_renderer_prefix()` as
- we must add `` token manually.
-
- See also:
- :func:`AExpectedFiles.get_renderer_prefix()`
-
- """
- prefix = super(ExpectedFilesVray, self).get_renderer_prefix()
- prefix = "{}_".format(prefix)
- return prefix
-
- def _get_layer_data(self):
- # type: () -> LayerMetadata
- """Override to get vray specific extension."""
- layer_data = super(ExpectedFilesVray, self)._get_layer_data()
- default_ext = cmds.getAttr("vraySettings.imageFormatStr")
- if default_ext in ["exr (multichannel)", "exr (deep)"]:
- default_ext = "exr"
- layer_data.defaultExt = default_ext
- layer_data.padding = cmds.getAttr("vraySettings.fileNamePadding")
- return layer_data
-
- def get_files(self):
- """Get expected files.
-
- This overrides :func:`AExpectedFiles.get_files()` as we
- we need to add one sequence for plain beauty if AOVs are enabled
- as vray output beauty without 'beauty' in filename.
-
- """
- expected_files = super(ExpectedFilesVray, self).get_files()
-
- layer_data = self._get_layer_data()
- # remove 'beauty' from filenames as vray doesn't output it
- update = {}
- if layer_data.enabledAOVs:
- for aov, seqs in expected_files[0].items():
- if aov.startswith("beauty"):
- new_list = []
- for seq in seqs:
- new_list.append(seq.replace("_beauty", ""))
- update[aov] = new_list
-
- expected_files[0].update(update)
- return expected_files
-
- def get_aovs(self):
- """Get all AOVs.
-
- See Also:
- :func:`AExpectedFiles.get_aovs()`
-
- """
- enabled_aovs = []
-
- try:
- # really? do we set it in vray just by selecting multichannel exr?
- if (
- cmds.getAttr("vraySettings.imageFormatStr")
- == "exr (multichannel)" # noqa: W503
- ):
- # AOVs are merged in mutli-channel file
- self.multipart = True
- return enabled_aovs
- except ValueError:
- # this occurs when Render Setting windows was not opened yet. In
- # such case there are no VRay options created so query for AOVs
- # will fail. We terminate here as there are no AOVs specified then.
- # This state will most probably fail later on some Validator
- # anyway.
- return enabled_aovs
-
- default_ext = cmds.getAttr("vraySettings.imageFormatStr")
- if default_ext in ["exr (multichannel)", "exr (deep)"]:
- default_ext = "exr"
-
- # add beauty as default
- enabled_aovs.append(
- (u"beauty", default_ext)
- )
-
- # handle aovs from references
- use_ref_aovs = self.render_instance.data.get(
- "useReferencedAovs", False) or False
-
- # this will have list of all aovs no matter if they are coming from
- # reference or not.
- vr_aovs = cmds.ls(
- type=["VRayRenderElement", "VRayRenderElementSet"]) or []
- if not use_ref_aovs:
- ref_aovs = cmds.ls(
- type=["VRayRenderElement", "VRayRenderElementSet"],
- referencedNodes=True) or []
- # get difference
- vr_aovs = list(set(vr_aovs) - set(ref_aovs))
-
- for aov in vr_aovs:
- enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
- for override in self.get_layer_overrides(
- "{}.enabled".format(aov)
- ):
- enabled = self.maya_is_true(override)
-
- if enabled:
- enabled_aovs.append(
- (self._get_vray_aov_name(aov), default_ext))
-
- return enabled_aovs
-
- @staticmethod
- def _get_vray_aov_name(node):
- """Get AOVs name from Vray.
-
- Args:
- node (str): aov node name.
-
- Returns:
- str: aov name.
-
- """
- vray_name = None
- vray_explicit_name = None
- vray_file_name = None
- for node_attr in cmds.listAttr(node):
- if node_attr.startswith("vray_filename"):
- vray_file_name = cmds.getAttr("{}.{}".format(node, node_attr))
- elif node_attr.startswith("vray_name"):
- vray_name = cmds.getAttr("{}.{}".format(node, node_attr))
- elif node_attr.startswith("vray_explicit_name"):
- vray_explicit_name = cmds.getAttr(
- "{}.{}".format(node, node_attr))
-
- if vray_file_name is not None and vray_file_name != "":
- final_name = vray_file_name
- elif vray_explicit_name is not None and vray_explicit_name != "":
- final_name = vray_explicit_name
- elif vray_name is not None and vray_name != "":
- final_name = vray_name
- else:
- continue
- # special case for Material Select elements - these are named
- # based on the materia they are connected to.
- if "vray_mtl_mtlselect" in cmds.listAttr(node):
- connections = cmds.listConnections(
- "{}.vray_mtl_mtlselect".format(node))
- if connections:
- final_name += '_{}'.format(str(connections[0]))
-
- return final_name
-
-
-class ExpectedFilesRedshift(AExpectedFiles):
- """Expected files for Redshift renderer.
-
- Attributes:
-
- unmerged_aovs (list): Name of aovs that are not merged into resulting
- exr and we need them specified in expectedFiles output.
-
- """
-
- unmerged_aovs = ["Cryptomatte"]
-
- def __init__(self, layer, render_instance):
- """Construtor."""
- super(ExpectedFilesRedshift, self).__init__(layer, render_instance)
- self.renderer = "redshift"
-
- def get_renderer_prefix(self):
- """Get image prefix for Redshift.
-
- This overrides :func:`AExpectedFiles.get_renderer_prefix()` as
- we must add `` token manually.
-
- See also:
- :func:`AExpectedFiles.get_renderer_prefix()`
-
- """
- prefix = super(ExpectedFilesRedshift, self).get_renderer_prefix()
- prefix = "{}.".format(prefix)
- return prefix
-
- def get_files(self):
- """Get expected files.
-
- This overrides :func:`AExpectedFiles.get_files()` as we
- we need to add one sequence for plain beauty if AOVs are enabled
- as vray output beauty without 'beauty' in filename.
-
- """
- expected_files = super(ExpectedFilesRedshift, self).get_files()
- layer_data = self._get_layer_data()
-
- # Redshift doesn't merge Cryptomatte AOV to final exr. We need to check
- # for such condition and add it to list of expected files.
-
- for aov in layer_data.enabledAOVs:
- if aov[0].lower() == "cryptomatte":
- aov_name = aov[0]
- expected_files.append(
- {aov_name: self._generate_single_file_sequence(layer_data)}
- )
-
- if layer_data.get("enabledAOVs"):
- # because if Beauty is added manually, it will be rendered as
- # 'Beauty_other' in file name and "standard" beauty will have
- # 'Beauty' in its name. When disabled, standard output will be
- # without `Beauty`.
- if expected_files[0].get(u"Beauty"):
- expected_files[0][u"Beauty_other"] = expected_files[0].pop(
- u"Beauty")
- new_list = [
- seq.replace(".Beauty", ".Beauty_other")
- for seq in expected_files[0][u"Beauty_other"]
- ]
-
- expected_files[0][u"Beauty_other"] = new_list
- expected_files[0][u"Beauty"] = self._generate_single_file_sequence( # noqa: E501
- layer_data, force_aov_name="Beauty"
- )
- else:
- expected_files[0][u"Beauty"] = self._generate_single_file_sequence( # noqa: E501
- layer_data
- )
-
- return expected_files
-
- def get_aovs(self):
- """Get all AOVs.
-
- See Also:
- :func:`AExpectedFiles.get_aovs()`
-
- """
- enabled_aovs = []
-
- try:
- if self.maya_is_true(
- cmds.getAttr("redshiftOptions.exrForceMultilayer")
- ):
- # AOVs are merged in mutli-channel file
- self.multipart = True
- return enabled_aovs
- except ValueError:
- # this occurs when Render Setting windows was not opened yet. In
- # such case there are no Redshift options created so query for AOVs
- # will fail. We terminate here as there are no AOVs specified then.
- # This state will most probably fail later on some Validator
- # anyway.
- return enabled_aovs
-
- default_ext = cmds.getAttr(
- "redshiftOptions.imageFormat", asString=True)
- rs_aovs = cmds.ls(type="RedshiftAOV", referencedNodes=False)
-
- for aov in rs_aovs:
- enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
- for override in self.get_layer_overrides(
- "{}.enabled".format(aov)
- ):
- enabled = self.maya_is_true(override)
-
- if enabled:
- # If AOVs are merged into multipart exr, append AOV only if it
- # is in the list of AOVs that renderer cannot (or will not)
- # merge into final exr.
- if self.maya_is_true(
- cmds.getAttr("redshiftOptions.exrForceMultilayer")
- ):
- if cmds.getAttr("%s.name" % aov) in self.unmerged_aovs:
- enabled_aovs.append(
- (cmds.getAttr("%s.name" % aov), default_ext)
- )
- else:
- enabled_aovs.append(
- (cmds.getAttr("%s.name" % aov), default_ext)
- )
-
- if self.maya_is_true(
- cmds.getAttr("redshiftOptions.exrForceMultilayer")
- ):
- # AOVs are merged in mutli-channel file
- self.multipart = True
-
- return enabled_aovs
-
-
-class ExpectedFilesRenderman(AExpectedFiles):
- """Expected files for Renderman renderer.
-
- Warning:
- This is very rudimentary and needs more love and testing.
- """
-
- def __init__(self, layer, render_instance):
- """Constructor."""
- super(ExpectedFilesRenderman, self).__init__(layer, render_instance)
- self.renderer = "renderman"
-
- def get_aovs(self):
- """Get all AOVs.
-
- See Also:
- :func:`AExpectedFiles.get_aovs()`
-
- """
- enabled_aovs = []
-
- default_ext = "exr"
- displays = cmds.listConnections("rmanGlobals.displays")
- for aov in displays:
- aov_name = str(aov)
- if aov_name == "rmanDefaultDisplay":
- aov_name = "beauty"
-
- enabled = self.maya_is_true(cmds.getAttr("{}.enable".format(aov)))
- for override in self.get_layer_overrides(
- "{}.enable".format(aov)
- ):
- enabled = self.maya_is_true(override)
-
- if enabled:
- enabled_aovs.append((aov_name, default_ext))
-
- return enabled_aovs
-
- def get_files(self):
- """Get expected files.
-
- This overrides :func:`AExpectedFiles.get_files()` as we
- we need to add one sequence for plain beauty if AOVs are enabled
- as vray output beauty without 'beauty' in filename.
-
- In renderman we hack it with prepending path. This path would
- normally be translated from `rmanGlobals.imageOutputDir`. We skip
- this and hardcode prepend path we expect. There is no place for user
- to mess around with this settings anyway and it is enforced in
- render settings validator.
- """
- layer_data = self._get_layer_data()
- new_aovs = {}
-
- expected_files = super(ExpectedFilesRenderman, self).get_files()
- # we always get beauty
- for aov, files in expected_files[0].items():
- new_files = []
- for file in files:
- new_file = "{}/{}/{}".format(
- layer_data["sceneName"], layer_data["layerName"], file
- )
- new_files.append(new_file)
- new_aovs[aov] = new_files
-
- return [new_aovs]
-
-
-class ExpectedFilesMentalray(AExpectedFiles):
- """Skeleton unimplemented class for Mentalray renderer."""
-
- def __init__(self, layer, render_instance):
- """Constructor.
-
- Raises:
- :exc:`UnimplementedRendererException`: as it is not implemented.
-
- """
- super(ExpectedFilesMentalray, self).__init__(layer, render_instance)
- raise UnimplementedRendererException("Mentalray not implemented")
-
- def get_aovs(self):
- """Get all AOVs.
-
- See Also:
- :func:`AExpectedFiles.get_aovs()`
-
- """
- return []
-
-
-class AOVError(Exception):
- """Custom exception for determining AOVs."""
-
-
-class UnsupportedRendererException(Exception):
- """Custom exception.
-
- Raised when requesting data from unsupported renderer.
- """
-
-
-class UnimplementedRendererException(Exception):
- """Custom exception.
-
- Raised when requesting data from renderer that is not implemented yet.
- """
diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py
index b87e106865..b24235447f 100644
--- a/openpype/hosts/maya/api/lib.py
+++ b/openpype/hosts/maya/api/lib.py
@@ -2252,10 +2252,8 @@ def get_attr_in_layer(attr, layer):
try:
if cmds.mayaHasRenderSetup():
- log.debug("lib.get_attr_in_layer is not "
- "optimized for render setup")
- with renderlayer(layer):
- return cmds.getAttr(attr)
+ from . import lib_rendersetup
+ return lib_rendersetup.get_attr_in_layer(attr, layer)
except AttributeError:
pass
diff --git a/openpype/hosts/maya/api/lib_renderproducts.py b/openpype/hosts/maya/api/lib_renderproducts.py
new file mode 100644
index 0000000000..fb99584c5d
--- /dev/null
+++ b/openpype/hosts/maya/api/lib_renderproducts.py
@@ -0,0 +1,1039 @@
+# -*- coding: utf-8 -*-
+"""Module handling expected render output from Maya.
+
+This module is used in :mod:`collect_render` and :mod:`collect_vray_scene`.
+
+Note:
+ To implement new renderer, just create new class inheriting from
+ :class:`ARenderProducts` and add it to :func:`RenderProducts.get()`.
+
+Attributes:
+ R_SINGLE_FRAME (:class:`re.Pattern`): Find single frame number.
+ R_FRAME_RANGE (:class:`re.Pattern`): Find frame range.
+ R_FRAME_NUMBER (:class:`re.Pattern`): Find frame number in string.
+ R_LAYER_TOKEN (:class:`re.Pattern`): Find layer token in image prefixes.
+ R_AOV_TOKEN (:class:`re.Pattern`): Find AOV token in image prefixes.
+ R_SUBSTITUTE_AOV_TOKEN (:class:`re.Pattern`): Find and substitute AOV token
+ in image prefixes.
+ R_REMOVE_AOV_TOKEN (:class:`re.Pattern`): Find and remove AOV token in
+ image prefixes.
+ R_CLEAN_FRAME_TOKEN (:class:`re.Pattern`): Find and remove unfilled
+ Renderman frame token in image prefix.
+ R_CLEAN_EXT_TOKEN (:class:`re.Pattern`): Find and remove unfilled Renderman
+ extension token in image prefix.
+ R_SUBSTITUTE_LAYER_TOKEN (:class:`re.Pattern`): Find and substitute render
+ layer token in image prefixes.
+ R_SUBSTITUTE_SCENE_TOKEN (:class:`re.Pattern`): Find and substitute scene
+ token in image prefixes.
+ R_SUBSTITUTE_CAMERA_TOKEN (:class:`re.Pattern`): Find and substitute camera
+ token in image prefixes.
+ IMAGE_PREFIXES (dict): Mapping between renderers and their respective
+ image prefix attribute names.
+
+Thanks:
+ Roy Nieterau (BigRoy) / Colorbleed for overhaul of original
+ *expected_files*.
+
+"""
+
+import logging
+import re
+import os
+from abc import ABCMeta, abstractmethod
+
+import six
+import attr
+
+from . import lib
+from . import lib_rendersetup
+
+from maya import cmds, mel
+
+log = logging.getLogger(__name__)
+
+R_SINGLE_FRAME = re.compile(r"^(-?)\d+$")
+R_FRAME_RANGE = re.compile(r"^(?P(-?)\d+)-(?P(-?)\d+)$")
+R_FRAME_NUMBER = re.compile(r".+\.(?P[0-9]+)\..+")
+R_LAYER_TOKEN = re.compile(
+    r".*((?:%l)|(?:<layer>)|(?:<renderlayer>)).*", re.IGNORECASE
+)
+R_AOV_TOKEN = re.compile(r".*%a.*|.*<aov>.*|.*<renderpass>.*", re.IGNORECASE)
+R_SUBSTITUTE_AOV_TOKEN = re.compile(r"%a|<aov>|<renderpass>", re.IGNORECASE)
+R_REMOVE_AOV_TOKEN = re.compile(
+    r"_%a|\.%a|_<aov>|\.<aov>|_<renderpass>|\.<renderpass>", re.IGNORECASE)
+# to remove unused renderman tokens
+R_CLEAN_FRAME_TOKEN = re.compile(r"\.?<f\d+>\.?", re.IGNORECASE)
+R_CLEAN_EXT_TOKEN = re.compile(r"\.?<ext>\.?", re.IGNORECASE)
+
+R_SUBSTITUTE_LAYER_TOKEN = re.compile(
+    r"%l|<layer>|<renderlayer>", re.IGNORECASE
+)
+R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r"%c|<camera>", re.IGNORECASE)
+R_SUBSTITUTE_SCENE_TOKEN = re.compile(r"%s|<scene>", re.IGNORECASE)
+
+# not sure about the renderman image prefix
+IMAGE_PREFIXES = {
+ "vray": "vraySettings.fileNamePrefix",
+ "arnold": "defaultRenderGlobals.imageFilePrefix",
+ "renderman": "rmanGlobals.imageFileFormat",
+ "redshift": "defaultRenderGlobals.imageFilePrefix",
+}
+
+
+@attr.s
+class LayerMetadata(object):
+ """Data class for Render Layer metadata."""
+ frameStart = attr.ib()
+ frameEnd = attr.ib()
+ cameras = attr.ib()
+ sceneName = attr.ib()
+ layerName = attr.ib()
+ renderer = attr.ib()
+ defaultExt = attr.ib()
+ filePrefix = attr.ib()
+ frameStep = attr.ib(default=1)
+ padding = attr.ib(default=4)
+
+ # Render Products
+ products = attr.ib(init=False, default=attr.Factory(list))
+
+
+@attr.s
+class RenderProduct(object):
+ """Describes an image or other file-like artifact produced by a render.
+
+ Warning:
+ This currently does NOT return as a product PER render camera.
+ A single Render Product will generate files per camera. E.g. with two
+ cameras each render product generates two sequences on disk assuming
+ the file path prefix correctly uses the tokens.
+
+ """
+ productName = attr.ib()
+ ext = attr.ib() # extension
+ aov = attr.ib(default=None) # source aov
+ driver = attr.ib(default=None) # source driver
+ multipart = attr.ib(default=False) # multichannel file
+
+
+def get(layer, render_instance=None):
+ # type: (str, object) -> ARenderProducts
+ """Get render details and products for given renderer and render layer.
+
+ Args:
+ layer (str): Name of render layer
+ render_instance (pyblish.api.Instance): Publish instance.
+ If not provided an empty mock instance is used.
+
+ Returns:
+ ARenderProducts: The correct RenderProducts instance for that
+ renderlayer.
+
+ Raises:
+ :exc:`UnsupportedRendererException`: If requested renderer
+ is not supported. It needs to be implemented by extending
+ :class:`ARenderProducts` and added to this methods ``if``
+ statement.
+
+ """
+
+ if render_instance is None:
+ # For now produce a mock instance
+ class Instance(object):
+ data = {}
+ render_instance = Instance()
+
+ renderer_name = lib.get_attr_in_layer(
+ "defaultRenderGlobals.currentRenderer",
+ layer=layer
+ )
+
+ renderer = {
+ "arnold": RenderProductsArnold,
+ "vray": RenderProductsVray,
+ "redshift": RenderProductsRedshift,
+ "renderman": RenderProductsRenderman
+ }.get(renderer_name.lower(), None)
+ if renderer is None:
+ raise UnsupportedRendererException(
+ "unsupported {}".format(renderer_name)
+ )
+
+ return renderer(layer, render_instance)
+
+
+@six.add_metaclass(ABCMeta)
+class ARenderProducts:
+ """Abstract class with common code for all renderers.
+
+ Attributes:
+ renderer (str): name of renderer.
+
+ """
+
+ renderer = None
+
+ def __init__(self, layer, render_instance):
+ """Constructor."""
+ self.layer = layer
+ self.render_instance = render_instance
+ self.multipart = False
+
+ # Initialize
+ self.layer_data = self._get_layer_data()
+ self.layer_data.products = self.get_render_products()
+
+ @abstractmethod
+ def get_render_products(self):
+ """To be implemented by renderer class.
+
+ This should return a list of RenderProducts.
+
+ Returns:
+ list: List of RenderProduct
+
+ """
+
+ @staticmethod
+ def sanitize_camera_name(camera):
+ # type: (str) -> str
+ """Sanitize camera name.
+
+ Remove Maya illegal characters from camera name.
+
+ Args:
+ camera (str): Maya camera name.
+
+ Returns:
+ (str): sanitized camera name
+
+ Example:
+            >>> ARenderProducts.sanitize_camera_name('test:camera_01')
+ test_camera_01
+
+ """
+ return re.sub('[^0-9a-zA-Z_]+', '_', camera)
+
+ def get_renderer_prefix(self):
+ # type: () -> str
+ """Return prefix for specific renderer.
+
+ This is for most renderers the same and can be overridden if needed.
+
+ Returns:
+ str: String with image prefix containing tokens
+
+ Raises:
+ :exc:`UnsupportedRendererException`: If we requested image
+ prefix for renderer we know nothing about.
+ See :data:`IMAGE_PREFIXES` for mapping of renderers and
+ image prefixes.
+
+ """
+ try:
+ file_prefix_attr = IMAGE_PREFIXES[self.renderer]
+ except KeyError:
+ raise UnsupportedRendererException(
+ "Unsupported renderer {}".format(self.renderer)
+ )
+
+ file_prefix = self._get_attr(file_prefix_attr)
+
+ if not file_prefix:
+ # Fall back to scene name by default
+            log.debug("Image prefix not set, using <Scene>")
+            file_prefix = "<Scene>"
+
+ return file_prefix
+
+ def get_render_attribute(self, attribute):
+ """Get attribute from render options.
+
+ Args:
+ attribute (str): name of attribute to be looked up.
+
+ Returns:
+ Attribute value
+
+ """
+ return self._get_attr("defaultRenderGlobals", attribute)
+
+ def _get_attr(self, node_attr, attribute=None):
+ """Return the value of the attribute in the renderlayer
+
+ For readability this allows passing in the attribute in two ways.
+
+ As a single argument:
+ _get_attr("node.attr")
+ Or as two arguments:
+ _get_attr("node", "attr")
+
+ Returns:
+ Value of the attribute inside the layer this instance is set to.
+
+ """
+
+ if attribute is None:
+ plug = node_attr
+ else:
+ plug = "{}.{}".format(node_attr, attribute)
+
+ return lib.get_attr_in_layer(plug, layer=self.layer)
+
+ def _get_layer_data(self):
+ # type: () -> LayerMetadata
+ # ______________________________________________
+ # ____________________/ ____________________________________________/
+ # 1 - get scene name /__________________/
+ # ____________________/
+ _, scene_basename = os.path.split(cmds.file(q=True, loc=True))
+ scene_name, _ = os.path.splitext(scene_basename)
+
+ file_prefix = self.get_renderer_prefix()
+
+ # If the Render Layer belongs to a Render Setup layer then the
+ # output name is based on the Render Setup Layer name without
+ # the `rs_` prefix.
+ layer_name = self.layer
+ rs_layer = lib_rendersetup.get_rendersetup_layer(layer_name)
+ if rs_layer:
+ layer_name = rs_layer
+
+ if self.layer == "defaultRenderLayer":
+ # defaultRenderLayer renders as masterLayer
+ layer_name = "masterLayer"
+
+ # todo: Support Custom Frames sequences 0,5-10,100-120
+ # Deadline allows submitting renders with a custom frame list
+ # to support those cases we might want to allow 'custom frames'
+ # to be overridden to `ExpectFiles` class?
+ layer_data = LayerMetadata(
+ frameStart=int(self.get_render_attribute("startFrame")),
+ frameEnd=int(self.get_render_attribute("endFrame")),
+ frameStep=int(self.get_render_attribute("byFrameStep")),
+ padding=int(self.get_render_attribute("extensionPadding")),
+            # if we have <camera> token in prefix path we'll expect output for
+ # every renderable camera in layer.
+ cameras=self.get_renderable_cameras(),
+ sceneName=scene_name,
+ layerName=layer_name,
+ renderer=self.renderer,
+ defaultExt=self._get_attr("defaultRenderGlobals.imfPluginKey"),
+ filePrefix=file_prefix
+ )
+ return layer_data
+
+ def _generate_file_sequence(
+ self, layer_data,
+ force_aov_name=None,
+ force_ext=None,
+ force_cameras=None):
+ # type: (LayerMetadata, str, str, list) -> list
+ expected_files = []
+ cameras = force_cameras if force_cameras else layer_data.cameras
+ ext = force_ext or layer_data.defaultExt
+ for cam in cameras:
+ file_prefix = layer_data.filePrefix
+ mappings = (
+ (R_SUBSTITUTE_SCENE_TOKEN, layer_data.sceneName),
+ (R_SUBSTITUTE_LAYER_TOKEN, layer_data.layerName),
+ (R_SUBSTITUTE_CAMERA_TOKEN, self.sanitize_camera_name(cam)),
+ # this is required to remove unfilled aov token, for example
+ # in Redshift
+ (R_REMOVE_AOV_TOKEN, "") if not force_aov_name \
+ else (R_SUBSTITUTE_AOV_TOKEN, force_aov_name),
+
+ (R_CLEAN_FRAME_TOKEN, ""),
+ (R_CLEAN_EXT_TOKEN, ""),
+ )
+
+ for regex, value in mappings:
+ file_prefix = re.sub(regex, value, file_prefix)
+
+ for frame in range(
+ int(layer_data.frameStart),
+ int(layer_data.frameEnd) + 1,
+ int(layer_data.frameStep),
+ ):
+ frame_str = str(frame).rjust(layer_data.padding, "0")
+ expected_files.append(
+ "{}.{}.{}".format(file_prefix, frame_str, ext)
+ )
+ return expected_files
+
+ def get_files(self, product, camera):
+ # type: (RenderProduct, str) -> list
+ """Return list of expected files.
+
+        It will translate render token strings ('<RenderPass>', etc.) to
+ their values. This task is tricky as every renderer deals with this
+ differently. That's why we expose `get_files` as a method on the
+ Renderer class so it can be overridden for complex cases.
+
+ Args:
+ product (RenderProduct): Render product to be used for file
+ generation.
+ camera (str): Camera name.
+
+ Returns:
+ List of files
+
+ """
+ return self._generate_file_sequence(
+ self.layer_data,
+ force_aov_name=product.productName,
+ force_ext=product.ext,
+ force_cameras=[camera]
+ )
+
+ def get_renderable_cameras(self):
+ # type: () -> list
+ """Get all renderable camera transforms.
+
+ Returns:
+ list: list of renderable cameras.
+
+ """
+
+ renderable_cameras = [
+ cam for cam in cmds.ls(cameras=True)
+ if self._get_attr(cam, "renderable")
+ ]
+
+ # The output produces a sanitized name for using its
+ # shortest unique path of the transform so we'll return
+ # at least that unique path. This could include a parent
+ # name too when two cameras have the same name but are
+ # in a different hierarchy, e.g. "group1|cam" and "group2|cam"
+ def get_name(camera):
+ return cmds.ls(cmds.listRelatives(camera,
+ parent=True,
+ fullPath=True))[0]
+
+ return [get_name(cam) for cam in renderable_cameras]
+
+
+class RenderProductsArnold(ARenderProducts):
+ """Render products for Arnold renderer.
+
+ References:
+ mtoa.utils.getFileName()
+ mtoa.utils.ui.common.updateArnoldTargetFilePreview()
+
+ Notes:
+ - Output Denoising AOVs are not currently included.
+ - Only Frame/Animation ext: name.#.ext is supported.
+ - Use Custom extension is not supported.
+ - and tokens not tested
+        - With Merge AOVs but <renderpass> in File Name Prefix Arnold
+ will still NOT merge the aovs. This class correctly resolves
+ it - but user should be aware.
+ - File Path Prefix overrides per AOV driver are not implemented
+
+ Attributes:
+ aiDriverExtension (dict): Arnold AOV driver extension mapping.
+ Is there a better way?
+ renderer (str): name of renderer.
+
+ """
+ renderer = "arnold"
+ aiDriverExtension = {
+ "jpeg": "jpg",
+ "exr": "exr",
+ "deepexr": "exr",
+ "png": "png",
+ "tiff": "tif",
+ "mtoa_shaders": "ass", # TODO: research what those last two should be
+ "maya": "",
+ }
+
+ def get_renderer_prefix(self):
+
+ prefix = super(RenderProductsArnold, self).get_renderer_prefix()
+ merge_aovs = self._get_attr("defaultArnoldDriver.mergeAOVs")
+        if not merge_aovs and "<renderpass>" not in prefix.lower():
+            # When Merge AOVs is disabled and <RenderPass> token not present
+            # then Arnold prepends <RenderPass>/ to the output path.
+            # todo: It's untested what happens if AOV driver has
+            # an explicit override path prefix.
+            prefix = "<RenderPass>/" + prefix
+
+ return prefix
+
+ def _get_aov_render_products(self, aov):
+ """Return all render products for the AOV"""
+
+ products = list()
+ aov_name = self._get_attr(aov, "name")
+ ai_drivers = cmds.listConnections("{}.outputs".format(aov),
+ source=True,
+ destination=False,
+ type="aiAOVDriver") or []
+
+ for ai_driver in ai_drivers:
+ # todo: check aiAOVDriver.prefix as it could have
+ # a custom path prefix set for this driver
+
+ # Skip Drivers set only for GUI
+ # 0: GUI, 1: Batch, 2: GUI and Batch
+ output_mode = self._get_attr(ai_driver, "outputMode")
+ if output_mode == 0: # GUI only
+ log.warning("%s has Output Mode set to GUI, "
+ "skipping...", ai_driver)
+ continue
+
+ ai_translator = self._get_attr(ai_driver, "aiTranslator")
+ try:
+ ext = self.aiDriverExtension[ai_translator]
+ except KeyError:
+ raise AOVError(
+ "Unrecognized arnold driver format "
+ "for AOV - {}".format(aov_name)
+ )
+
+ # If aov RGBA is selected, arnold will translate it to `beauty`
+ name = aov_name
+ if name == "RGBA":
+ name = "beauty"
+
+ # Support Arnold light groups for AOVs
+ # Global AOV: When disabled the main layer is not written: `{pass}`
+ # All Light Groups: When enabled, a `{pass}_lgroups` file is
+ # written and is always merged into a single file
+ # Light Groups List: When set, a product per light group is written
+ # e.g. {pass}_front, {pass}_rim
+ global_aov = self._get_attr(aov, "globalAov")
+ if global_aov:
+ product = RenderProduct(productName=name,
+ ext=ext,
+ aov=aov_name,
+ driver=ai_driver)
+ products.append(product)
+
+ all_light_groups = self._get_attr(aov, "lightGroups")
+ if all_light_groups:
+ # All light groups is enabled. A single multipart
+ # Render Product
+ product = RenderProduct(productName=name + "_lgroups",
+ ext=ext,
+ aov=aov_name,
+ driver=ai_driver,
+ # Always multichannel output
+ multipart=True)
+ products.append(product)
+ else:
+ value = self._get_attr(aov, "lightGroupsList")
+ if not value:
+ continue
+ selected_light_groups = value.strip().split()
+ for light_group in selected_light_groups:
+ # Render Product per selected light group
+ aov_light_group_name = "{}_{}".format(name, light_group)
+ product = RenderProduct(productName=aov_light_group_name,
+ aov=aov_name,
+ driver=ai_driver,
+ ext=ext)
+ products.append(product)
+
+ return products
+
+ def get_render_products(self):
+ """Get all AOVs.
+
+ See Also:
+ :func:`ARenderProducts.get_render_products()`
+
+ Raises:
+ :class:`AOVError`: If AOV cannot be determined.
+
+ """
+
+ if not cmds.ls("defaultArnoldRenderOptions", type="aiOptions"):
+ # this occurs when Render Setting windows was not opened yet. In
+ # such case there are no Arnold options created so query for AOVs
+ # will fail. We terminate here as there are no AOVs specified then.
+ # This state will most probably fail later on some Validator
+ # anyway.
+ return []
+
+ default_ext = self._get_attr("defaultRenderGlobals.imfPluginKey")
+ beauty_product = RenderProduct(productName="beauty",
+ ext=default_ext,
+ driver="defaultArnoldDriver")
+
+ # AOVs > Legacy > Maya Render View > Mode
+ aovs_enabled = bool(
+ self._get_attr("defaultArnoldRenderOptions.aovMode")
+ )
+ if not aovs_enabled:
+ return [beauty_product]
+
+        # Common > File Output > Merge AOVs or <renderpass>
+        # We don't need to check for Merge AOVs due to overridden
+        # `get_renderer_prefix()` behavior which forces <renderpass>
+        has_renderpass_token = (
+            "<renderpass>" in self.layer_data.filePrefix.lower()
+        )
+ if not has_renderpass_token:
+ beauty_product.multipart = True
+ return [beauty_product]
+
+ # AOVs are set to be rendered separately. We should expect
+        # <renderpass> token in path.
+ # handle aovs from references
+ use_ref_aovs = self.render_instance.data.get(
+ "useReferencedAovs", False) or False
+
+ aovs = cmds.ls(type="aiAOV")
+ if not use_ref_aovs:
+ ref_aovs = cmds.ls(type="aiAOV", referencedNodes=True)
+ aovs = list(set(aovs) - set(ref_aovs))
+
+ products = []
+
+ # Append the AOV products
+ for aov in aovs:
+ enabled = self._get_attr(aov, "enabled")
+ if not enabled:
+ continue
+
+ # For now stick to the legacy output format.
+ aov_products = self._get_aov_render_products(aov)
+ products.extend(aov_products)
+
+ if not any(product.aov == "RGBA" for product in products):
+ # Append default 'beauty' as this is arnolds default.
+ # However, it is excluded whenever a RGBA pass is enabled.
+ # For legibility add the beauty layer as first entry
+ products.insert(0, beauty_product)
+
+ # TODO: Output Denoising AOVs?
+
+ return products
+
+
+class RenderProductsVray(ARenderProducts):
+ """Expected files for V-Ray renderer.
+
+ Notes:
+ - "Disabled" animation incorrectly returns frames in filename
+ - "Renumber Frames" is not supported
+
+ Reference:
+ vrayAddRenderElementImpl() in vrayCreateRenderElementsTab.mel
+
+ """
+ # todo: detect whether rendering with V-Ray GPU + whether AOV is supported
+
+ renderer = "vray"
+
+ def get_renderer_prefix(self):
+ # type: () -> str
+ """Get image prefix for V-Ray.
+
+ This overrides :func:`ARenderProducts.get_renderer_prefix()` as
+        we must add `<aov>` token manually.
+
+ See also:
+ :func:`ARenderProducts.get_renderer_prefix()`
+
+ """
+ prefix = super(RenderProductsVray, self).get_renderer_prefix()
+        prefix = "{}.<aov>".format(prefix)
+ return prefix
+
+ def _get_layer_data(self):
+ # type: () -> LayerMetadata
+ """Override to get vray specific extension."""
+ layer_data = super(RenderProductsVray, self)._get_layer_data()
+
+ default_ext = self._get_attr("vraySettings.imageFormatStr")
+ if default_ext in ["exr (multichannel)", "exr (deep)"]:
+ default_ext = "exr"
+ layer_data.defaultExt = default_ext
+ layer_data.padding = self._get_attr("vraySettings.fileNamePadding")
+
+ return layer_data
+
+ def get_render_products(self):
+ """Get all AOVs.
+
+ See Also:
+ :func:`ARenderProducts.get_render_products()`
+
+ """
+ if not cmds.ls("vraySettings", type="VRaySettingsNode"):
+ # this occurs when Render Setting windows was not opened yet. In
+ # such case there are no VRay options created so query for AOVs
+ # will fail. We terminate here as there are no AOVs specified then.
+ # This state will most probably fail later on some Validator
+ # anyway.
+ return []
+
+ image_format_str = self._get_attr("vraySettings.imageFormatStr")
+ default_ext = image_format_str
+ if default_ext in {"exr (multichannel)", "exr (deep)"}:
+ default_ext = "exr"
+
+ products = []
+
+ # add beauty as default when not disabled
+ dont_save_rgb = self._get_attr("vraySettings.dontSaveRgbChannel")
+ if not dont_save_rgb:
+ products.append(RenderProduct(productName="", ext=default_ext))
+
+ # separate alpha file
+ separate_alpha = self._get_attr("vraySettings.separateAlpha")
+ if separate_alpha:
+ products.append(RenderProduct(productName="Alpha",
+ ext=default_ext))
+
+ if image_format_str == "exr (multichannel)":
+ # AOVs are merged in m-channel file, only main layer is rendered
+ self.multipart = True
+ return products
+
+ # handle aovs from references
+ use_ref_aovs = self.render_instance.data.get(
+ "useReferencedAovs", False) or False
+
+ # this will have list of all aovs no matter if they are coming from
+ # reference or not.
+ aov_types = ["VRayRenderElement", "VRayRenderElementSet"]
+ aovs = cmds.ls(type=aov_types)
+ if not use_ref_aovs:
+ ref_aovs = cmds.ls(type=aov_types, referencedNodes=True) or []
+ aovs = list(set(aovs) - set(ref_aovs))
+
+ for aov in aovs:
+ enabled = self._get_attr(aov, "enabled")
+ if not enabled:
+ continue
+
+ class_type = self._get_attr(aov + ".vrayClassType")
+ if class_type == "LightMixElement":
+ # Special case which doesn't define a name by itself but
+ # instead seems to output multiple Render Products,
+ # specifically "Self_Illumination" and "Environment"
+ product_names = ["Self_Illumination", "Environment"]
+ for name in product_names:
+ product = RenderProduct(productName=name,
+ ext=default_ext,
+ aov=aov)
+ products.append(product)
+ # Continue as we've processed this special case AOV
+ continue
+
+ aov_name = self._get_vray_aov_name(aov)
+ product = RenderProduct(productName=aov_name,
+ ext=default_ext,
+ aov=aov)
+ products.append(product)
+
+ return products
+
+ def _get_vray_aov_attr(self, node, prefix):
+ """Get value for attribute that starts with key in name
+
+ V-Ray AOVs have attribute names that include the type
+ of AOV in the attribute name, for example:
+ - vray_filename_rawdiffuse
+ - vray_filename_velocity
+ - vray_name_gi
+ - vray_explicit_name_extratex
+
+ To simplify querying the "vray_filename" or "vray_name"
+ attributes we just find the first attribute that has
+ that particular "{prefix}_" in the attribute name.
+
+ Args:
+ node (str): AOV node name
+ prefix (str): Prefix of the attribute name.
+
+ Returns:
+ Value of the attribute if it exists, else None
+
+ """
+ attrs = cmds.listAttr(node, string="{}_*".format(prefix))
+ if not attrs:
+ return None
+
+ assert len(attrs) == 1, "Found more than one attribute: %s" % attrs
+ attr = attrs[0]
+
+ return self._get_attr(node, attr)
+
+ def _get_vray_aov_name(self, node):
+ """Get AOVs name from Vray.
+
+ Args:
+ node (str): aov node name.
+
+ Returns:
+ str: aov name.
+
+ """
+
+ vray_explicit_name = self._get_vray_aov_attr(node,
+ "vray_explicit_name")
+ vray_filename = self._get_vray_aov_attr(node, "vray_filename")
+ vray_name = self._get_vray_aov_attr(node, "vray_name")
+ final_name = vray_explicit_name or vray_filename or vray_name or None
+
+ class_type = self._get_attr(node, "vrayClassType")
+ if not vray_explicit_name:
+ # Explicit name takes precedence and overrides completely
+ # otherwise add the connected node names to the special cases
+ # Any namespace colon ':' gets replaced to underscore '_'
+ # so we sanitize using `sanitize_camera_name`
+ def _get_source_name(node, attr):
+ """Return sanitized name of input connection to attribute"""
+ plug = "{}.{}".format(node, attr)
+ connections = cmds.listConnections(plug,
+ source=True,
+ destination=False)
+ if connections:
+ return self.sanitize_camera_name(connections[0])
+
+ if class_type == "MaterialSelectElement":
+ # Name suffix is based on the connected material or set
+ attrs = [
+ "vray_mtllist_mtlselect",
+ "vray_mtl_mtlselect"
+ ]
+ for attribute in attrs:
+ name = _get_source_name(node, attribute)
+ if name:
+ final_name += '_{}'.format(name)
+ break
+ else:
+ log.warning("Material Select Element has no "
+ "selected materials: %s", node)
+
+ elif class_type == "ExtraTexElement":
+ # Name suffix is based on the connected textures
+ extratex_type = self._get_attr(node, "vray_type_extratex")
+ attr = {
+ 0: "vray_texture_extratex",
+ 1: "vray_float_texture_extratex",
+ 2: "vray_int_texture_extratex",
+ }.get(extratex_type)
+ name = _get_source_name(node, attr)
+ if name:
+ final_name += '_{}'.format(name)
+ else:
+ log.warning("Extratex Element has no incoming texture")
+
+ assert final_name, "Output filename not defined for AOV: %s" % node
+
+ return final_name
+
+
+class RenderProductsRedshift(ARenderProducts):
+ """Expected files for Redshift renderer.
+
+ Notes:
+ - `get_files()` only supports rendering with frames, like "animation"
+
+ Attributes:
+
+ unmerged_aovs (list): Name of aovs that are not merged into resulting
+ exr and we need them specified in Render Products output.
+
+ """
+
+ renderer = "redshift"
+ unmerged_aovs = {"Cryptomatte"}
+
+ def get_renderer_prefix(self):
+ """Get image prefix for Redshift.
+
+ This overrides :func:`ARenderProducts.get_renderer_prefix()` as
+        we must add `<aov>` token manually.
+
+ See also:
+ :func:`ARenderProducts.get_renderer_prefix()`
+
+ """
+ prefix = super(RenderProductsRedshift, self).get_renderer_prefix()
+        prefix = "{}.<aov>".format(prefix)
+ return prefix
+
+ def get_render_products(self):
+ """Get all AOVs.
+
+ See Also:
+ :func:`ARenderProducts.get_render_products()`
+
+ """
+
+ if not cmds.ls("redshiftOptions", type="RedshiftOptions"):
+ # this occurs when Render Setting windows was not opened yet. In
+ # such case there are no Redshift options created so query for AOVs
+ # will fail. We terminate here as there are no AOVs specified then.
+ # This state will most probably fail later on some Validator
+ # anyway.
+ return []
+
+ # For Redshift we don't directly return upon forcing multilayer
+ # due to some AOVs still being written into separate files,
+ # like Cryptomatte.
+ # AOVs are merged in multi-channel file
+ multipart = bool(self._get_attr("redshiftOptions.exrForceMultilayer"))
+
+ # Get Redshift Extension from image format
+ image_format = self._get_attr("redshiftOptions.imageFormat") # integer
+ ext = mel.eval("redshiftGetImageExtension(%i)" % image_format)
+
+ use_ref_aovs = self.render_instance.data.get(
+ "useReferencedAovs", False) or False
+
+ aovs = cmds.ls(type="RedshiftAOV")
+ if not use_ref_aovs:
+ ref_aovs = cmds.ls(type="RedshiftAOV", referencedNodes=True)
+ aovs = list(set(aovs) - set(ref_aovs))
+
+ products = []
+ light_groups_enabled = False
+ has_beauty_aov = False
+ for aov in aovs:
+ enabled = self._get_attr(aov, "enabled")
+ if not enabled:
+ continue
+
+ aov_type = self._get_attr(aov, "aovType")
+ if multipart and aov_type not in self.unmerged_aovs:
+ continue
+
+ # Any AOVs that still get processed, like Cryptomatte
+ # by themselves are not multipart files.
+ aov_multipart = not multipart
+
+ # Redshift skips rendering of masterlayer without AOV suffix
+ # when a Beauty AOV is rendered. It overrides the main layer.
+ if aov_type == "Beauty":
+ has_beauty_aov = True
+
+ aov_name = self._get_attr(aov, "name")
+
+ # Support light Groups
+ light_groups = []
+ if self._get_attr(aov, "supportsLightGroups"):
+ all_light_groups = self._get_attr(aov, "allLightGroups")
+ if all_light_groups:
+ # All light groups is enabled
+ light_groups = self._get_redshift_light_groups()
+ else:
+ value = self._get_attr(aov, "lightGroupList")
+ # note: string value can return None when never set
+ if value:
+ selected_light_groups = value.strip().split()
+ light_groups = selected_light_groups
+
+ for light_group in light_groups:
+ aov_light_group_name = "{}_{}".format(aov_name,
+ light_group)
+ product = RenderProduct(productName=aov_light_group_name,
+ aov=aov_name,
+ ext=ext,
+ multipart=aov_multipart)
+ products.append(product)
+
+ if light_groups:
+ light_groups_enabled = True
+
+ # Redshift AOV Light Select always renders the global AOV
+ # even when light groups are present so we don't need to
+ # exclude it when light groups are active
+ product = RenderProduct(productName=aov_name,
+ aov=aov_name,
+ ext=ext,
+ multipart=aov_multipart)
+ products.append(product)
+
+ # When a Beauty AOV is added manually, it will be rendered as
+ # 'Beauty_other' in file name and "standard" beauty will have
+ # 'Beauty' in its name. When disabled, standard output will be
+ # without `Beauty`. Except when using light groups.
+ if light_groups_enabled:
+ return products
+
+ beauty_name = "Beauty_other" if has_beauty_aov else ""
+ products.insert(0,
+ RenderProduct(productName=beauty_name,
+ ext=ext,
+ multipart=multipart))
+
+ return products
+
+ @staticmethod
+ def _get_redshift_light_groups():
+ return sorted(mel.eval("redshiftAllAovLightGroups"))
+
+
+class RenderProductsRenderman(ARenderProducts):
+ """Expected files for Renderman renderer.
+
+ Warning:
+ This is very rudimentary and needs more love and testing.
+ """
+
+ renderer = "renderman"
+
+ def get_render_products(self):
+ """Get all AOVs.
+
+ See Also:
+ :func:`ARenderProducts.get_render_products()`
+
+ """
+ products = []
+
+ default_ext = "exr"
+ displays = cmds.listConnections("rmanGlobals.displays")
+ for aov in displays:
+ enabled = self._get_attr(aov, "enabled")
+ if not enabled:
+ continue
+
+ aov_name = str(aov)
+ if aov_name == "rmanDefaultDisplay":
+ aov_name = "beauty"
+
+ product = RenderProduct(productName=aov_name,
+ ext=default_ext)
+ products.append(product)
+
+ return products
+
+ def get_files(self, product, camera):
+ """Get expected files.
+
+ In renderman we hack it with prepending path. This path would
+ normally be translated from `rmanGlobals.imageOutputDir`. We skip
+ this and hardcode prepend path we expect. There is no place for user
+ to mess around with this settings anyway and it is enforced in
+ render settings validator.
+ """
+ files = super(RenderProductsRenderman, self).get_files(product, camera)
+
+ layer_data = self.layer_data
+ new_files = []
+ for file in files:
+ new_file = "{}/{}/{}".format(
+                layer_data.sceneName, layer_data.layerName, file
+ )
+ new_files.append(new_file)
+
+ return new_files
+
+
+class AOVError(Exception):
+ """Custom exception for determining AOVs."""
+
+
+class UnsupportedRendererException(Exception):
+ """Custom exception.
+
+ Raised when requesting data from unsupported renderer.
+ """
diff --git a/openpype/hosts/maya/api/lib_rendersetup.py b/openpype/hosts/maya/api/lib_rendersetup.py
new file mode 100644
index 0000000000..0736febe9c
--- /dev/null
+++ b/openpype/hosts/maya/api/lib_rendersetup.py
@@ -0,0 +1,343 @@
+# -*- coding: utf-8 -*-
+"""Library for handling Render Setup in Maya."""
+from maya import cmds
+import maya.api.OpenMaya as om
+import logging
+
+import maya.app.renderSetup.model.utils as utils
+from maya.app.renderSetup.model import (
+ renderSetup
+)
+from maya.app.renderSetup.model.override import (
+ AbsOverride,
+ RelOverride,
+ UniqueOverride
+)
+
+ExactMatch = 0
+ParentMatch = 1
+ChildMatch = 2
+
+DefaultRenderLayer = "defaultRenderLayer"
+
+log = logging.getLogger(__name__)
+
+
+def get_rendersetup_layer(layer):
+ """Return render setup layer name.
+
+ This also converts names from legacy renderLayer node name to render setup
+ name.
+
+ Note: `defaultRenderLayer` is not a renderSetupLayer node but it is however
+ the valid layer name for Render Setup - so we return that as is.
+
+ Example:
+ >>> for legacy_layer in cmds.ls(type="renderLayer"):
+ >>> layer = get_rendersetup_layer(legacy_layer)
+
+ Returns:
+ str or None: Returns renderSetupLayer node name if `layer` is a valid
+ layer name in legacy renderlayers or render setup layers.
+ Returns None if the layer can't be found or Render Setup is
+ currently disabled.
+
+
+ """
+ if layer == DefaultRenderLayer:
+ # defaultRenderLayer doesn't have a `renderSetupLayer`
+ return layer
+
+ if not cmds.mayaHasRenderSetup():
+ return None
+
+ if not cmds.objExists(layer):
+ return None
+
+ if cmds.nodeType(layer) == "renderSetupLayer":
+ return layer
+
+ # By default Render Setup renames the legacy renderlayer
+    # to `rs_<layername>` but let's not rely on that as the
+ # layer node can be renamed manually
+ connections = cmds.listConnections(layer + ".message",
+ type="renderSetupLayer",
+ exactType=True,
+ source=False,
+ destination=True,
+ plugs=True) or []
+ return next((conn.split(".", 1)[0] for conn in connections
+ if conn.endswith(".legacyRenderLayer")), None)
+
+
+def get_attr_in_layer(node_attr, layer):
+ """Return attribute value in Render Setup layer.
+
+ This will only work for attributes which can be
+ retrieved with `maya.cmds.getAttr` and for which
+ Relative and Absolute overrides are applicable.
+
+ Examples:
+ >>> get_attr_in_layer("defaultResolution.width", layer="layer1")
+ >>> get_attr_in_layer("defaultRenderGlobals.startFrame", layer="layer")
+ >>> get_attr_in_layer("transform.translate", layer="layer3")
+
+ Args:
+        node_attr (str): attribute name as 'node.attribute'
+ layer (str): layer name
+
+ Returns:
+ object: attribute value in layer
+
+ """
+
+ # Delay pymel import to here because it's slow to load
+ import pymel.core as pm
+
+ def _layer_needs_update(layer):
+ """Return whether layer needs updating."""
+ # Use `getattr` as e.g. DefaultRenderLayer does not have the attribute
+ return getattr(layer, "needsMembershipUpdate", False) or \
+ getattr(layer, "needsApplyUpdate", False)
+
+ def get_default_layer_value(node_attr_):
+ """Return attribute value in defaultRenderLayer"""
+ inputs = cmds.listConnections(node_attr_,
+ source=True,
+ destination=False,
+ # We want to skip conversion nodes since
+ # an override to `endFrame` could have
+ # a `unitToTimeConversion` node
+ # in-between
+ skipConversionNodes=True,
+ type="applyOverride") or []
+ if inputs:
+ _override = inputs[0]
+ history_overrides = cmds.ls(cmds.listHistory(_override,
+ pruneDagObjects=True),
+ type="applyOverride")
+ node = history_overrides[-1] if history_overrides else _override
+ node_attr_ = node + ".original"
+
+ return pm.getAttr(node_attr_, asString=True)
+
+ layer = get_rendersetup_layer(layer)
+ rs = renderSetup.instance()
+ current_layer = rs.getVisibleRenderLayer()
+ if current_layer.name() == layer:
+
+ # Ensure layer is up-to-date
+ if _layer_needs_update(current_layer):
+ try:
+ rs.switchToLayer(current_layer)
+ except RuntimeError:
+ # Some cases can cause errors on switching
+ # the first time with Render Setup layers
+ # e.g. different overrides to compounds
+ # and its children plugs. So we just force
+ # it another time. If it then still fails
+ # we will let it error out.
+ rs.switchToLayer(current_layer)
+
+ return pm.getAttr(node_attr, asString=True)
+
+ overrides = get_attr_overrides(node_attr, layer)
+ default_layer_value = get_default_layer_value(node_attr)
+ if not overrides:
+ return default_layer_value
+
+ value = default_layer_value
+ for match, layer_override, index in overrides:
+ if isinstance(layer_override, AbsOverride):
+ # Absolute override
+ value = pm.getAttr(layer_override.name() + ".attrValue")
+ if match == ExactMatch:
+ value = value
+ if match == ParentMatch:
+ value = value[index]
+ if match == ChildMatch:
+ value[index] = value
+
+ elif isinstance(layer_override, RelOverride):
+ # Relative override
+ # Value = Original * Multiply + Offset
+ multiply = pm.getAttr(layer_override.name() + ".multiply")
+ offset = pm.getAttr(layer_override.name() + ".offset")
+
+ if match == ExactMatch:
+ value = value * multiply + offset
+ if match == ParentMatch:
+ value = value * multiply[index] + offset[index]
+ if match == ChildMatch:
+ value[index] = value[index] * multiply + offset
+
+ else:
+ raise TypeError("Unsupported override: %s" % layer_override)
+
+ return value
+
+
+def get_attr_overrides(node_attr, layer,
+ skip_disabled=True,
+ skip_local_render=True,
+ stop_at_absolute_override=True):
+ """Return all Overrides applicable to the attribute.
+
+ Overrides are returned as a 3-tuple:
+ (Match, Override, Index)
+
+ Match:
+ This is any of ExactMatch, ParentMatch, ChildMatch
+ and defines whether the override is exactly on the
+ plug, on the parent or on a child plug.
+
+ Override:
+ This is the RenderSetup Override instance.
+
+ Index:
+ This is the Plug index under the parent or for
+ the child that matches. The ExactMatch index will
+ always be None. For ParentMatch the index is which
+ index the plug is under the parent plug. For ChildMatch
+ the index is which child index matches the plug.
+
+ Args:
+ node_attr (str): attribute name as 'node.attribute'
+ layer (str): layer name
+ skip_disabled (bool): exclude disabled overrides
+ skip_local_render (bool): exclude overrides marked
+ as local render.
+ stop_at_absolute_override: exclude overrides prior
+ to the last absolute override as they have
+ no influence on the resulting value.
+
+ Returns:
+ list: Ordered Overrides in order of strength
+
+ """
+
+ def get_mplug_children(plug):
+ """Return children MPlugs of compound MPlug"""
+ children = []
+ if plug.isCompound:
+ for i in range(plug.numChildren()):
+ children.append(plug.child(i))
+ return children
+
+ def get_mplug_names(mplug):
+ """Return long and short name of MPlug"""
+ long_name = mplug.partialName(useLongNames=True)
+ short_name = mplug.partialName(useLongNames=False)
+ return {long_name, short_name}
+
+ def iter_override_targets(_override):
+ try:
+ for target in _override._targets():
+ yield target
+ except AssertionError:
+ # Workaround: There is a bug where the private `_targets()` method
+ # fails on some attribute plugs. For example overrides
+ # to the defaultRenderGlobals.endFrame
+ # (Tested in Maya 2020.2)
+ log.debug("Workaround for %s" % _override)
+ from maya.app.renderSetup.common.utils import findPlug
+
+ attr = _override.attributeName()
+ if isinstance(_override, UniqueOverride):
+ node = _override.targetNodeName()
+ yield findPlug(node, attr)
+ else:
+ nodes = _override.parent().selector().nodes()
+ for node in nodes:
+ if cmds.attributeQuery(attr, node=node, exists=True):
+ yield findPlug(node, attr)
+
+ # Get the MPlug for the node.attr
+ sel = om.MSelectionList()
+ sel.add(node_attr)
+ plug = sel.getPlug(0)
+
+ layer = get_rendersetup_layer(layer)
+ if layer == DefaultRenderLayer:
+ # DefaultRenderLayer will never have overrides
+ # since it's the default layer
+ return []
+
+ rs_layer = renderSetup.instance().getRenderLayer(layer)
+ if rs_layer is None:
+ # Renderlayer does not exist
+        return []
+
+ # Get any parent or children plugs as we also
+ # want to include them in the attribute match
+ # for overrides
+ parent = plug.parent() if plug.isChild else None
+ parent_index = None
+ if parent:
+ parent_index = get_mplug_children(parent).index(plug)
+
+ children = get_mplug_children(plug)
+
+ # Create lookup for the attribute by both long
+ # and short names
+ attr_names = get_mplug_names(plug)
+ for child in children:
+ attr_names.update(get_mplug_names(child))
+ if parent:
+ attr_names.update(get_mplug_names(parent))
+
+ # Get all overrides of the layer
+ # And find those that are relevant to the attribute
+ plug_overrides = []
+
+ # Iterate over the overrides in reverse so we get the last
+ # overrides first and can "break" whenever an absolute
+ # override is reached
+ layer_overrides = list(utils.getOverridesRecursive(rs_layer))
+ for layer_override in reversed(layer_overrides):
+
+ if skip_disabled and not layer_override.isEnabled():
+ # Ignore disabled overrides
+ continue
+
+ if skip_local_render and layer_override.isLocalRender():
+ continue
+
+ # The targets list can be very large so we'll do
+ # a quick filter by attribute name to detect whether
+ # it matches the attribute name, or its parent or child
+ if layer_override.attributeName() not in attr_names:
+ continue
+
+ override_match = None
+ for override_plug in iter_override_targets(layer_override):
+
+ override_match = None
+ if plug == override_plug:
+ override_match = (ExactMatch, layer_override, None)
+
+ elif parent and override_plug == parent:
+ override_match = (ParentMatch, layer_override, parent_index)
+
+ elif children and override_plug in children:
+ child_index = children.index(override_plug)
+ override_match = (ChildMatch, layer_override, child_index)
+
+ if override_match:
+ plug_overrides.append(override_match)
+ break
+
+ if (
+ override_match and
+ stop_at_absolute_override and
+ isinstance(layer_override, AbsOverride) and
+ # When the override is only on a child plug then it doesn't
+ # override the entire value so we not stop at this override
+ not override_match[0] == ChildMatch
+ ):
+ # If override is absolute override, then BREAK out
+ # of parent loop we don't need to look any further as
+ # this is the absolute override
+ break
+
+ return reversed(plug_overrides)
diff --git a/openpype/hosts/maya/api/menu.py b/openpype/hosts/maya/api/menu.py
index 0dced48868..ad225dcd28 100644
--- a/openpype/hosts/maya/api/menu.py
+++ b/openpype/hosts/maya/api/menu.py
@@ -16,12 +16,9 @@ log = logging.getLogger(__name__)
def _get_menu(menu_name=None):
"""Return the menu instance if it currently exists in Maya"""
-
- project_settings = get_project_settings(os.getenv("AVALON_PROJECT"))
- _menu = project_settings["maya"]["scriptsmenu"]["name"]
-
if menu_name is None:
- menu_name = _menu
+ menu_name = pipeline._menu
+
widgets = dict((
w.objectName(), w) for w in QtWidgets.QApplication.allWidgets())
menu = widgets.get(menu_name)
@@ -58,11 +55,64 @@ def deferred():
parent=pipeline._parent
)
+ # Find the pipeline menu
+ top_menu = _get_menu()
+
+ # Try to find workfile tool action in the menu
+ workfile_action = None
+ for action in top_menu.actions():
+ if action.text() == "Work Files":
+ workfile_action = action
+ break
+
+ # Add at the top of menu if "Work Files" action was not found
+ after_action = ""
+ if workfile_action:
+ # Use action's object name for `insertAfter` argument
+ after_action = workfile_action.objectName()
+
+ # Insert action to menu
+ cmds.menuItem(
+ "Work Files",
+ parent=pipeline._menu,
+ command=launch_workfiles_app,
+ insertAfter=after_action
+ )
+
+ # Remove replaced action
+ if workfile_action:
+ top_menu.removeAction(workfile_action)
+
+ def remove_project_manager():
+ top_menu = _get_menu()
+
+ # Try to find "System" menu action in the menu
+ system_menu = None
+ for action in top_menu.actions():
+ if action.text() == "System":
+ system_menu = action
+ break
+
+ if system_menu is None:
+ return
+
+ # Try to find "Project manager" action in "System" menu
+ project_manager_action = None
+ for action in system_menu.menu().children():
+ if hasattr(action, "text") and action.text() == "Project Manager":
+ project_manager_action = action
+ break
+
+ # Remove "Project manager" action if was found
+ if project_manager_action is not None:
+ system_menu.menu().removeAction(project_manager_action)
+
log.info("Attempting to install scripts menu ...")
add_build_workfiles_item()
add_look_assigner_item()
modify_workfiles()
+ remove_project_manager()
try:
import scriptsmenu.launchformaya as launchformaya
@@ -110,7 +160,6 @@ def install():
log.info("Skipping openpype.menu initialization in batch mode..")
return
- uninstall()
# Allow time for uninstallation to finish.
cmds.evalDeferred(deferred)
diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py
index 257908c768..121f7a08a7 100644
--- a/openpype/hosts/maya/api/plugin.py
+++ b/openpype/hosts/maya/api/plugin.py
@@ -99,14 +99,24 @@ class ReferenceLoader(api.Loader):
nodes = self[:]
if not nodes:
return
-
- loaded_containers.append(containerise(
- name=name,
- namespace=namespace,
- nodes=nodes,
- context=context,
- loader=self.__class__.__name__
- ))
+ # FIXME: there is probably better way to do this for looks.
+ if "look" in self.families:
+ loaded_containers.append(containerise(
+ name=name,
+ namespace=namespace,
+ nodes=nodes,
+ context=context,
+ loader=self.__class__.__name__
+ ))
+ else:
+ ref_node = self._get_reference_node(nodes)
+ loaded_containers.append(containerise(
+ name=name,
+ namespace=namespace,
+ nodes=[ref_node],
+ context=context,
+ loader=self.__class__.__name__
+ ))
c += 1
namespace = None
@@ -235,9 +245,6 @@ class ReferenceLoader(api.Loader):
self.log.info("Setting %s.verticesOnlySet to False", node)
cmds.setAttr("{}.verticesOnlySet".format(node), False)
- # Add new nodes of the reference to the container
- cmds.sets(content, forceElement=node)
-
# Remove any placeHolderList attribute entries from the set that
# are remaining from nodes being removed from the referenced file.
members = cmds.sets(node, query=True)
diff --git a/openpype/hosts/maya/plugins/create/create_animation.py b/openpype/hosts/maya/plugins/create/create_animation.py
index 5155aec0ab..7ce96166f7 100644
--- a/openpype/hosts/maya/plugins/create/create_animation.py
+++ b/openpype/hosts/maya/plugins/create/create_animation.py
@@ -24,6 +24,7 @@ class CreateAnimation(plugin.Creator):
# Write vertex colors with the geometry.
self.data["writeColorSets"] = False
+ self.data["writeFaceSets"] = False
# Include only renderable visible shapes.
# Skips locators and empty transforms
diff --git a/openpype/hosts/maya/plugins/create/create_model.py b/openpype/hosts/maya/plugins/create/create_model.py
index f1d9d22c1c..37faad23a0 100644
--- a/openpype/hosts/maya/plugins/create/create_model.py
+++ b/openpype/hosts/maya/plugins/create/create_model.py
@@ -15,6 +15,7 @@ class CreateModel(plugin.Creator):
# Vertex colors with the geometry
self.data["writeColorSets"] = False
+ self.data["writeFaceSets"] = False
# Include attributes by attribute name or prefix
self.data["attr"] = ""
diff --git a/openpype/hosts/maya/plugins/create/create_pointcache.py b/openpype/hosts/maya/plugins/create/create_pointcache.py
index 9afea731fd..d8e5fd43a7 100644
--- a/openpype/hosts/maya/plugins/create/create_pointcache.py
+++ b/openpype/hosts/maya/plugins/create/create_pointcache.py
@@ -20,6 +20,7 @@ class CreatePointCache(plugin.Creator):
self.data.update(lib.collect_animation_data())
self.data["writeColorSets"] = False # Vertex colors with the geometry.
+        self.data["writeFaceSets"] = False  # Write face sets with the geometry.
self.data["renderableOnly"] = False # Only renderable visible shapes
self.data["visibleOnly"] = False # only nodes that are visible
self.data["includeParentHierarchy"] = False # Include parent groups
diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py
index cbca091365..4fd4b9d986 100644
--- a/openpype/hosts/maya/plugins/create/create_render.py
+++ b/openpype/hosts/maya/plugins/create/create_render.py
@@ -4,6 +4,8 @@ import os
import json
import appdirs
import requests
+import six
+import sys
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
@@ -12,7 +14,13 @@ from openpype.hosts.maya.api import (
lib,
plugin
)
-from openpype.api import (get_system_settings, get_asset)
+from openpype.api import (
+ get_system_settings,
+ get_project_settings,
+ get_asset)
+from openpype.modules import ModulesManager
+
+from avalon.api import Session
class CreateRender(plugin.Creator):
@@ -83,6 +91,32 @@ class CreateRender(plugin.Creator):
def __init__(self, *args, **kwargs):
"""Constructor."""
super(CreateRender, self).__init__(*args, **kwargs)
+ deadline_settings = get_system_settings()["modules"]["deadline"]
+ if not deadline_settings["enabled"]:
+ self.deadline_servers = {}
+ return
+ project_settings = get_project_settings(Session["AVALON_PROJECT"])
+ try:
+ default_servers = deadline_settings["deadline_urls"]
+ project_servers = (
+ project_settings["deadline"]
+ ["deadline_servers"]
+ )
+ self.deadline_servers = {
+ k: default_servers[k]
+ for k in project_servers
+ if k in default_servers
+ }
+
+ if not self.deadline_servers:
+ self.deadline_servers = default_servers
+
+ except AttributeError:
+ # Handle situation were we had only one url for deadline.
+ manager = ModulesManager()
+ deadline_module = manager.modules_by_name["deadline"]
+ # get default deadline webservice url from deadline module
+ self.deadline_servers = deadline_module.deadline_urls
def process(self):
"""Entry point."""
@@ -94,10 +128,10 @@ class CreateRender(plugin.Creator):
use_selection = self.options.get("useSelection")
with lib.undo_chunk():
self._create_render_settings()
- instance = super(CreateRender, self).process()
+ self.instance = super(CreateRender, self).process()
# create namespace with instance
index = 1
- namespace_name = "_{}".format(str(instance))
+ namespace_name = "_{}".format(str(self.instance))
try:
cmds.namespace(rm=namespace_name)
except RuntimeError:
@@ -105,12 +139,20 @@ class CreateRender(plugin.Creator):
pass
while cmds.namespace(exists=namespace_name):
- namespace_name = "_{}{}".format(str(instance), index)
+ namespace_name = "_{}{}".format(str(self.instance), index)
index += 1
namespace = cmds.namespace(add=namespace_name)
- cmds.setAttr("{}.machineList".format(instance), lock=True)
+ # add Deadline server selection list
+ if self.deadline_servers:
+ cmds.scriptJob(
+ attributeChange=[
+ "{}.deadlineServers".format(self.instance),
+ self._deadline_webservice_changed
+ ])
+
+ cmds.setAttr("{}.machineList".format(self.instance), lock=True)
self._rs = renderSetup.instance()
layers = self._rs.getRenderLayers()
if use_selection:
@@ -122,7 +164,7 @@ class CreateRender(plugin.Creator):
render_set = cmds.sets(
n="{}:{}".format(namespace, layer.name()))
sets.append(render_set)
- cmds.sets(sets, forceElement=instance)
+ cmds.sets(sets, forceElement=self.instance)
# if no render layers are present, create default one with
# asterisk selector
@@ -138,62 +180,61 @@ class CreateRender(plugin.Creator):
renderer = 'renderman'
self._set_default_renderer_settings(renderer)
+ return self.instance
+
+ def _deadline_webservice_changed(self):
+ """Refresh Deadline server dependent options."""
+ # get selected server
+ from maya import cmds
+ webservice = self.deadline_servers[
+ self.server_aliases[
+ cmds.getAttr("{}.deadlineServers".format(self.instance))
+ ]
+ ]
+ pools = self._get_deadline_pools(webservice)
+ cmds.deleteAttr("{}.primaryPool".format(self.instance))
+ cmds.deleteAttr("{}.secondaryPool".format(self.instance))
+ cmds.addAttr(self.instance, longName="primaryPool",
+ attributeType="enum",
+ enumName=":".join(pools))
+ cmds.addAttr(self.instance, longName="secondaryPool",
+ attributeType="enum",
+ enumName=":".join(["-"] + pools))
+
+ def _get_deadline_pools(self, webservice):
+ # type: (str) -> list
+ """Get pools from Deadline.
+ Args:
+ webservice (str): Server url.
+ Returns:
+ list: Pools.
+ Raises:
+ RuntimeError: If deadline webservice is unreachable.
+
+ """
+ argument = "{}/api/pools?NamesOnly=true".format(webservice)
+ try:
+ response = self._requests_get(argument)
+ except requests.exceptions.ConnectionError as exc:
+ msg = 'Cannot connect to deadline web service'
+ self.log.error(msg)
+ six.reraise(
+ RuntimeError,
+ RuntimeError('{} - {}'.format(msg, exc)),
+ sys.exc_info()[2])
+ if not response.ok:
+ self.log.warning("No pools retrieved")
+ return []
+
+ return response.json()
def _create_render_settings(self):
+ """Create instance settings."""
# get pools
- pools = []
-
- system_settings = get_system_settings()["modules"]
-
- deadline_enabled = system_settings["deadline"]["enabled"]
- muster_enabled = system_settings["muster"]["enabled"]
- deadline_url = system_settings["deadline"]["DEADLINE_REST_URL"]
- muster_url = system_settings["muster"]["MUSTER_REST_URL"]
-
- if deadline_enabled and muster_enabled:
- self.log.error(
- "Both Deadline and Muster are enabled. " "Cannot support both."
- )
- raise RuntimeError("Both Deadline and Muster are enabled")
-
- if deadline_enabled:
- argument = "{}/api/pools?NamesOnly=true".format(deadline_url)
- try:
- response = self._requests_get(argument)
- except requests.exceptions.ConnectionError as e:
- msg = 'Cannot connect to deadline web service'
- self.log.error(msg)
- raise RuntimeError('{} - {}'.format(msg, e))
- if not response.ok:
- self.log.warning("No pools retrieved")
- else:
- pools = response.json()
- self.data["primaryPool"] = pools
- # We add a string "-" to allow the user to not
- # set any secondary pools
- self.data["secondaryPool"] = ["-"] + pools
-
- if muster_enabled:
- self.log.info(">>> Loading Muster credentials ...")
- self._load_credentials()
- self.log.info(">>> Getting pools ...")
- try:
- pools = self._get_muster_pools()
- except requests.exceptions.HTTPError as e:
- if e.startswith("401"):
- self.log.warning("access token expired")
- self._show_login()
- raise RuntimeError("Access token expired")
- except requests.exceptions.ConnectionError:
- self.log.error("Cannot connect to Muster API endpoint.")
- raise RuntimeError("Cannot connect to {}".format(muster_url))
- pool_names = []
- for pool in pools:
- self.log.info(" - pool: {}".format(pool["name"]))
- pool_names.append(pool["name"])
-
- self.data["primaryPool"] = pool_names
+ pool_names = []
+ self.server_aliases = self.deadline_servers.keys()
+ self.data["deadlineServers"] = self.server_aliases
self.data["suspendPublishJob"] = False
self.data["review"] = True
self.data["extendFrames"] = False
@@ -212,6 +253,54 @@ class CreateRender(plugin.Creator):
# Disable for now as this feature is not working yet
# self.data["assScene"] = False
+ system_settings = get_system_settings()["modules"]
+
+ deadline_enabled = system_settings["deadline"]["enabled"]
+ muster_enabled = system_settings["muster"]["enabled"]
+ muster_url = system_settings["muster"]["MUSTER_REST_URL"]
+
+ if deadline_enabled and muster_enabled:
+ self.log.error(
+ "Both Deadline and Muster are enabled. " "Cannot support both."
+ )
+ raise RuntimeError("Both Deadline and Muster are enabled")
+
+ if deadline_enabled:
+ # if default server is not between selected, use first one for
+ # initial list of pools.
+ try:
+ deadline_url = self.deadline_servers["default"]
+ except KeyError:
+ deadline_url = [
+ self.deadline_servers[k]
+ for k in self.deadline_servers.keys()
+ ][0]
+
+ pool_names = self._get_deadline_pools(deadline_url)
+
+ if muster_enabled:
+ self.log.info(">>> Loading Muster credentials ...")
+ self._load_credentials()
+ self.log.info(">>> Getting pools ...")
+ pools = []
+ try:
+ pools = self._get_muster_pools()
+ except requests.exceptions.HTTPError as e:
+ if e.startswith("401"):
+ self.log.warning("access token expired")
+ self._show_login()
+ raise RuntimeError("Access token expired")
+ except requests.exceptions.ConnectionError:
+ self.log.error("Cannot connect to Muster API endpoint.")
+ raise RuntimeError("Cannot connect to {}".format(muster_url))
+ for pool in pools:
+ self.log.info(" - pool: {}".format(pool["name"]))
+ pool_names.append(pool["name"])
+
+ self.data["primaryPool"] = pool_names
+ # We add a string "-" to allow the user to not
+ # set any secondary pools
+ self.data["secondaryPool"] = ["-"] + pool_names
self.options = {"useSelection": False} # Force no content
def _load_credentials(self):
@@ -293,9 +382,7 @@ class CreateRender(plugin.Creator):
"""
if "verify" not in kwargs:
- kwargs["verify"] = (
- False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True
- ) # noqa
+ kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True)
return requests.post(*args, **kwargs)
def _requests_get(self, *args, **kwargs):
@@ -312,9 +399,7 @@ class CreateRender(plugin.Creator):
"""
if "verify" not in kwargs:
- kwargs["verify"] = (
- False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True
- ) # noqa
+ kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True)
return requests.get(*args, **kwargs)
def _set_default_renderer_settings(self, renderer):
@@ -332,14 +417,10 @@ class CreateRender(plugin.Creator):
if renderer == "arnold":
# set format to exr
+
cmds.setAttr(
"defaultArnoldDriver.ai_translator", "exr", type="string")
- # enable animation
- cmds.setAttr("defaultRenderGlobals.outFormatControl", 0)
- cmds.setAttr("defaultRenderGlobals.animation", 1)
- cmds.setAttr("defaultRenderGlobals.putFrameBeforeExt", 1)
- cmds.setAttr("defaultRenderGlobals.extensionPadding", 4)
-
+ self._set_global_output_settings()
# resolution
cmds.setAttr(
"defaultResolution.width",
@@ -349,43 +430,12 @@ class CreateRender(plugin.Creator):
asset["data"].get("resolutionHeight"))
if renderer == "vray":
- vray_settings = cmds.ls(type="VRaySettingsNode")
- if not vray_settings:
- node = cmds.createNode("VRaySettingsNode")
- else:
- node = vray_settings[0]
-
- # set underscore as element separator instead of default `.`
- cmds.setAttr(
- "{}.fileNameRenderElementSeparator".format(
- node),
- "_"
- )
- # set format to exr
- cmds.setAttr(
- "{}.imageFormatStr".format(node), 5)
-
- # animType
- cmds.setAttr(
- "{}.animType".format(node), 1)
-
- # resolution
- cmds.setAttr(
- "{}.width".format(node),
- asset["data"].get("resolutionWidth"))
- cmds.setAttr(
- "{}.height".format(node),
- asset["data"].get("resolutionHeight"))
-
+ self._set_vray_settings(asset)
if renderer == "redshift":
- redshift_settings = cmds.ls(type="RedshiftOptions")
- if not redshift_settings:
- node = cmds.createNode("RedshiftOptions")
- else:
- node = redshift_settings[0]
+ _ = self._set_renderer_option(
+ "RedshiftOptions", "{}.imageFormat", 1
+ )
- # set exr
- cmds.setAttr("{}.imageFormat".format(node), 1)
# resolution
cmds.setAttr(
"defaultResolution.width",
@@ -394,8 +444,56 @@ class CreateRender(plugin.Creator):
"defaultResolution.height",
asset["data"].get("resolutionHeight"))
- # enable animation
- cmds.setAttr("defaultRenderGlobals.outFormatControl", 0)
- cmds.setAttr("defaultRenderGlobals.animation", 1)
- cmds.setAttr("defaultRenderGlobals.putFrameBeforeExt", 1)
- cmds.setAttr("defaultRenderGlobals.extensionPadding", 4)
+ self._set_global_output_settings()
+
+ @staticmethod
+ def _set_renderer_option(renderer_node, arg=None, value=None):
+ # type: (str, str, str) -> str
+ """Set option on renderer node.
+
+ If renderer settings node doesn't exist, it is created first.
+
+ Args:
+ renderer_node (str): Renderer name.
+ arg (str, optional): Argument name.
+ value (str, optional): Argument value.
+
+ Returns:
+ str: Renderer settings node.
+
+ """
+ settings = cmds.ls(type=renderer_node)
+ result = settings[0] if settings else cmds.createNode(renderer_node)
+ cmds.setAttr(arg.format(result), value)
+ return result
+
+ def _set_vray_settings(self, asset):
+ # type: (dict) -> None
+ """Sets important settings for Vray."""
+ node = self._set_renderer_option(
+ "VRaySettingsNode", "{}.fileNameRenderElementSeparator", "_"
+ )
+
+ # set format to exr
+ cmds.setAttr(
+ "{}.imageFormatStr".format(node), 5)
+
+ # animType
+ cmds.setAttr(
+ "{}.animType".format(node), 1)
+
+ # resolution
+ cmds.setAttr(
+ "{}.width".format(node),
+ asset["data"].get("resolutionWidth"))
+ cmds.setAttr(
+ "{}.height".format(node),
+ asset["data"].get("resolutionHeight"))
+
+ @staticmethod
+ def _set_global_output_settings():
+ # enable animation
+ cmds.setAttr("defaultRenderGlobals.outFormatControl", 0)
+ cmds.setAttr("defaultRenderGlobals.animation", 1)
+ cmds.setAttr("defaultRenderGlobals.putFrameBeforeExt", 1)
+ cmds.setAttr("defaultRenderGlobals.extensionPadding", 4)
diff --git a/openpype/hosts/maya/plugins/create/create_xgen.py b/openpype/hosts/maya/plugins/create/create_xgen.py
new file mode 100644
index 0000000000..3953972952
--- /dev/null
+++ b/openpype/hosts/maya/plugins/create/create_xgen.py
@@ -0,0 +1,11 @@
+from openpype.hosts.maya.api import plugin
+
+
+class CreateXgen(plugin.Creator):
+ """Xgen interactive export"""
+
+ name = "xgen"
+ label = "Xgen Interactive"
+ family = "xgen"
+ icon = "pagelines"
+ defaults = ['Main']
diff --git a/openpype/hosts/maya/plugins/load/load_reference.py b/openpype/hosts/maya/plugins/load/load_reference.py
index 96269f2771..d5952ed267 100644
--- a/openpype/hosts/maya/plugins/load/load_reference.py
+++ b/openpype/hosts/maya/plugins/load/load_reference.py
@@ -17,7 +17,8 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
"layout",
"camera",
"rig",
- "camerarig"]
+ "camerarig",
+ "xgen"]
representations = ["ma", "abc", "fbx", "mb"]
label = "Reference"
diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py
index 647a46e240..5049647ff9 100644
--- a/openpype/hosts/maya/plugins/publish/collect_render.py
+++ b/openpype/hosts/maya/plugins/publish/collect_render.py
@@ -49,7 +49,7 @@ import maya.app.renderSetup.model.renderSetup as renderSetup
import pyblish.api
from avalon import maya, api
-from openpype.hosts.maya.api.expected_files import ExpectedFiles
+from openpype.hosts.maya.api.lib_renderproducts import get as get_layer_render_products # noqa: E501
from openpype.hosts.maya.api import lib
@@ -64,6 +64,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
def process(self, context):
"""Entry point to collector."""
render_instance = None
+ deadline_url = None
+
for instance in context:
if "rendering" in instance.data["families"]:
render_instance = instance
@@ -86,6 +88,15 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
asset = api.Session["AVALON_ASSET"]
workspace = context.data["workspaceDir"]
+ deadline_settings = (
+ context.data
+ ["system_settings"]
+ ["modules"]
+ ["deadline"]
+ )
+
+ if deadline_settings["enabled"]:
+ deadline_url = render_instance.data.get("deadlineUrl")
self._rs = renderSetup.instance()
current_layer = self._rs.getVisibleRenderLayer()
maya_render_layers = {
@@ -157,10 +168,21 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
# return all expected files for all cameras and aovs in given
# frame range
- ef = ExpectedFiles(render_instance)
- exp_files = ef.get(renderer, layer_name)
- self.log.info("multipart: {}".format(ef.multipart))
+ layer_render_products = get_layer_render_products(
+ layer_name, render_instance)
+ render_products = layer_render_products.layer_data.products
+ assert render_products, "no render products generated"
+ exp_files = []
+ for product in render_products:
+ for camera in layer_render_products.layer_data.cameras:
+ exp_files.append(
+ {product.productName: layer_render_products.get_files(
+ product, camera)})
+
+ self.log.info("multipart: {}".format(
+ layer_render_products.multipart))
assert exp_files, "no file names were generated, this is bug"
+ self.log.info(exp_files)
# if we want to attach render to subset, check if we have AOV's
# in expectedFiles. If so, raise error as we cannot attach AOV
@@ -175,24 +197,15 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
full_exp_files = []
aov_dict = {}
- # we either get AOVs or just list of files. List of files can
- # mean two things - there are no AOVs enabled or multipass EXR
- # is produced. In either case we treat those as `beauty`.
- if isinstance(exp_files[0], dict):
- for aov, files in exp_files[0].items():
- full_paths = []
- for e in files:
- full_path = os.path.join(workspace, "renders", e)
- full_path = full_path.replace("\\", "/")
- full_paths.append(full_path)
- aov_dict[aov] = full_paths
- else:
+ # replace relative paths with absolute. Render products are
+ # returned as list of dictionaries.
+ for aov in exp_files:
full_paths = []
- for e in exp_files:
- full_path = os.path.join(workspace, "renders", e)
+ for file in aov[aov.keys()[0]]:
+ full_path = os.path.join(workspace, "renders", file)
full_path = full_path.replace("\\", "/")
full_paths.append(full_path)
- aov_dict["beauty"] = full_paths
+ aov_dict[aov.keys()[0]] = full_paths
frame_start_render = int(self.get_render_attribute(
"startFrame", layer=layer_name))
@@ -224,7 +237,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
"subset": expected_layer_name,
"attachTo": attach_to,
"setMembers": layer_name,
- "multipartExr": ef.multipart,
+ "multipartExr": layer_render_products.multipart,
"review": render_instance.data.get("review") or False,
"publish": True,
@@ -263,6 +276,9 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
"vrayUseReferencedAovs") or False
}
+ if deadline_url:
+ data["deadlineUrl"] = deadline_url
+
if self.sync_workfile_version:
data["version"] = context.data["version"]
@@ -306,10 +322,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
instance.data.update(data)
self.log.debug("data: {}".format(json.dumps(data, indent=4)))
- # Restore current layer.
- self.log.info("Restoring to {}".format(current_layer.name()))
- self._rs.switchToLayer(current_layer)
-
def parse_options(self, render_globals):
"""Get all overrides with a value, skip those without.
@@ -392,11 +404,13 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
rset = self.maya_layers[layer].renderSettingsCollectionInstance()
return rset.getOverrides()
- def get_render_attribute(self, attr, layer):
+ @staticmethod
+ def get_render_attribute(attr, layer):
"""Get attribute from render options.
Args:
- attr (str): name of attribute to be looked up.
+ attr (str): name of attribute to be looked up
+ layer (str): name of render layer
Returns:
Attribute value
diff --git a/openpype/hosts/maya/plugins/publish/extract_animation.py b/openpype/hosts/maya/plugins/publish/extract_animation.py
index b86ded1fb0..7ecc40a68d 100644
--- a/openpype/hosts/maya/plugins/publish/extract_animation.py
+++ b/openpype/hosts/maya/plugins/publish/extract_animation.py
@@ -57,7 +57,8 @@ class ExtractAnimation(openpype.api.Extractor):
"uvWrite": True,
"selection": True,
"worldSpace": instance.data.get("worldSpace", True),
- "writeColorSets": instance.data.get("writeColorSets", False)
+ "writeColorSets": instance.data.get("writeColorSets", False),
+ "writeFaceSets": instance.data.get("writeFaceSets", False)
}
if not instance.data.get("includeParentHierarchy", True):
diff --git a/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py b/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py
index c85bc0387d..3c2b70900d 100644
--- a/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py
+++ b/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py
@@ -19,7 +19,8 @@ class ExtractMayaSceneRaw(openpype.api.Extractor):
families = ["mayaAscii",
"setdress",
"layout",
- "camerarig"]
+ "camerarig",
+ "xgen"]
scene_type = "ma"
def process(self, instance):
diff --git a/openpype/hosts/maya/plugins/publish/extract_model.py b/openpype/hosts/maya/plugins/publish/extract_model.py
index 1773297826..40cc9427f3 100644
--- a/openpype/hosts/maya/plugins/publish/extract_model.py
+++ b/openpype/hosts/maya/plugins/publish/extract_model.py
@@ -28,6 +28,7 @@ class ExtractModel(openpype.api.Extractor):
hosts = ["maya"]
families = ["model"]
scene_type = "ma"
+ optional = True
def process(self, instance):
"""Plugin entry point."""
diff --git a/openpype/hosts/maya/plugins/publish/extract_pointcache.py b/openpype/hosts/maya/plugins/publish/extract_pointcache.py
index ba716c0d18..630cc39398 100644
--- a/openpype/hosts/maya/plugins/publish/extract_pointcache.py
+++ b/openpype/hosts/maya/plugins/publish/extract_pointcache.py
@@ -38,6 +38,7 @@ class ExtractAlembic(openpype.api.Extractor):
# Get extra export arguments
writeColorSets = instance.data.get("writeColorSets", False)
+ writeFaceSets = instance.data.get("writeFaceSets", False)
self.log.info("Extracting pointcache..")
dirname = self.staging_dir(instance)
@@ -53,6 +54,7 @@ class ExtractAlembic(openpype.api.Extractor):
"writeVisibility": True,
"writeCreases": True,
"writeColorSets": writeColorSets,
+ "writeFaceSets": writeFaceSets,
"uvWrite": True,
"selection": True,
"worldSpace": instance.data.get("worldSpace", True)
diff --git a/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py b/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py
new file mode 100644
index 0000000000..d69911c404
--- /dev/null
+++ b/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py
@@ -0,0 +1,61 @@
+import os
+
+from maya import cmds
+
+import avalon.maya
+import openpype.api
+
+
+class ExtractXgenCache(openpype.api.Extractor):
+ """Produce an alembic of just xgen interactive groom
+
+ """
+
+ label = "Extract Xgen ABC Cache"
+ hosts = ["maya"]
+ families = ["xgen"]
+ optional = True
+
+ def process(self, instance):
+
+ # Collect the out set nodes
+ out_descriptions = [node for node in instance
+ if cmds.nodeType(node) == "xgmSplineDescription"]
+
+ start = 1
+ end = 1
+
+ self.log.info("Extracting Xgen Cache..")
+ dirname = self.staging_dir(instance)
+
+ parent_dir = self.staging_dir(instance)
+ filename = "{name}.abc".format(**instance.data)
+ path = os.path.join(parent_dir, filename)
+
+ with avalon.maya.suspended_refresh():
+ with avalon.maya.maintained_selection():
+ command = (
+ '-file '
+ + path
+ + ' -df "ogawa" -fr '
+ + str(start)
+ + ' '
+ + str(end)
+ + ' -step 1 -mxf -wfw'
+ )
+ for desc in out_descriptions:
+ command += (" -obj " + desc)
+ cmds.xgmSplineCache(export=True, j=command)
+
+ if "representations" not in instance.data:
+ instance.data["representations"] = []
+
+ representation = {
+ 'name': 'abc',
+ 'ext': 'abc',
+ 'files': filename,
+ "stagingDir": dirname,
+ }
+ instance.data["representations"].append(representation)
+
+ self.log.info("Extracted {} to {}".format(instance, dirname))
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py
index 3a9a7a3445..45c6a264dd 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py
@@ -2,7 +2,7 @@ import os
import opentimelineio as otio
import pyblish.api
from openpype import lib as plib
-
+from copy import deepcopy
class CollectInstances(pyblish.api.InstancePlugin):
"""Collect instances from editorial's OTIO sequence"""
@@ -186,8 +186,8 @@ class CollectInstances(pyblish.api.InstancePlugin):
properities.pop("version")
# adding Review-able instance
- subset_instance_data = instance_data.copy()
- subset_instance_data.update(properities)
+ subset_instance_data = deepcopy(instance_data)
+ subset_instance_data.update(deepcopy(properities))
subset_instance_data.update({
# unique attributes
"name": f"{name}_{subset}",
diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py
index e496b144cd..dfa8f17ee9 100644
--- a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py
+++ b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py
@@ -9,7 +9,7 @@ from openpype.lib import get_subset_name
class CollectInstances(pyblish.api.ContextPlugin):
label = "Collect Instances"
- order = pyblish.api.CollectorOrder - 1
+ order = pyblish.api.CollectorOrder - 0.4
hosts = ["tvpaint"]
def process(self, context):
diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py
index b61fec895f..65e38ea258 100644
--- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py
+++ b/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py
@@ -8,7 +8,7 @@ from openpype.lib import get_subset_name
class CollectWorkfile(pyblish.api.ContextPlugin):
label = "Collect Workfile"
- order = pyblish.api.CollectorOrder - 1
+ order = pyblish.api.CollectorOrder - 0.4
hosts = ["tvpaint"]
def process(self, context):
diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py
index 79cc01740a..f4259f1b5f 100644
--- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py
+++ b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py
@@ -39,7 +39,7 @@ class ResetTVPaintWorkfileMetadata(pyblish.api.Action):
class CollectWorkfileData(pyblish.api.ContextPlugin):
label = "Collect Workfile Data"
- order = pyblish.api.CollectorOrder - 1.01
+ order = pyblish.api.CollectorOrder - 0.45
hosts = ["tvpaint"]
actions = [ResetTVPaintWorkfileMetadata]
diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py
index 1df7512588..36f0b0c954 100644
--- a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py
+++ b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py
@@ -50,12 +50,12 @@ class ExtractSequence(pyblish.api.Extractor):
mark_in = instance.context.data["sceneMarkIn"]
mark_out = instance.context.data["sceneMarkOut"]
- # Scene start frame offsets the output files, so we need to offset the
- # marks.
+ # Change scene Start Frame to 0 to prevent frame index issues
+ # - issue is that TVPaint versions deal with frame indexes in a
+ # different way when Start Frame is not `0`
+ # NOTE It will be set back after rendering
scene_start_frame = instance.context.data["sceneStartFrame"]
- difference = scene_start_frame - mark_in
- mark_in += difference
- mark_out += difference
+ lib.execute_george("tv_startframe 0")
# Frame start/end may be stored as float
frame_start = int(instance.data["frameStart"])
@@ -145,6 +145,9 @@ class ExtractSequence(pyblish.api.Extractor):
filtered_layers
)
+ # Change scene frame Start back to previous value
+ lib.execute_george("tv_startframe {}".format(scene_start_frame))
+
# Sequence of one frame
if not output_filenames:
self.log.warning("Extractor did not create any output.")
diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py b/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py
index a9279bf6e0..ad37a7a068 100644
--- a/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py
+++ b/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py
@@ -15,6 +15,46 @@ class PointCacheAlembicLoader(api.Loader):
icon = "cube"
color = "orange"
+ def get_task(
+ self, filename, asset_dir, asset_name, replace, frame_start, frame_end
+ ):
+ task = unreal.AssetImportTask()
+ options = unreal.AbcImportSettings()
+ gc_settings = unreal.AbcGeometryCacheSettings()
+ conversion_settings = unreal.AbcConversionSettings()
+ sampling_settings = unreal.AbcSamplingSettings()
+
+ task.set_editor_property('filename', filename)
+ task.set_editor_property('destination_path', asset_dir)
+ task.set_editor_property('destination_name', asset_name)
+ task.set_editor_property('replace_existing', replace)
+ task.set_editor_property('automated', True)
+ task.set_editor_property('save', True)
+
+ # set import options here
+ # Unreal 4.24 ignores the settings. It works with Unreal 4.26
+ options.set_editor_property(
+ 'import_type', unreal.AlembicImportType.GEOMETRY_CACHE)
+
+ gc_settings.set_editor_property('flatten_tracks', False)
+
+ conversion_settings.set_editor_property('flip_u', False)
+ conversion_settings.set_editor_property('flip_v', True)
+ conversion_settings.set_editor_property(
+ 'scale', unreal.Vector(x=100.0, y=100.0, z=100.0))
+ conversion_settings.set_editor_property(
+ 'rotation', unreal.Vector(x=-90.0, y=0.0, z=180.0))
+
+ sampling_settings.set_editor_property('frame_start', frame_start)
+ sampling_settings.set_editor_property('frame_end', frame_end)
+
+ options.geometry_cache_settings = gc_settings
+ options.conversion_settings = conversion_settings
+ options.sampling_settings = sampling_settings
+ task.options = options
+
+ return task
+
def load(self, context, name, namespace, data):
"""
Load and containerise representation into Content Browser.
@@ -55,25 +95,17 @@ class PointCacheAlembicLoader(api.Loader):
unreal.EditorAssetLibrary.make_directory(asset_dir)
- task = unreal.AssetImportTask()
+ frame_start = context.get('asset').get('data').get('frameStart')
+ frame_end = context.get('asset').get('data').get('frameEnd')
- task.set_editor_property('filename', self.fname)
- task.set_editor_property('destination_path', asset_dir)
- task.set_editor_property('destination_name', asset_name)
- task.set_editor_property('replace_existing', False)
- task.set_editor_property('automated', True)
- task.set_editor_property('save', True)
+ # If frame start and end are the same, we increase the end frame by
+ # one, otherwise Unreal will not import it
+ if frame_start == frame_end:
+ frame_end += 1
- # set import options here
- # Unreal 4.24 ignores the settings. It works with Unreal 4.26
- options = unreal.AbcImportSettings()
- options.set_editor_property(
- 'import_type', unreal.AlembicImportType.GEOMETRY_CACHE)
+ task = self.get_task(
+ self.fname, asset_dir, asset_name, False, frame_start, frame_end)
- options.geometry_cache_settings.set_editor_property(
- 'flatten_tracks', False)
-
- task.options = options
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501
# Create Asset Container
@@ -109,28 +141,11 @@ class PointCacheAlembicLoader(api.Loader):
source_path = api.get_representation_path(representation)
destination_path = container["namespace"]
- task = unreal.AssetImportTask()
+ task = self.get_task(source_path, destination_path, name, True)
- task.set_editor_property('filename', source_path)
- task.set_editor_property('destination_path', destination_path)
- # strip suffix
- task.set_editor_property('destination_name', name)
- task.set_editor_property('replace_existing', True)
- task.set_editor_property('automated', True)
- task.set_editor_property('save', True)
-
- # set import options here
- # Unreal 4.24 ignores the settings. It works with Unreal 4.26
- options = unreal.AbcImportSettings()
- options.set_editor_property(
- 'import_type', unreal.AlembicImportType.GEOMETRY_CACHE)
-
- options.geometry_cache_settings.set_editor_property(
- 'flatten_tracks', False)
-
- task.options = options
# do import fbx and replace existing data
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
+
container_path = "{}/{}".format(container["namespace"],
container["objectName"])
# update metadata
diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py
index 12b9320f72..ccec31b832 100644
--- a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py
+++ b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py
@@ -15,6 +15,39 @@ class StaticMeshAlembicLoader(api.Loader):
icon = "cube"
color = "orange"
+ def get_task(self, filename, asset_dir, asset_name, replace):
+ task = unreal.AssetImportTask()
+ options = unreal.AbcImportSettings()
+ sm_settings = unreal.AbcStaticMeshSettings()
+ conversion_settings = unreal.AbcConversionSettings()
+
+ task.set_editor_property('filename', filename)
+ task.set_editor_property('destination_path', asset_dir)
+ task.set_editor_property('destination_name', asset_name)
+ task.set_editor_property('replace_existing', replace)
+ task.set_editor_property('automated', True)
+ task.set_editor_property('save', True)
+
+ # set import options here
+ # Unreal 4.24 ignores the settings. It works with Unreal 4.26
+ options.set_editor_property(
+ 'import_type', unreal.AlembicImportType.STATIC_MESH)
+
+ sm_settings.set_editor_property('merge_meshes', True)
+
+ conversion_settings.set_editor_property('flip_u', False)
+ conversion_settings.set_editor_property('flip_v', True)
+ conversion_settings.set_editor_property(
+ 'scale', unreal.Vector(x=100.0, y=100.0, z=100.0))
+ conversion_settings.set_editor_property(
+ 'rotation', unreal.Vector(x=-90.0, y=0.0, z=180.0))
+
+ options.static_mesh_settings = sm_settings
+ options.conversion_settings = conversion_settings
+ task.options = options
+
+ return task
+
def load(self, context, name, namespace, data):
"""
Load and containerise representation into Content Browser.
@@ -55,22 +88,8 @@ class StaticMeshAlembicLoader(api.Loader):
unreal.EditorAssetLibrary.make_directory(asset_dir)
- task = unreal.AssetImportTask()
+ task = self.get_task(self.fname, asset_dir, asset_name, False)
- task.set_editor_property('filename', self.fname)
- task.set_editor_property('destination_path', asset_dir)
- task.set_editor_property('destination_name', asset_name)
- task.set_editor_property('replace_existing', False)
- task.set_editor_property('automated', True)
- task.set_editor_property('save', True)
-
- # set import options here
- # Unreal 4.24 ignores the settings. It works with Unreal 4.26
- options = unreal.AbcImportSettings()
- options.set_editor_property(
- 'import_type', unreal.AlembicImportType.STATIC_MESH)
-
- task.options = options
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501
# Create Asset Container
@@ -106,25 +125,11 @@ class StaticMeshAlembicLoader(api.Loader):
source_path = api.get_representation_path(representation)
destination_path = container["namespace"]
- task = unreal.AssetImportTask()
+ task = self.get_task(source_path, destination_path, name, True)
- task.set_editor_property('filename', source_path)
- task.set_editor_property('destination_path', destination_path)
- # strip suffix
- task.set_editor_property('destination_name', name)
- task.set_editor_property('replace_existing', True)
- task.set_editor_property('automated', True)
- task.set_editor_property('save', True)
-
- # set import options here
- # Unreal 4.24 ignores the settings. It works with Unreal 4.26
- options = unreal.AbcImportSettings()
- options.set_editor_property(
- 'import_type', unreal.AlembicImportType.STATIC_MESH)
-
- task.options = options
# do import fbx and replace existing data
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
+
container_path = "{}/{}".format(container["namespace"],
container["objectName"])
# update metadata
diff --git a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py b/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py
index dcb566fa4c..d25f84ea69 100644
--- a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py
+++ b/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py
@@ -15,6 +15,31 @@ class StaticMeshFBXLoader(api.Loader):
icon = "cube"
color = "orange"
+ def get_task(self, filename, asset_dir, asset_name, replace):
+ task = unreal.AssetImportTask()
+ options = unreal.FbxImportUI()
+ import_data = unreal.FbxStaticMeshImportData()
+
+ task.set_editor_property('filename', filename)
+ task.set_editor_property('destination_path', asset_dir)
+ task.set_editor_property('destination_name', asset_name)
+ task.set_editor_property('replace_existing', replace)
+ task.set_editor_property('automated', True)
+ task.set_editor_property('save', True)
+
+ # set import options here
+ options.set_editor_property(
+ 'automated_import_should_detect_type', False)
+ options.set_editor_property('import_animations', False)
+
+ import_data.set_editor_property('combine_meshes', True)
+ import_data.set_editor_property('remove_degenerates', False)
+
+ options.static_mesh_import_data = import_data
+ task.options = options
+
+ return task
+
def load(self, context, name, namespace, data):
"""
Load and containerise representation into Content Browser.
@@ -55,22 +80,8 @@ class StaticMeshFBXLoader(api.Loader):
unreal.EditorAssetLibrary.make_directory(asset_dir)
- task = unreal.AssetImportTask()
+ task = self.get_task(self.fname, asset_dir, asset_name, False)
- task.set_editor_property('filename', self.fname)
- task.set_editor_property('destination_path', asset_dir)
- task.set_editor_property('destination_name', asset_name)
- task.set_editor_property('replace_existing', False)
- task.set_editor_property('automated', True)
- task.set_editor_property('save', True)
-
- # set import options here
- options = unreal.FbxImportUI()
- options.set_editor_property(
- 'automated_import_should_detect_type', False)
- options.set_editor_property('import_animations', False)
-
- task.options = options
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501
# Create Asset Container
@@ -102,29 +113,15 @@ class StaticMeshFBXLoader(api.Loader):
return asset_content
def update(self, container, representation):
- name = container["name"]
+ name = container["asset_name"]
source_path = api.get_representation_path(representation)
destination_path = container["namespace"]
- task = unreal.AssetImportTask()
+ task = self.get_task(source_path, destination_path, name, True)
- task.set_editor_property('filename', source_path)
- task.set_editor_property('destination_path', destination_path)
- # strip suffix
- task.set_editor_property('destination_name', name)
- task.set_editor_property('replace_existing', True)
- task.set_editor_property('automated', True)
- task.set_editor_property('save', True)
-
- # set import options here
- options = unreal.FbxImportUI()
- options.set_editor_property(
- 'automated_import_should_detect_type', False)
- options.set_editor_property('import_animations', False)
-
- task.options = options
# do import fbx and replace existing data
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
+
container_path = "{}/{}".format(container["namespace"],
container["objectName"])
# update metadata
diff --git a/openpype/hosts/unreal/plugins/publish/extract_layout.py b/openpype/hosts/unreal/plugins/publish/extract_layout.py
index 2d9f6eb3d1..a47187cf47 100644
--- a/openpype/hosts/unreal/plugins/publish/extract_layout.py
+++ b/openpype/hosts/unreal/plugins/publish/extract_layout.py
@@ -83,7 +83,7 @@ class ExtractLayout(openpype.api.Extractor):
"z": transform.translation.z
},
"rotation": {
- "x": math.radians(transform.rotation.euler().x + 90.0),
+ "x": math.radians(transform.rotation.euler().x),
"y": math.radians(transform.rotation.euler().y),
"z": math.radians(180.0 - transform.rotation.euler().z)
},
diff --git a/openpype/hosts/webpublisher/README.md b/openpype/hosts/webpublisher/README.md
new file mode 100644
index 0000000000..0826e44490
--- /dev/null
+++ b/openpype/hosts/webpublisher/README.md
@@ -0,0 +1,6 @@
+Webpublisher
+-------------
+
+Plugins used to process Webpublisher batch publishes.
+
+Gets triggered by calling openpype.cli.remotepublish with appropriate arguments.
\ No newline at end of file
diff --git a/openpype/modules/log_viewer/tray/__init__.py b/openpype/hosts/webpublisher/__init__.py
similarity index 100%
rename from openpype/modules/log_viewer/tray/__init__.py
rename to openpype/hosts/webpublisher/__init__.py
diff --git a/openpype/hosts/webpublisher/api/__init__.py b/openpype/hosts/webpublisher/api/__init__.py
new file mode 100644
index 0000000000..e40d46d662
--- /dev/null
+++ b/openpype/hosts/webpublisher/api/__init__.py
@@ -0,0 +1,43 @@
+import os
+import logging
+
+from avalon import api as avalon
+from avalon import io
+from pyblish import api as pyblish
+import openpype.hosts.webpublisher
+
+log = logging.getLogger("openpype.hosts.webpublisher")
+
+HOST_DIR = os.path.dirname(os.path.abspath(
+ openpype.hosts.webpublisher.__file__))
+PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
+PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
+LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
+CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
+
+
+def application_launch():
+ pass
+
+
+def install():
+ print("Installing Pype config...")
+
+ pyblish.register_plugin_path(PUBLISH_PATH)
+ avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
+ avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
+ log.info(PUBLISH_PATH)
+
+ io.install()
+ avalon.on("application.launched", application_launch)
+
+
+def uninstall():
+ pyblish.deregister_plugin_path(PUBLISH_PATH)
+ avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
+ avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
+
+
+# to have required methods for interface
+def ls():
+ pass
diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_fps.py b/openpype/hosts/webpublisher/plugins/publish/collect_fps.py
new file mode 100644
index 0000000000..79fe53176a
--- /dev/null
+++ b/openpype/hosts/webpublisher/plugins/publish/collect_fps.py
@@ -0,0 +1,28 @@
+"""
+Requires:
+ Nothing
+
+Provides:
+ Instance
+"""
+
+import pyblish.api
+from pprint import pformat
+
+
+class CollectFPS(pyblish.api.InstancePlugin):
+ """
+    Adds fps from context to instance, since ExtractReview requires it there
+ """
+
+ label = "Collect fps"
+ order = pyblish.api.CollectorOrder + 0.49
+ hosts = ["webpublisher"]
+
+ def process(self, instance):
+ fps = instance.context.data["fps"]
+
+ instance.data.update({
+ "fps": fps
+ })
+ self.log.debug(f"instance.data: {pformat(instance.data)}")
diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py
new file mode 100644
index 0000000000..6584120d97
--- /dev/null
+++ b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py
@@ -0,0 +1,267 @@
+"""Loads publishing context from json and continues in publish process.
+
+Requires:
+ anatomy -> context["anatomy"] *(pyblish.api.CollectorOrder - 0.11)
+
+Provides:
+ context, instances -> All data from previous publishing process.
+"""
+
+import os
+import json
+import clique
+
+import pyblish.api
+from avalon import io
+from openpype.lib import prepare_template_data
+
+
+class CollectPublishedFiles(pyblish.api.ContextPlugin):
+ """
+ This collector will try to find json files in provided
+ `OPENPYPE_PUBLISH_DATA`. Those files _MUST_ share same context.
+
+ """
+ # must be really early, context values are only in json file
+ order = pyblish.api.CollectorOrder - 0.490
+    label = "Collect published files"
+    hosts = ["webpublisher"]
+
+ _context = None
+
+ # from Settings
+ task_type_to_family = {}
+
+ def _load_json(self, path):
+ path = path.strip('\"')
+ assert os.path.isfile(path), (
+ "Path to json file doesn't exist. \"{}\"".format(path)
+ )
+ data = None
+ with open(path, "r") as json_file:
+ try:
+ data = json.load(json_file)
+ except Exception as exc:
+ self.log.error(
+ "Error loading json: "
+ "{} - Exception: {}".format(path, exc)
+ )
+ return data
+
+ def _process_batch(self, dir_url):
+ task_subfolders = [
+ os.path.join(dir_url, o)
+ for o in os.listdir(dir_url)
+ if os.path.isdir(os.path.join(dir_url, o))]
+ self.log.info("task_sub:: {}".format(task_subfolders))
+ for task_dir in task_subfolders:
+ task_data = self._load_json(os.path.join(task_dir,
+ "manifest.json"))
+ self.log.info("task_data:: {}".format(task_data))
+ ctx = task_data["context"]
+ task_type = "default_task_type"
+ task_name = None
+
+ if ctx["type"] == "task":
+ items = ctx["path"].split('/')
+ asset = items[-2]
+ os.environ["AVALON_TASK"] = ctx["name"]
+ task_name = ctx["name"]
+ task_type = ctx["attributes"]["type"]
+ else:
+ asset = ctx["name"]
+ os.environ["AVALON_TASK"] = ""
+
+ is_sequence = len(task_data["files"]) > 1
+
+ _, extension = os.path.splitext(task_data["files"][0])
+ family, families, subset_template, tags = self._get_family(
+ self.task_type_to_family,
+ task_type,
+ is_sequence,
+ extension.replace(".", ''))
+
+ subset = self._get_subset_name(family, subset_template, task_name,
+ task_data["variant"])
+
+ os.environ["AVALON_ASSET"] = asset
+ io.Session["AVALON_ASSET"] = asset
+
+ instance = self._context.create_instance(subset)
+ instance.data["asset"] = asset
+ instance.data["subset"] = subset
+ instance.data["family"] = family
+ instance.data["families"] = families
+ instance.data["version"] = \
+ self._get_last_version(asset, subset) + 1
+ instance.data["stagingDir"] = task_dir
+ instance.data["source"] = "webpublisher"
+
+ # to store logging info into DB openpype.webpublishes
+ instance.data["ctx_path"] = ctx["path"]
+ instance.data["batch_id"] = task_data["batch"]
+
+ # to convert from email provided into Ftrack username
+ instance.data["user_email"] = task_data["user"]
+
+ if is_sequence:
+ instance.data["representations"] = self._process_sequence(
+ task_data["files"], task_dir, tags
+ )
+ instance.data["frameStart"] = \
+ instance.data["representations"][0]["frameStart"]
+ instance.data["frameEnd"] = \
+ instance.data["representations"][0]["frameEnd"]
+ else:
+ instance.data["representations"] = self._get_single_repre(
+ task_dir, task_data["files"], tags
+ )
+
+ self.log.info("instance.data:: {}".format(instance.data))
+
+ def _get_subset_name(self, family, subset_template, task_name, variant):
+ fill_pairs = {
+ "variant": variant,
+ "family": family,
+ "task": task_name
+ }
+ subset = subset_template.format(**prepare_template_data(fill_pairs))
+ return subset
+
+ def _get_single_repre(self, task_dir, files, tags):
+ _, ext = os.path.splitext(files[0])
+ repre_data = {
+ "name": ext[1:],
+ "ext": ext[1:],
+ "files": files[0],
+ "stagingDir": task_dir,
+ "tags": tags
+ }
+ self.log.info("single file repre_data.data:: {}".format(repre_data))
+ return [repre_data]
+
+ def _process_sequence(self, files, task_dir, tags):
+        """Prepare representations for sequence of files."""
+ collections, remainder = clique.assemble(files)
+ assert len(collections) == 1, \
+ "Too many collections in {}".format(files)
+
+ frame_start = list(collections[0].indexes)[0]
+ frame_end = list(collections[0].indexes)[-1]
+ ext = collections[0].tail
+ repre_data = {
+ "frameStart": frame_start,
+ "frameEnd": frame_end,
+ "name": ext[1:],
+ "ext": ext[1:],
+ "files": files,
+ "stagingDir": task_dir,
+ "tags": tags
+ }
+ self.log.info("sequences repre_data.data:: {}".format(repre_data))
+ return [repre_data]
+
+ def _get_family(self, settings, task_type, is_sequence, extension):
+ """Guess family based on input data.
+
+ Args:
+ settings (dict): configuration per task_type
+ task_type (str): Animation|Art etc
+ is_sequence (bool): single file or sequence
+ extension (str): without '.'
+
+ Returns:
+ (family, [families], subset_template_name, tags) tuple
+ AssertionError if not matching family found
+ """
+ task_obj = settings.get(task_type)
+ assert task_obj, "No family configuration for '{}'".format(task_type)
+
+ found_family = None
+ for family, content in task_obj.items():
+ if is_sequence != content["is_sequence"]:
+ continue
+ if extension in content["extensions"] or \
+ '' in content["extensions"]: # all extensions setting
+ found_family = family
+ break
+
+ msg = "No family found for combination of " +\
+ "task_type: {}, is_sequence:{}, extension: {}".format(
+ task_type, is_sequence, extension)
+ assert found_family, msg
+
+ return found_family, \
+ content["families"], \
+ content["subset_template_name"], \
+ content["tags"]
+
+ def _get_last_version(self, asset_name, subset_name):
+ """Returns version number or 0 for 'asset' and 'subset'"""
+ query = [
+ {
+ "$match": {"type": "asset", "name": asset_name}
+ },
+ {
+ "$lookup":
+ {
+ "from": os.environ["AVALON_PROJECT"],
+ "localField": "_id",
+ "foreignField": "parent",
+ "as": "subsets"
+ }
+ },
+ {
+ "$unwind": "$subsets"
+ },
+ {
+ "$match": {"subsets.type": "subset",
+ "subsets.name": subset_name}},
+ {
+ "$lookup":
+ {
+ "from": os.environ["AVALON_PROJECT"],
+ "localField": "subsets._id",
+ "foreignField": "parent",
+ "as": "versions"
+ }
+ },
+ {
+ "$unwind": "$versions"
+ },
+ {
+ "$group": {
+ "_id": {
+ "asset_name": "$name",
+ "subset_name": "$subsets.name"
+ },
+ 'version': {'$max': "$versions.name"}
+ }
+ }
+ ]
+ version = list(io.aggregate(query))
+
+ if version:
+ return version[0].get("version") or 0
+ else:
+ return 0
+
+ def process(self, context):
+ self._context = context
+
+ batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA")
+
+ assert batch_dir, (
+ "Missing `OPENPYPE_PUBLISH_DATA`")
+
+        assert os.path.exists(batch_dir), \
+            "Folder {} doesn't exist".format(batch_dir)
+
+ project_name = os.environ.get("AVALON_PROJECT")
+ if project_name is None:
+ raise AssertionError(
+ "Environment `AVALON_PROJECT` was not found."
+ "Could not set project `root` which may cause issues."
+ )
+
+ self._process_batch(batch_dir)
diff --git a/openpype/hosts/webpublisher/plugins/publish/integrate_context_to_log.py b/openpype/hosts/webpublisher/plugins/publish/integrate_context_to_log.py
new file mode 100644
index 0000000000..419c065e16
--- /dev/null
+++ b/openpype/hosts/webpublisher/plugins/publish/integrate_context_to_log.py
@@ -0,0 +1,38 @@
+import os
+
+import pyblish.api
+from openpype.lib import OpenPypeMongoConnection
+
+
+class IntegrateContextToLog(pyblish.api.ContextPlugin):
+ """ Adds context information to log document for displaying in front end"""
+
+ label = "Integrate Context to Log"
+ order = pyblish.api.IntegratorOrder - 0.1
+ hosts = ["webpublisher"]
+
+ def process(self, context):
+ self.log.info("Integrate Context to Log")
+
+ mongo_client = OpenPypeMongoConnection.get_mongo_client()
+ database_name = os.environ["OPENPYPE_DATABASE_NAME"]
+ dbcon = mongo_client[database_name]["webpublishes"]
+
+ for instance in context:
+ self.log.info("ctx_path: {}".format(instance.data.get("ctx_path")))
+ self.log.info("batch_id: {}".format(instance.data.get("batch_id")))
+ if instance.data.get("ctx_path") and instance.data.get("batch_id"):
+ self.log.info("Updating log record")
+ dbcon.update_one(
+ {
+ "batch_id": instance.data.get("batch_id"),
+ "status": "in_progress"
+ },
+ {"$set":
+ {
+ "path": instance.data.get("ctx_path")
+
+ }}
+ )
+
+ return
diff --git a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py
new file mode 100644
index 0000000000..0014d1b344
--- /dev/null
+++ b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py
@@ -0,0 +1,247 @@
+"""Routes and etc. for webpublisher API."""
+import os
+import json
+import datetime
+from bson.objectid import ObjectId
+import collections
+from aiohttp.web_response import Response
+import subprocess
+
+from avalon.api import AvalonMongoDB
+
+from openpype.lib import OpenPypeMongoConnection
+from openpype_modules.avalon_apps.rest_api import _RestApiEndpoint
+
+from openpype.lib import PypeLogger
+
+log = PypeLogger.get_logger("WebServer")
+
+
+class RestApiResource:
+ """Resource carrying needed info and Avalon DB connection for publish."""
+ def __init__(self, server_manager, executable, upload_dir):
+ self.server_manager = server_manager
+ self.upload_dir = upload_dir
+ self.executable = executable
+
+ self.dbcon = AvalonMongoDB()
+ self.dbcon.install()
+
+ @staticmethod
+ def json_dump_handler(value):
+ if isinstance(value, datetime.datetime):
+ return value.isoformat()
+ if isinstance(value, ObjectId):
+ return str(value)
+ raise TypeError(value)
+
+ @classmethod
+ def encode(cls, data):
+ return json.dumps(
+ data,
+ indent=4,
+ default=cls.json_dump_handler
+ ).encode("utf-8")
+
+
+class OpenPypeRestApiResource(RestApiResource):
+ """Resource carrying OP DB connection for storing batch info into DB."""
+ def __init__(self, ):
+ mongo_client = OpenPypeMongoConnection.get_mongo_client()
+ database_name = os.environ["OPENPYPE_DATABASE_NAME"]
+ self.dbcon = mongo_client[database_name]["webpublishes"]
+
+
+class WebpublisherProjectsEndpoint(_RestApiEndpoint):
+ """Returns list of dict with project info (id, name)."""
+ async def get(self) -> Response:
+ output = []
+ for project_name in self.dbcon.database.collection_names():
+ project_doc = self.dbcon.database[project_name].find_one({
+ "type": "project"
+ })
+ if project_doc:
+ ret_val = {
+ "id": project_doc["_id"],
+ "name": project_doc["name"]
+ }
+ output.append(ret_val)
+ return Response(
+ status=200,
+ body=self.resource.encode(output),
+ content_type="application/json"
+ )
+
+
+class WebpublisherHiearchyEndpoint(_RestApiEndpoint):
+ """Returns dictionary with context tree from assets."""
+ async def get(self, project_name) -> Response:
+ query_projection = {
+ "_id": 1,
+ "data.tasks": 1,
+ "data.visualParent": 1,
+ "data.entityType": 1,
+ "name": 1,
+ "type": 1,
+ }
+
+ asset_docs = self.dbcon.database[project_name].find(
+ {"type": "asset"},
+ query_projection
+ )
+ asset_docs_by_id = {
+ asset_doc["_id"]: asset_doc
+ for asset_doc in asset_docs
+ }
+
+ asset_docs_by_parent_id = collections.defaultdict(list)
+ for asset_doc in asset_docs_by_id.values():
+ parent_id = asset_doc["data"].get("visualParent")
+ asset_docs_by_parent_id[parent_id].append(asset_doc)
+
+ assets = collections.defaultdict(list)
+
+ for parent_id, children in asset_docs_by_parent_id.items():
+ for child in children:
+ node = assets.get(child["_id"])
+ if not node:
+ node = Node(child["_id"],
+ child["data"].get("entityType", "Folder"),
+ child["name"])
+ assets[child["_id"]] = node
+
+ tasks = child["data"].get("tasks", {})
+ for t_name, t_con in tasks.items():
+ task_node = TaskNode("task", t_name)
+ task_node["attributes"]["type"] = t_con.get("type")
+
+ task_node.parent = node
+
+ parent_node = assets.get(parent_id)
+ if not parent_node:
+ asset_doc = asset_docs_by_id.get(parent_id)
+ if asset_doc: # regular node
+ parent_node = Node(parent_id,
+ asset_doc["data"].get("entityType",
+ "Folder"),
+ asset_doc["name"])
+ else: # root
+ parent_node = Node(parent_id,
+ "project",
+ project_name)
+ assets[parent_id] = parent_node
+ node.parent = parent_node
+
+ roots = [x for x in assets.values() if x.parent is None]
+
+ return Response(
+ status=200,
+ body=self.resource.encode(roots[0]),
+ content_type="application/json"
+ )
+
+
+class Node(dict):
+ """Node element in context tree."""
+
+ def __init__(self, uid, node_type, name):
+ self._parent = None # pointer to parent Node
+ self["type"] = node_type
+ self["name"] = name
+ self['id'] = uid # keep reference to id #
+ self['children'] = [] # collection of pointers to child Nodes
+
+ @property
+ def parent(self):
+ return self._parent # simply return the object at the _parent pointer
+
+ @parent.setter
+ def parent(self, node):
+ self._parent = node
+ # add this node to parent's list of children
+ node['children'].append(self)
+
+
+class TaskNode(Node):
+ """Special node type only for Tasks."""
+
+ def __init__(self, node_type, name):
+ self._parent = None
+ self["type"] = node_type
+ self["name"] = name
+ self["attributes"] = {}
+
+
+class WebpublisherBatchPublishEndpoint(_RestApiEndpoint):
+ """Triggers headless publishing of batch."""
+ async def post(self, request) -> Response:
+ output = {}
+ log.info("WebpublisherBatchPublishEndpoint called")
+ content = await request.json()
+
+ batch_path = os.path.join(self.resource.upload_dir,
+ content["batch"])
+
+ openpype_app = self.resource.executable
+ args = [
+ openpype_app,
+ 'remotepublish',
+ batch_path
+ ]
+
+ if not openpype_app or not os.path.exists(openpype_app):
+ msg = "Non existent OpenPype executable {}".format(openpype_app)
+ raise RuntimeError(msg)
+
+ add_args = {
+ "host": "webpublisher",
+ "project": content["project_name"],
+ "user": content["user"]
+ }
+
+ for key, value in add_args.items():
+ args.append("--{}".format(key))
+ args.append(value)
+
+ log.info("args:: {}".format(args))
+
+ subprocess.call(args)
+ return Response(
+ status=200,
+ body=self.resource.encode(output),
+ content_type="application/json"
+ )
+
+
+class WebpublisherTaskPublishEndpoint(_RestApiEndpoint):
+ """Prepared endpoint triggered after each task - for future development."""
+ async def post(self, request) -> Response:
+ return Response(
+ status=200,
+ body=self.resource.encode([]),
+ content_type="application/json"
+ )
+
+
+class BatchStatusEndpoint(_RestApiEndpoint):
+ """Returns dict with info for batch_id."""
+ async def get(self, batch_id) -> Response:
+ output = self.dbcon.find_one({"batch_id": batch_id})
+
+ return Response(
+ status=200,
+ body=self.resource.encode(output),
+ content_type="application/json"
+ )
+
+
+class PublishesStatusEndpoint(_RestApiEndpoint):
+ """Returns list of dict with batch info for user (email address)."""
+ async def get(self, user) -> Response:
+ output = list(self.dbcon.find({"user": user}))
+
+ return Response(
+ status=200,
+ body=self.resource.encode(output),
+ content_type="application/json"
+ )
diff --git a/openpype/hosts/webpublisher/webserver_service/webserver_cli.py b/openpype/hosts/webpublisher/webserver_service/webserver_cli.py
new file mode 100644
index 0000000000..d00d269059
--- /dev/null
+++ b/openpype/hosts/webpublisher/webserver_service/webserver_cli.py
@@ -0,0 +1,141 @@
+import time
+import os
+from datetime import datetime
+import requests
+import json
+
+from openpype.lib import PypeLogger
+
+from .webpublish_routes import (
+ RestApiResource,
+ OpenPypeRestApiResource,
+ WebpublisherBatchPublishEndpoint,
+ WebpublisherTaskPublishEndpoint,
+ WebpublisherHiearchyEndpoint,
+ WebpublisherProjectsEndpoint,
+ BatchStatusEndpoint,
+ PublishesStatusEndpoint
+)
+
+
+log = PypeLogger.get_logger("webserver_gui")
+
+
+def run_webserver(*args, **kwargs):
+ """Runs webserver in command line, adds routes."""
+ from openpype.modules import ModulesManager
+
+ manager = ModulesManager()
+ webserver_module = manager.modules_by_name["webserver"]
+ host = kwargs.get("host") or "localhost"
+ port = kwargs.get("port") or 8079
+ server_manager = webserver_module.create_new_server_manager(port, host)
+ webserver_url = server_manager.url
+
+ resource = RestApiResource(server_manager,
+ upload_dir=kwargs["upload_dir"],
+ executable=kwargs["executable"])
+ projects_endpoint = WebpublisherProjectsEndpoint(resource)
+ server_manager.add_route(
+ "GET",
+ "/api/projects",
+ projects_endpoint.dispatch
+ )
+
+ hiearchy_endpoint = WebpublisherHiearchyEndpoint(resource)
+ server_manager.add_route(
+ "GET",
+ "/api/hierarchy/{project_name}",
+ hiearchy_endpoint.dispatch
+ )
+
+    # triggers publish
+    webpublisher_batch_publish_endpoint = \
+        WebpublisherBatchPublishEndpoint(resource)
+    server_manager.add_route(
+        "POST",
+        "/api/webpublish/batch",
+        webpublisher_batch_publish_endpoint.dispatch
+    )
+
+    webpublisher_task_publish_endpoint = \
+        WebpublisherTaskPublishEndpoint(resource)
+    server_manager.add_route(
+        "POST",
+        "/api/webpublish/task",
+        webpublisher_task_publish_endpoint.dispatch
+    )
+
+ # reporting
+ openpype_resource = OpenPypeRestApiResource()
+ batch_status_endpoint = BatchStatusEndpoint(openpype_resource)
+ server_manager.add_route(
+ "GET",
+ "/api/batch_status/{batch_id}",
+ batch_status_endpoint.dispatch
+ )
+
+ user_status_endpoint = PublishesStatusEndpoint(openpype_resource)
+ server_manager.add_route(
+ "GET",
+ "/api/publishes/{user}",
+ user_status_endpoint.dispatch
+ )
+
+ server_manager.start_server()
+ last_reprocessed = time.time()
+ while True:
+ if time.time() - last_reprocessed > 20:
+ reprocess_failed(kwargs["upload_dir"], webserver_url)
+ last_reprocessed = time.time()
+ time.sleep(1.0)
+
+
+def reprocess_failed(upload_dir, webserver_url):
+ # log.info("check_reprocesable_records")
+ from openpype.lib import OpenPypeMongoConnection
+
+ mongo_client = OpenPypeMongoConnection.get_mongo_client()
+ database_name = os.environ["OPENPYPE_DATABASE_NAME"]
+ dbcon = mongo_client[database_name]["webpublishes"]
+
+ results = dbcon.find({"status": "reprocess"})
+ for batch in results:
+ batch_url = os.path.join(upload_dir,
+ batch["batch_id"],
+ "manifest.json")
+ log.info("batch:: {} {}".format(os.path.exists(batch_url), batch_url))
+ if not os.path.exists(batch_url):
+ msg = "Manifest {} not found".format(batch_url)
+ print(msg)
+ dbcon.update_one(
+ {"_id": batch["_id"]},
+ {"$set":
+ {
+ "finish_date": datetime.now(),
+ "status": "error",
+ "progress": 1,
+                        "log": (batch.get("log") or "") + msg
+ }}
+ )
+ continue
+ server_url = "{}/api/webpublish/batch".format(webserver_url)
+
+ with open(batch_url) as f:
+ data = json.loads(f.read())
+
+ try:
+ r = requests.post(server_url, json=data)
+ log.info("response{}".format(r))
+ except Exception:
+ log.info("exception", exc_info=True)
+
+ dbcon.update_one(
+ {"_id": batch["_id"]},
+ {"$set":
+ {
+ "finish_date": datetime.now(),
+ "status": "sent_for_reprocessing",
+ "progress": 1
+ }}
+ )
diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py
index 12c04a4236..3d392dc745 100644
--- a/openpype/lib/__init__.py
+++ b/openpype/lib/__init__.py
@@ -52,9 +52,11 @@ from .vendor_bin_utils import (
)
from .python_module_tools import (
+ import_filepath,
modules_from_path,
recursive_bases_from_class,
- classes_from_module
+ classes_from_module,
+ import_module_from_dirpath
)
from .avalon_context import (
@@ -69,6 +71,8 @@ from .avalon_context import (
get_linked_assets,
get_latest_version,
+ get_workfile_template_key,
+ get_workfile_template_key_from_context,
get_workdir_data,
get_workdir,
get_workdir_with_workdir_data,
@@ -170,9 +174,11 @@ __all__ = [
"get_ffmpeg_tool_path",
"ffprobe_streams",
+ "import_filepath",
"modules_from_path",
"recursive_bases_from_class",
"classes_from_module",
+ "import_module_from_dirpath",
"CURRENT_DOC_SCHEMAS",
"PROJECT_NAME_ALLOWED_SYMBOLS",
@@ -185,6 +191,8 @@ __all__ = [
"get_linked_assets",
"get_latest_version",
+ "get_workfile_template_key",
+ "get_workfile_template_key_from_context",
"get_workdir_data",
"get_workdir",
"get_workdir_with_workdir_data",
diff --git a/openpype/lib/abstract_submit_deadline.py b/openpype/lib/abstract_submit_deadline.py
index 4a052a4ee2..5b6e1743e0 100644
--- a/openpype/lib/abstract_submit_deadline.py
+++ b/openpype/lib/abstract_submit_deadline.py
@@ -415,13 +415,11 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin):
"""Plugin entry point."""
self._instance = instance
context = instance.context
- self._deadline_url = (
- context.data["system_settings"]
- ["modules"]
- ["deadline"]
- ["DEADLINE_REST_URL"]
- )
- assert self._deadline_url, "Requires DEADLINE_REST_URL"
+ self._deadline_url = context.data.get("defaultDeadline")
+ self._deadline_url = instance.data.get(
+ "deadlineUrl", self._deadline_url)
+
+ assert self._deadline_url, "Requires Deadline Webservice URL"
file_path = None
if self.use_published:
diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py
index fe964d3bab..fbf991a32e 100644
--- a/openpype/lib/applications.py
+++ b/openpype/lib/applications.py
@@ -28,7 +28,8 @@ from . import (
from .local_settings import get_openpype_username
from .avalon_context import (
get_workdir_data,
- get_workdir_with_workdir_data
+ get_workdir_with_workdir_data,
+ get_workfile_template_key_from_context
)
from .python_module_tools import (
@@ -1105,7 +1106,7 @@ def prepare_host_environments(data, implementation_envs=True):
asset_doc = data.get("asset_doc")
# Add tools environments
groups_by_name = {}
- tool_by_group_name = collections.defaultdict(list)
+ tool_by_group_name = collections.defaultdict(dict)
if asset_doc:
# Make sure each tool group can be added only once
for key in asset_doc["data"].get("tools_env") or []:
@@ -1113,12 +1114,14 @@ def prepare_host_environments(data, implementation_envs=True):
if not tool:
continue
groups_by_name[tool.group.name] = tool.group
- tool_by_group_name[tool.group.name].append(tool)
+ tool_by_group_name[tool.group.name][tool.name] = tool
- for group_name, group in groups_by_name.items():
+ for group_name in sorted(groups_by_name.keys()):
+ group = groups_by_name[group_name]
environments.append(group.environment)
added_env_keys.add(group_name)
- for tool in tool_by_group_name[group_name]:
+ for tool_name in sorted(tool_by_group_name[group_name].keys()):
+ tool = tool_by_group_name[group_name][tool_name]
environments.append(tool.environment)
added_env_keys.add(tool.name)
@@ -1234,8 +1237,18 @@ def prepare_context_environments(data):
anatomy = data["anatomy"]
+ template_key = get_workfile_template_key_from_context(
+ asset_doc["name"],
+ task_name,
+ app.host_name,
+ project_name=project_name,
+ dbcon=data["dbcon"]
+ )
+
try:
- workdir = get_workdir_with_workdir_data(workdir_data, anatomy)
+ workdir = get_workdir_with_workdir_data(
+ workdir_data, anatomy, template_key=template_key
+ )
except Exception as exc:
raise ApplicationLaunchFailed(
diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py
index c4217cc6d5..497348af33 100644
--- a/openpype/lib/avalon_context.py
+++ b/openpype/lib/avalon_context.py
@@ -344,6 +344,127 @@ def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None):
return version_doc
+def get_workfile_template_key_from_context(
+ asset_name, task_name, host_name, project_name=None,
+ dbcon=None, project_settings=None
+):
+ """Helper function to get template key for workfile template.
+
+    Does the same as `get_workfile_template_key` but returns value for "session
+ context".
+
+ It is required to pass one of 'dbcon' with already set project name or
+ 'project_name' arguments.
+
+ Args:
+ asset_name(str): Name of asset document.
+ task_name(str): Task name for which is template key retrieved.
+ Must be available on asset document under `data.tasks`.
+ host_name(str): Name of host implementation for which is workfile
+ used.
+ project_name(str): Project name where asset and task is. Not required
+ when 'dbcon' is passed.
+ dbcon(AvalonMongoDB): Connection to mongo with already set project
+ under `AVALON_PROJECT`. Not required when 'project_name' is passed.
+ project_settings(dict): Project settings for passed 'project_name'.
+ Not required at all but makes function faster.
+ Raises:
+ ValueError: When both 'dbcon' and 'project_name' were not
+ passed.
+ """
+ if not dbcon:
+ if not project_name:
+ raise ValueError((
+ "`get_workfile_template_key_from_context` requires to pass"
+ " one of 'dbcon' or 'project_name' arguments."
+ ))
+ from avalon.api import AvalonMongoDB
+
+ dbcon = AvalonMongoDB()
+ dbcon.Session["AVALON_PROJECT"] = project_name
+
+ elif not project_name:
+ project_name = dbcon.Session["AVALON_PROJECT"]
+
+ asset_doc = dbcon.find_one(
+ {
+ "type": "asset",
+ "name": asset_name
+ },
+ {
+ "data.tasks": 1
+ }
+ )
+ asset_tasks = asset_doc.get("data", {}).get("tasks") or {}
+ task_info = asset_tasks.get(task_name) or {}
+ task_type = task_info.get("type")
+
+ return get_workfile_template_key(
+ task_type, host_name, project_name, project_settings
+ )
+
+
+def get_workfile_template_key(
+ task_type, host_name, project_name=None, project_settings=None
+):
+ """Workfile template key which should be used to get workfile template.
+
+ Function is using profiles from project settings to return right template
+    for passed task type and host name.
+
+    One of 'project_name' or 'project_settings' must be passed. It is preferred
+    to pass settings if they are already available.
+
+ Args:
+ task_type(str): Name of task type.
+ host_name(str): Name of host implementation (e.g. "maya", "nuke", ...)
+ project_name(str): Name of project in which context should look for
+ settings. Not required if `project_settings` are passed.
+        project_settings(dict): Prepared project settings for project name.
+ Not needed if `project_name` is passed.
+
+ Raises:
+ ValueError: When both 'project_name' and 'project_settings' were not
+ passed.
+ """
+ default = "work"
+ if not task_type or not host_name:
+ return default
+
+ if not project_settings:
+ if not project_name:
+ raise ValueError((
+ "`get_workfile_template_key` requires to pass"
+ " one of 'project_name' or 'project_settings' arguments."
+ ))
+ project_settings = get_project_settings(project_name)
+
+ try:
+ profiles = (
+ project_settings
+ ["global"]
+ ["tools"]
+ ["Workfiles"]
+ ["workfile_template_profiles"]
+ )
+ except Exception:
+ profiles = []
+
+ if not profiles:
+ return default
+
+ from .profiles_filtering import filter_profiles
+
+ profile_filter = {
+ "task_types": task_type,
+ "hosts": host_name
+ }
+ profile = filter_profiles(profiles, profile_filter)
+ if profile:
+ return profile["workfile_template"] or default
+ return default
+
+
def get_workdir_data(project_doc, asset_doc, task_name, host_name):
"""Prepare data for workdir template filling from entered information.
@@ -373,7 +494,8 @@ def get_workdir_data(project_doc, asset_doc, task_name, host_name):
def get_workdir_with_workdir_data(
- workdir_data, anatomy=None, project_name=None, template_key=None
+ workdir_data, anatomy=None, project_name=None,
+ template_key=None, dbcon=None
):
"""Fill workdir path from entered data and project's anatomy.
@@ -387,8 +509,10 @@ def get_workdir_with_workdir_data(
`project_name` is entered.
project_name (str): Project's name. Optional if `anatomy` is entered
otherwise Anatomy object is created with using the project name.
- template_key (str): Key of work templates in anatomy templates. By
- default is seto to `"work"`.
+ template_key (str): Key of work templates in anatomy templates. If not
+ passed `get_workfile_template_key_from_context` is used to get it.
+ dbcon(AvalonMongoDB): Mongo connection. Required only if 'template_key'
+ and 'project_name' are not passed.
Returns:
TemplateResult: Workdir path.
@@ -406,7 +530,13 @@ def get_workdir_with_workdir_data(
anatomy = Anatomy(project_name)
if not template_key:
- template_key = "work"
+ template_key = get_workfile_template_key_from_context(
+ workdir_data["asset"],
+ workdir_data["task"],
+ workdir_data["app"],
+ project_name=workdir_data["project"]["name"],
+ dbcon=dbcon
+ )
anatomy_filled = anatomy.format(workdir_data)
# Output is TemplateResult object which contain usefull data
@@ -447,7 +577,9 @@ def get_workdir(
project_doc, asset_doc, task_name, host_name
)
# Output is TemplateResult object which contain usefull data
- return get_workdir_with_workdir_data(workdir_data, anatomy, template_key)
+ return get_workdir_with_workdir_data(
+ workdir_data, anatomy, template_key=template_key
+ )
@with_avalon
@@ -516,7 +648,9 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None):
# Prepare anatomy
anatomy = Anatomy(project_doc["name"])
# Get workdir path (result is anatomy.TemplateResult)
- template_workdir = get_workdir_with_workdir_data(workdir_data, anatomy)
+ template_workdir = get_workdir_with_workdir_data(
+ workdir_data, anatomy, dbcon=dbcon
+ )
template_workdir_path = str(template_workdir).replace("\\", "/")
# Replace slashses in workdir path where workfile is located
diff --git a/openpype/lib/log.py b/openpype/lib/log.py
index 39b6c67080..85cbc733ba 100644
--- a/openpype/lib/log.py
+++ b/openpype/lib/log.py
@@ -72,6 +72,8 @@ class PypeStreamHandler(logging.StreamHandler):
msg = self.format(record)
msg = Terminal.log(msg)
stream = self.stream
+ if stream is None:
+ return
fs = "%s\n"
# if no unicode support...
if not USE_UNICODE:
diff --git a/openpype/lib/python_module_tools.py b/openpype/lib/python_module_tools.py
index 44a1007889..cb5f285ddd 100644
--- a/openpype/lib/python_module_tools.py
+++ b/openpype/lib/python_module_tools.py
@@ -9,6 +9,38 @@ log = logging.getLogger(__name__)
PY3 = sys.version_info[0] == 3
+def import_filepath(filepath, module_name=None):
+ """Import python file as python module.
+
+ Python 2 and Python 3 compatibility.
+
+ Args:
+ filepath(str): Path to python file.
+ module_name(str): Name of loaded module. Only for Python 3. By default
+ is filled with filename of filepath.
+ """
+ if module_name is None:
+ module_name = os.path.splitext(os.path.basename(filepath))[0]
+
+ # Prepare module object where content of file will be parsed
+ module = types.ModuleType(module_name)
+
+ if PY3:
+ # Use loader so module has full specs
+ module_loader = importlib.machinery.SourceFileLoader(
+ module_name, filepath
+ )
+ module_loader.exec_module(module)
+ else:
+ # Execute module code and store content to module
+ with open(filepath) as _stream:
+ # Execute content and store it to module object
+ exec(_stream.read(), module.__dict__)
+
+ module.__file__ = filepath
+ return module
+
+
def modules_from_path(folder_path):
"""Get python scripts as modules from a path.
@@ -55,23 +87,7 @@ def modules_from_path(folder_path):
continue
try:
- # Prepare module object where content of file will be parsed
- module = types.ModuleType(mod_name)
-
- if PY3:
- # Use loader so module has full specs
- module_loader = importlib.machinery.SourceFileLoader(
- mod_name, full_path
- )
- module_loader.exec_module(module)
- else:
- # Execute module code and store content to module
- with open(full_path) as _stream:
- # Execute content and store it to module object
- exec(_stream.read(), module.__dict__)
-
- module.__file__ = full_path
-
+ module = import_filepath(full_path, mod_name)
modules.append((full_path, module))
except Exception:
@@ -127,3 +143,96 @@ def classes_from_module(superclass, module):
classes.append(obj)
return classes
+
+
+def _import_module_from_dirpath_py2(dirpath, module_name, dst_module_name):
+ """Import passed dirpath as python module using `imp`."""
+ if dst_module_name:
+ full_module_name = "{}.{}".format(dst_module_name, module_name)
+ dst_module = sys.modules[dst_module_name]
+ else:
+ full_module_name = module_name
+ dst_module = None
+
+ if full_module_name in sys.modules:
+ return sys.modules[full_module_name]
+
+ import imp
+
+ fp, pathname, description = imp.find_module(module_name, [dirpath])
+ module = imp.load_module(full_module_name, fp, pathname, description)
+ if dst_module is not None:
+ setattr(dst_module, module_name, module)
+
+ return module
+
+
+def _import_module_from_dirpath_py3(dirpath, module_name, dst_module_name):
+ """Import passed dirpath as python module using Python 3 modules."""
+ if dst_module_name:
+ full_module_name = "{}.{}".format(dst_module_name, module_name)
+ dst_module = sys.modules[dst_module_name]
+ else:
+ full_module_name = module_name
+ dst_module = None
+
+ # Skip import if is already imported
+ if full_module_name in sys.modules:
+ return sys.modules[full_module_name]
+
+ import importlib.util
+ from importlib._bootstrap_external import PathFinder
+
+ # Find loader for passed path and name
+ loader = PathFinder.find_module(full_module_name, [dirpath])
+
+ # Load specs of module
+ spec = importlib.util.spec_from_loader(
+ full_module_name, loader, origin=dirpath
+ )
+
+ # Create module based on specs
+ module = importlib.util.module_from_spec(spec)
+
+ # Store module to destination module and `sys.modules`
+    # WARNING this must be done before module execution
+ if dst_module is not None:
+ setattr(dst_module, module_name, module)
+
+ sys.modules[full_module_name] = module
+
+ # Execute module import
+ loader.exec_module(module)
+
+ return module
+
+
+def import_module_from_dirpath(dirpath, folder_name, dst_module_name=None):
+ """Import passed directory as a python module.
+
+ Python 2 and 3 compatible.
+
+ Imported module can be assigned as a child attribute of already loaded
+ module from `sys.modules` if has support of `setattr`. That is not default
+ behavior of python modules so parent module must be a custom module with
+ that ability.
+
+ It is not possible to reimport already cached module. If you need to
+ reimport module you have to remove it from caches manually.
+
+ Args:
+ dirpath(str): Parent directory path of loaded folder.
+ folder_name(str): Folder name which should be imported inside passed
+ directory.
+ dst_module_name(str): Parent module name under which can be loaded
+ module added.
+ """
+ if PY3:
+ module = _import_module_from_dirpath_py3(
+ dirpath, folder_name, dst_module_name
+ )
+ else:
+ module = _import_module_from_dirpath_py2(
+ dirpath, folder_name, dst_module_name
+ )
+ return module
diff --git a/openpype/lib/usdlib.py b/openpype/lib/usdlib.py
new file mode 100644
index 0000000000..3ae7430c7b
--- /dev/null
+++ b/openpype/lib/usdlib.py
@@ -0,0 +1,350 @@
+import os
+import re
+import logging
+
+try:
+ from pxr import Usd, UsdGeom, Sdf, Kind
+except ImportError:
+ # Allow to fall back on Multiverse 6.3.0+ pxr usd library
+ from mvpxr import Usd, UsdGeom, Sdf, Kind
+
+from avalon import io, api
+
+log = logging.getLogger(__name__)
+
+
+# The predefined steps order used for bootstrapping USD Shots and Assets.
+# These are ordered in order from strongest to weakest opinions, like in USD.
+PIPELINE = {
+ "shot": [
+ "usdLighting",
+ "usdFx",
+ "usdSimulation",
+ "usdAnimation",
+ "usdLayout",
+ ],
+ "asset": ["usdShade", "usdModel"],
+}
+
+
+def create_asset(
+ filepath, asset_name, reference_layers, kind=Kind.Tokens.component
+):
+ """
+ Creates an asset file that consists of a top level layer and sublayers for
+ shading and geometry.
+
+ Args:
+ filepath (str): Filepath where the asset.usd file will be saved.
+ reference_layers (list): USD Files to reference in the asset.
+ Note that the bottom layer (first file, like a model) would
+ be last in the list. The strongest layer will be the first
+ index.
+ asset_name (str): The name for the Asset identifier and default prim.
+ kind (pxr.Kind): A USD Kind for the root asset.
+
+ """
+ # Also see create_asset.py in PixarAnimationStudios/USD endToEnd example
+
+ log.info("Creating asset at %s", filepath)
+
+ # Make the layer ascii - good for readability, plus the file is small
+ root_layer = Sdf.Layer.CreateNew(filepath, args={"format": "usda"})
+ stage = Usd.Stage.Open(root_layer)
+
+ # Define a prim for the asset and make it the default for the stage.
+ asset_prim = UsdGeom.Xform.Define(stage, "/%s" % asset_name).GetPrim()
+ stage.SetDefaultPrim(asset_prim)
+
+ # Let viewing applications know how to orient a free camera properly
+ UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y)
+
+ # Usually we will "loft up" the kind authored into the exported geometry
+ # layer rather than re-stamping here; we'll leave that for a later
+ # tutorial, and just be explicit here.
+ model = Usd.ModelAPI(asset_prim)
+ if kind:
+ model.SetKind(kind)
+
+ model.SetAssetName(asset_name)
+ model.SetAssetIdentifier("%s/%s.usd" % (asset_name, asset_name))
+
+ # Add references to the asset prim
+ references = asset_prim.GetReferences()
+ for reference_filepath in reference_layers:
+ references.AddReference(reference_filepath)
+
+ stage.GetRootLayer().Save()
+
+
+def create_shot(filepath, layers, create_layers=False):
+ """Create a shot with separate layers for departments.
+
+ Args:
+ filepath (str): Filepath where the asset.usd file will be saved.
+ layers (str): When provided this will be added verbatim in the
+ subLayerPaths layers. When the provided layer paths do not exist
+ they are generated using Sdf.Layer.CreateNew
+ create_layers (bool): Whether to create the stub layers on disk if
+ they do not exist yet.
+
+ Returns:
+ str: The saved shot file path
+
+ """
+ # Also see create_shot.py in PixarAnimationStudios/USD endToEnd example
+
+ stage = Usd.Stage.CreateNew(filepath)
+ log.info("Creating shot at %s" % filepath)
+
+ for layer_path in layers:
+ if create_layers and not os.path.exists(layer_path):
+ # We use the Sdf API here to quickly create layers. Also, we're
+ # using it as a way to author the subLayerPaths as there is no
+ # way to do that directly in the Usd API.
+ layer_folder = os.path.dirname(layer_path)
+ if not os.path.exists(layer_folder):
+ os.makedirs(layer_folder)
+
+ Sdf.Layer.CreateNew(layer_path)
+
+ stage.GetRootLayer().subLayerPaths.append(layer_path)
+
+ # Lets viewing applications know how to orient a free camera properly
+ UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y)
+ stage.GetRootLayer().Save()
+
+ return filepath
+
+
+def create_model(filename, asset, variant_subsets):
+ """Create a USD Model file.
+
+ For each of the variation paths it will payload the path and set its
+ relevant variation name.
+
+ """
+
+ asset_doc = io.find_one({"name": asset, "type": "asset"})
+ assert asset_doc, "Asset not found: %s" % asset
+
+ variants = []
+ for subset in variant_subsets:
+ prefix = "usdModel"
+ if subset.startswith(prefix):
+ # Strip off `usdModel_`
+ variant = subset[len(prefix):]
+ else:
+ raise ValueError(
+ "Model subsets must start " "with usdModel: %s" % subset
+ )
+
+ path = get_usd_master_path(
+ asset=asset_doc, subset=subset, representation="usd"
+ )
+ variants.append((variant, path))
+
+ stage = _create_variants_file(
+ filename,
+ variants=variants,
+ variantset="model",
+ variant_prim="/root",
+ reference_prim="/root/geo",
+ as_payload=True,
+ )
+
+ UsdGeom.SetStageMetersPerUnit(stage, 1)
+ UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y)
+
+ # modelAPI = Usd.ModelAPI(root_prim)
+ # modelAPI.SetKind(Kind.Tokens.component)
+
+ # See http://openusd.org/docs/api/class_usd_model_a_p_i.html#details
+ # for more on assetInfo
+ # modelAPI.SetAssetName(asset)
+ # modelAPI.SetAssetIdentifier(asset)
+
+ stage.GetRootLayer().Save()
+
+
+def create_shade(filename, asset, variant_subsets):
+ """Create a master USD shade file for an asset.
+
+ For each available model variation this should generate a reference
+ to a `usdShade_{modelVariant}` subset.
+
+ """
+
+ asset_doc = io.find_one({"name": asset, "type": "asset"})
+ assert asset_doc, "Asset not found: %s" % asset
+
+ variants = []
+
+ for subset in variant_subsets:
+ prefix = "usdModel"
+ if subset.startswith(prefix):
+ # Strip off `usdModel_`
+ variant = subset[len(prefix):]
+ else:
+ raise ValueError(
+ "Model subsets must start " "with usdModel: %s" % subset
+ )
+
+ shade_subset = re.sub("^usdModel", "usdShade", subset)
+ path = get_usd_master_path(
+ asset=asset_doc, subset=shade_subset, representation="usd"
+ )
+ variants.append((variant, path))
+
+ stage = _create_variants_file(
+ filename, variants=variants, variantset="model", variant_prim="/root"
+ )
+
+ stage.GetRootLayer().Save()
+
+
+def create_shade_variation(filename, asset, model_variant, shade_variants):
+ """Create the master Shade file for a specific model variant.
+
+ This should reference all shade variants for the specific model variant.
+
+ """
+
+ asset_doc = io.find_one({"name": asset, "type": "asset"})
+ assert asset_doc, "Asset not found: %s" % asset
+
+ variants = []
+ for variant in shade_variants:
+ subset = "usdShade_{model}_{shade}".format(
+ model=model_variant, shade=variant
+ )
+ path = get_usd_master_path(
+ asset=asset_doc, subset=subset, representation="usd"
+ )
+ variants.append((variant, path))
+
+ stage = _create_variants_file(
+ filename, variants=variants, variantset="shade", variant_prim="/root"
+ )
+
+ stage.GetRootLayer().Save()
+
+
+def _create_variants_file(
+ filename,
+ variants,
+ variantset,
+ default_variant=None,
+ variant_prim="/root",
+ reference_prim=None,
+ set_default_variant=True,
+ as_payload=False,
+ skip_variant_on_single_file=True,
+):
+
+ root_layer = Sdf.Layer.CreateNew(filename, args={"format": "usda"})
+ stage = Usd.Stage.Open(root_layer)
+
+ root_prim = stage.DefinePrim(variant_prim)
+ stage.SetDefaultPrim(root_prim)
+
+ def _reference(path):
+ """Reference/Payload path depending on function arguments"""
+
+ if reference_prim:
+ prim = stage.DefinePrim(reference_prim)
+ else:
+ prim = root_prim
+
+ if as_payload:
+ # Payload
+ prim.GetPayloads().AddPayload(Sdf.Payload(path))
+ else:
+ # Reference
+ prim.GetReferences().AddReference(Sdf.Reference(path))
+
+ assert variants, "Must have variants, got: %s" % variants
+
+ log.info(filename)
+
+ if skip_variant_on_single_file and len(variants) == 1:
+ # Reference directly, no variants
+ variant_path = variants[0][1]
+ _reference(variant_path)
+
+ log.info("Non-variants..")
+ log.info("Path: %s" % variant_path)
+
+ else:
+ # Variants
+ append = Usd.ListPositionBackOfAppendList
+ variant_set = root_prim.GetVariantSets().AddVariantSet(
+ variantset, append
+ )
+
+ for variant, variant_path in variants:
+
+ if default_variant is None:
+ default_variant = variant
+
+ variant_set.AddVariant(variant, append)
+ variant_set.SetVariantSelection(variant)
+ with variant_set.GetVariantEditContext():
+ _reference(variant_path)
+
+ log.info("Variants..")
+ log.info("Variant: %s" % variant)
+ log.info("Path: %s" % variant_path)
+
+ if set_default_variant:
+ variant_set.SetVariantSelection(default_variant)
+
+ return stage
+
+
+def get_usd_master_path(asset, subset, representation):
+ """Get the filepath for a .usd file of a subset.
+
+ This will return the path to an unversioned master file generated by
+ `usd_master_file.py`.
+
+ """
+
+ project = io.find_one(
+ {"type": "project"}, projection={"config.template.publish": True}
+ )
+ template = project["config"]["template"]["publish"]
+
+ if isinstance(asset, dict) and "silo" in asset and "name" in asset:
+ # Allow explicitly passing asset document
+ asset_doc = asset
+ else:
+ asset_doc = io.find_one({"name": asset, "type": "asset"})
+
+ path = template.format(
+ **{
+ "root": api.registered_root(),
+ "project": api.Session["AVALON_PROJECT"],
+ "silo": asset_doc["silo"],
+ "asset": asset_doc["name"],
+ "subset": subset,
+ "representation": representation,
+ "version": 0, # stub version zero
+ }
+ )
+
+ # Remove the version folder
+ subset_folder = os.path.dirname(os.path.dirname(path))
+ master_folder = os.path.join(subset_folder, "master")
+ fname = "{0}.{1}".format(subset, representation)
+
+ return os.path.join(master_folder, fname).replace("\\", "/")
+
+
+def parse_avalon_uri(uri):
+ # URI Pattern: avalon://{asset}/{subset}.{ext}
+    pattern = r"avalon://(?P<asset>[^/.]*)/(?P<subset>[^/]*)\.(?P<ext>.*)"
+ if uri.startswith("avalon://"):
+ match = re.match(pattern, uri)
+ if match:
+ return match.groupdict()
diff --git a/openpype/modules/README.md b/openpype/modules/README.md
index 818375461f..a3733518ac 100644
--- a/openpype/modules/README.md
+++ b/openpype/modules/README.md
@@ -1,7 +1,19 @@
-# Pype modules
-Pype modules should contain separated logic of specific kind of implementation, like Ftrack connection and usage code or Deadline farm rendering.
+# OpenPype modules/addons
+OpenPype modules should contain separated logic of specific kind of implementation, like Ftrack connection and usage code or Deadline farm rendering, or may contain only special plugins. Addons work the same way; currently there is no difference between a module and an addon.
-## Base class `PypeModule`
+## Modules concept
+- modules and addons are dynamically imported to virtual python module `openpype_modules` from which it is possible to import them no matter where the module is located
+- modules or addons should never be imported directly even if you know possible full import path
+  - it is because all of their content must be imported in a specific order and should not be imported without the defined functions as it may also break a few implementation parts
+
+### TODOs
+- add module/addon manifest
+ - definition of module (not 100% defined content e.g. minimum require OpenPype version etc.)
+  - defining that a folder is content of a module or an addon
+- module/addon has its settings schemas and default values outside OpenPype
+- add general setting of paths to modules
+
+## Base class `OpenPypeModule`
- abstract class as base for each module
- implementation should be module's api withou GUI parts
- may implement `get_global_environments` method which should return dictionary of environments that are globally appliable and value is the same for whole studio if launched at any workstation (except os specific paths)
@@ -17,6 +29,16 @@ Pype modules should contain separated logic of specific kind of implementation,
- interface is class that has defined abstract methods to implement and may contain preimplemented helper methods
- module that inherit from an interface must implement those abstract methods otherwise won't be initialized
- it is easy to find which module object inherited from which interfaces withh 100% chance they have implemented required methods
+- interfaces can be defined in `interfaces.py` inside module directory
+ - the file can't use relative imports or import anything from other parts
+ of module itself at the header of file
+ - this is one of reasons why modules/addons can't be imported directly without using defined functions in OpenPype modules implementation
+
+## Base class `OpenPypeInterface`
+- has nothing implemented
+- has ABCMeta as metaclass
+- is defined to be able to find out which classes inherit from this base, to
+  be able to tell this is an Interface
## Global interfaces
- few interfaces are implemented for global usage
@@ -70,7 +92,7 @@ Pype modules should contain separated logic of specific kind of implementation,
- Clockify has more inharitance it's class definition looks like
```
class ClockifyModule(
- PypeModule, # Says it's Pype module so ModulesManager will try to initialize.
+ OpenPypeModule, # Says it's Pype module so ModulesManager will try to initialize.
ITrayModule, # Says has special implementation when used in tray.
IPluginPaths, # Says has plugin paths that want to register (paths to clockify actions for launcher).
IFtrackEventHandlerPaths, # Says has Ftrack actions/events for user/server.
diff --git a/openpype/modules/__init__.py b/openpype/modules/__init__.py
index d6fb9c0aef..583480b049 100644
--- a/openpype/modules/__init__.py
+++ b/openpype/modules/__init__.py
@@ -1,84 +1,21 @@
# -*- coding: utf-8 -*-
from .base import (
- PypeModule,
- ITrayModule,
- ITrayAction,
- ITrayService,
- IPluginPaths,
- ILaunchHookPaths,
+ OpenPypeModule,
+ OpenPypeInterface,
+
+ load_modules,
+
ModulesManager,
TrayModulesManager
)
-from .settings_action import (
- SettingsAction,
- ISettingsChangeListener,
- LocalSettingsAction
-)
-from .webserver import (
- WebServerModule,
- IWebServerRoutes
-)
-from .idle_manager import (
- IdleManager,
- IIdleManager
-)
-from .timers_manager import (
- TimersManager,
- ITimersManager
-)
-from .avalon_apps import AvalonModule
-from .launcher_action import LauncherAction
-from .ftrack import (
- FtrackModule,
- IFtrackEventHandlerPaths
-)
-from .clockify import ClockifyModule
-from .log_viewer import LogViewModule
-from .muster import MusterModule
-from .deadline import DeadlineModule
-from .project_manager_action import ProjectManagerAction
-from .standalonepublish_action import StandAlonePublishAction
-from .sync_server import SyncServerModule
-from .slack import SlackIntegrationModule
__all__ = (
- "PypeModule",
- "ITrayModule",
- "ITrayAction",
- "ITrayService",
- "IPluginPaths",
- "ILaunchHookPaths",
+ "OpenPypeModule",
+ "OpenPypeInterface",
+
+ "load_modules",
+
"ModulesManager",
- "TrayModulesManager",
-
- "SettingsAction",
- "LocalSettingsAction",
-
- "WebServerModule",
- "IWebServerRoutes",
-
- "IdleManager",
- "IIdleManager",
-
- "TimersManager",
- "ITimersManager",
-
- "AvalonModule",
- "LauncherAction",
-
- "FtrackModule",
- "IFtrackEventHandlerPaths",
-
- "ClockifyModule",
- "IdleManager",
- "LogViewModule",
- "MusterModule",
- "DeadlineModule",
- "ProjectManagerAction",
- "StandAlonePublishAction",
-
- "SyncServerModule",
-
- "SlackIntegrationModule"
+ "TrayModulesManager"
)
diff --git a/openpype/modules/base.py b/openpype/modules/base.py
index c7efbd5ab3..d43d5635d1 100644
--- a/openpype/modules/base.py
+++ b/openpype/modules/base.py
@@ -1,8 +1,11 @@
# -*- coding: utf-8 -*-
"""Base class for Pype Modules."""
+import os
+import sys
import time
import inspect
import logging
+import threading
import collections
from uuid import uuid4
from abc import ABCMeta, abstractmethod
@@ -11,11 +14,305 @@ import six
import openpype
from openpype.settings import get_system_settings
from openpype.lib import PypeLogger
-from openpype import resources
+
+
+# Inherit from `object` for Python 2 hosts
+class _ModuleClass(object):
+ """Fake module class for storing OpenPype modules.
+
+ Object of this class can be stored to `sys.modules` and used for storing
+ dynamically imported modules.
+ """
+ def __init__(self, name):
+ # Call setattr on super class
+ super(_ModuleClass, self).__setattr__("name", name)
+
+ # Where modules and interfaces are stored
+ super(_ModuleClass, self).__setattr__("__attributes__", dict())
+ super(_ModuleClass, self).__setattr__("__defaults__", set())
+
+ super(_ModuleClass, self).__setattr__("_log", None)
+
+ def __getattr__(self, attr_name):
+ if attr_name not in self.__attributes__:
+ if attr_name in ("__path__"):
+ return None
+ raise ImportError("No module named {}.{}".format(
+ self.name, attr_name
+ ))
+ return self.__attributes__[attr_name]
+
+ def __iter__(self):
+ for module in self.values():
+ yield module
+
+ def __setattr__(self, attr_name, value):
+ if attr_name in self.__attributes__:
+ self.log.warning(
+ "Duplicated name \"{}\" in {}. Overriding.".format(
+ self.name, attr_name
+ )
+ )
+ self.__attributes__[attr_name] = value
+
+ def __setitem__(self, key, value):
+ self.__setattr__(key, value)
+
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+ @property
+ def log(self):
+ if self._log is None:
+ super(_ModuleClass, self).__setattr__(
+ "_log", PypeLogger.get_logger(self.name)
+ )
+ return self._log
+
+ def get(self, key, default=None):
+ return self.__attributes__.get(key, default)
+
+ def keys(self):
+ return self.__attributes__.keys()
+
+ def values(self):
+ return self.__attributes__.values()
+
+ def items(self):
+ return self.__attributes__.items()
+
+
+class _InterfacesClass(_ModuleClass):
+ """Fake module class for storing OpenPype interfaces.
+
+    MissingInterface object is returned if an interface does not exist.
+ - this is because interfaces must be available even if are missing
+ implementation
+ """
+ def __getattr__(self, attr_name):
+ if attr_name not in self.__attributes__:
+            # Create a fake interface class if it is missing
+ self.__attributes__[attr_name] = type(
+ attr_name,
+ (MissingInteface, ),
+ {}
+ )
+
+ return self.__attributes__[attr_name]
+
+
+class _LoadCache:
+ interfaces_lock = threading.Lock()
+ modules_lock = threading.Lock()
+ interfaces_loaded = False
+ modules_loaded = False
+
+
+def get_default_modules_dir():
+ """Path to default OpenPype modules."""
+ current_dir = os.path.abspath(os.path.dirname(__file__))
+
+ return os.path.join(current_dir, "default_modules")
+
+
+def get_module_dirs():
+ """List of paths where OpenPype modules can be found."""
+ dirpaths = [
+ get_default_modules_dir()
+ ]
+ return dirpaths
+
+
+def load_interfaces(force=False):
+ """Load interfaces from modules into `openpype_interfaces`.
+
+ Only classes which inherit from `OpenPypeInterface` are loaded and stored.
+
+ Args:
+ force(bool): Force to load interfaces even if are already loaded.
+ This won't update already loaded and used (cached) interfaces.
+ """
+
+ if _LoadCache.interfaces_loaded and not force:
+ return
+
+ if not _LoadCache.interfaces_lock.locked():
+ with _LoadCache.interfaces_lock:
+ _load_interfaces()
+ _LoadCache.interfaces_loaded = True
+ else:
+ # If lock is locked wait until is finished
+ while _LoadCache.interfaces_lock.locked():
+ time.sleep(0.1)
+
+
+def _load_interfaces():
+ # Key under which will be modules imported in `sys.modules`
+ from openpype.lib import import_filepath
+
+ modules_key = "openpype_interfaces"
+
+ sys.modules[modules_key] = openpype_interfaces = (
+ _InterfacesClass(modules_key)
+ )
+
+ log = PypeLogger.get_logger("InterfacesLoader")
+
+ dirpaths = get_module_dirs()
+
+ interface_paths = []
+ interface_paths.append(
+ os.path.join(get_default_modules_dir(), "interfaces.py")
+ )
+ for dirpath in dirpaths:
+ for filename in os.listdir(dirpath):
+ if filename in ("__pycache__", ):
+ continue
+
+ full_path = os.path.join(dirpath, filename)
+ if not os.path.isdir(full_path):
+ continue
+
+ interfaces_path = os.path.join(full_path, "interfaces.py")
+ if os.path.exists(interfaces_path):
+ interface_paths.append(interfaces_path)
+
+ for full_path in interface_paths:
+ if not os.path.exists(full_path):
+ continue
+
+ try:
+ # Prepare module object where content of file will be parsed
+ module = import_filepath(full_path)
+
+ except Exception:
+ log.warning(
+ "Failed to load path: \"{0}\"".format(full_path),
+ exc_info=True
+ )
+ continue
+
+ for attr_name in dir(module):
+ attr = getattr(module, attr_name)
+ if (
+ not inspect.isclass(attr)
+ or attr is OpenPypeInterface
+ or not issubclass(attr, OpenPypeInterface)
+ ):
+ continue
+ setattr(openpype_interfaces, attr_name, attr)
+
+
+def load_modules(force=False):
+ """Load OpenPype modules as python modules.
+
+    Modules do not load only classes (like in Interfaces) because there must
+ be ability to use inner code of module and be able to import it from one
+ defined place.
+
+ With this it is possible to import module's content from predefined module.
+
+ Function makes sure that `load_interfaces` was triggered. Modules import
+ has specific order which can't be changed.
+
+ Args:
+ force(bool): Force to load modules even if are already loaded.
+ This won't update already loaded and used (cached) modules.
+ """
+
+ if _LoadCache.modules_loaded and not force:
+ return
+
+ # First load interfaces
+ # - modules must not be imported before interfaces
+ load_interfaces(force)
+
+ if not _LoadCache.modules_lock.locked():
+ with _LoadCache.modules_lock:
+ _load_modules()
+ _LoadCache.modules_loaded = True
+ else:
+ # If lock is locked wait until is finished
+ while _LoadCache.modules_lock.locked():
+ time.sleep(0.1)
+
+
+def _load_modules():
+ # Import helper functions from lib
+ from openpype.lib import (
+ import_filepath,
+ import_module_from_dirpath
+ )
+
+ # Key under which will be modules imported in `sys.modules`
+ modules_key = "openpype_modules"
+
+ # Change `sys.modules`
+ sys.modules[modules_key] = openpype_modules = _ModuleClass(modules_key)
+
+ log = PypeLogger.get_logger("ModulesLoader")
+
+ # Look for OpenPype modules in paths defined with `get_module_dirs`
+ dirpaths = get_module_dirs()
+
+ for dirpath in dirpaths:
+ if not os.path.exists(dirpath):
+ log.warning((
+ "Could not find path when loading OpenPype modules \"{}\""
+ ).format(dirpath))
+ continue
+
+ for filename in os.listdir(dirpath):
+ # Ignore filenames
+ if filename in ("__pycache__", ):
+ continue
+
+ fullpath = os.path.join(dirpath, filename)
+ basename, ext = os.path.splitext(filename)
+
+ # TODO add more logic how to define if folder is module or not
+ # - check manifest and content of manifest
+ if os.path.isdir(fullpath):
+ import_module_from_dirpath(dirpath, filename, modules_key)
+
+ elif ext in (".py", ):
+ module = import_filepath(fullpath)
+ setattr(openpype_modules, basename, module)
+
+
+class _OpenPypeInterfaceMeta(ABCMeta):
+ """OpenPypeInterface meta class to print proper string."""
+ def __str__(self):
+ return "<'OpenPypeInterface.{}'>".format(self.__name__)
+
+ def __repr__(self):
+ return str(self)
+
+
+@six.add_metaclass(_OpenPypeInterfaceMeta)
+class OpenPypeInterface:
+ """Base class of Interface that can be used as Mixin with abstract parts.
+
+ This is way how OpenPype module or addon can tell that has implementation
+ for specific part or for other module/addon.
+
+ Child classes of OpenPypeInterface may be used as mixin in different
+ OpenPype modules which means they have to have implemented methods defined
+ in the interface. By default interface does not have any abstract parts.
+ """
+ pass
+
+
+class MissingInteface(OpenPypeInterface):
+ """Class representing missing interface class.
+
+ Used when interface is not available from currently registered paths.
+ """
+ pass
@six.add_metaclass(ABCMeta)
-class PypeModule:
+class OpenPypeModule:
"""Base class of pype module.
Attributes:
@@ -38,7 +335,7 @@ class PypeModule:
def __init__(self, manager, settings):
self.manager = manager
- self.log = PypeLogger().get_logger(self.name)
+ self.log = PypeLogger.get_logger(self.name)
self.initialize(settings)
@@ -70,265 +367,8 @@ class PypeModule:
return {}
-@six.add_metaclass(ABCMeta)
-class IPluginPaths:
- """Module has plugin paths to return.
-
- Expected result is dictionary with keys "publish", "create", "load" or
- "actions" and values as list or string.
- {
- "publish": ["path/to/publish_plugins"]
- }
- """
- # TODO validation of an output
- @abstractmethod
- def get_plugin_paths(self):
- pass
-
-
-@six.add_metaclass(ABCMeta)
-class ILaunchHookPaths:
- """Module has launch hook paths to return.
-
- Expected result is list of paths.
- ["path/to/launch_hooks_dir"]
- """
-
- @abstractmethod
- def get_launch_hook_paths(self):
- pass
-
-
-@six.add_metaclass(ABCMeta)
-class ITrayModule:
- """Module has special procedures when used in Pype Tray.
-
- IMPORTANT:
- The module still must be usable if is not used in tray even if
- would do nothing.
- """
- tray_initialized = False
- _tray_manager = None
-
- @abstractmethod
- def tray_init(self):
- """Initialization part of tray implementation.
-
- Triggered between `initialization` and `connect_with_modules`.
-
- This is where GUIs should be loaded or tray specific parts should be
- prepared.
- """
- pass
-
- @abstractmethod
- def tray_menu(self, tray_menu):
- """Add module's action to tray menu."""
- pass
-
- @abstractmethod
- def tray_start(self):
- """Start procedure in Pype tray."""
- pass
-
- @abstractmethod
- def tray_exit(self):
- """Cleanup method which is executed on tray shutdown.
-
- This is place where all threads should be shut.
- """
- pass
-
- def execute_in_main_thread(self, callback):
- """ Pushes callback to the queue or process 'callback' on a main thread
-
- Some callbacks need to be processed on main thread (menu actions
- must be added on main thread or they won't get triggered etc.)
- """
- # called without initialized tray, still main thread needed
- if not self.tray_initialized:
- try:
- callback = self._main_thread_callbacks.popleft()
- callback()
- except:
- self.log.warning(
- "Failed to execute {} in main thread".format(callback),
- exc_info=True)
-
- return
- self.manager.tray_manager.execute_in_main_thread(callback)
-
- def show_tray_message(self, title, message, icon=None, msecs=None):
- """Show tray message.
-
- Args:
- title (str): Title of message.
- message (str): Content of message.
- icon (QSystemTrayIcon.MessageIcon): Message's icon. Default is
- Information icon, may differ by Qt version.
- msecs (int): Duration of message visibility in miliseconds.
- Default is 10000 msecs, may differ by Qt version.
- """
- if self._tray_manager:
- self._tray_manager.show_tray_message(title, message, icon, msecs)
-
- def add_doubleclick_callback(self, callback):
- if hasattr(self.manager, "add_doubleclick_callback"):
- self.manager.add_doubleclick_callback(self, callback)
-
-
-class ITrayAction(ITrayModule):
- """Implementation of Tray action.
-
- Add action to tray menu which will trigger `on_action_trigger`.
- It is expected to be used for showing tools.
-
- Methods `tray_start`, `tray_exit` and `connect_with_modules` are overriden
- as it's not expected that action will use them. But it is possible if
- necessary.
- """
-
- admin_action = False
- _admin_submenu = None
-
- @property
- @abstractmethod
- def label(self):
- """Service label showed in menu."""
- pass
-
- @abstractmethod
- def on_action_trigger(self):
- """What happens on actions click."""
- pass
-
- def tray_menu(self, tray_menu):
- from Qt import QtWidgets
-
- if self.admin_action:
- menu = self.admin_submenu(tray_menu)
- action = QtWidgets.QAction(self.label, menu)
- menu.addAction(action)
- if not menu.menuAction().isVisible():
- menu.menuAction().setVisible(True)
-
- else:
- action = QtWidgets.QAction(self.label, tray_menu)
- tray_menu.addAction(action)
-
- action.triggered.connect(self.on_action_trigger)
-
- def tray_start(self):
- return
-
- def tray_exit(self):
- return
-
- @staticmethod
- def admin_submenu(tray_menu):
- if ITrayAction._admin_submenu is None:
- from Qt import QtWidgets
-
- admin_submenu = QtWidgets.QMenu("Admin", tray_menu)
- admin_submenu.menuAction().setVisible(False)
- ITrayAction._admin_submenu = admin_submenu
- return ITrayAction._admin_submenu
-
-
-class ITrayService(ITrayModule):
- # Module's property
- menu_action = None
-
- # Class properties
- _services_submenu = None
- _icon_failed = None
- _icon_running = None
- _icon_idle = None
-
- @property
- @abstractmethod
- def label(self):
- """Service label showed in menu."""
- pass
-
- # TODO be able to get any sort of information to show/print
- # @abstractmethod
- # def get_service_info(self):
- # pass
-
- @staticmethod
- def services_submenu(tray_menu):
- if ITrayService._services_submenu is None:
- from Qt import QtWidgets
-
- services_submenu = QtWidgets.QMenu("Services", tray_menu)
- services_submenu.menuAction().setVisible(False)
- ITrayService._services_submenu = services_submenu
- return ITrayService._services_submenu
-
- @staticmethod
- def add_service_action(action):
- ITrayService._services_submenu.addAction(action)
- if not ITrayService._services_submenu.menuAction().isVisible():
- ITrayService._services_submenu.menuAction().setVisible(True)
-
- @staticmethod
- def _load_service_icons():
- from Qt import QtGui
- ITrayService._failed_icon = QtGui.QIcon(
- resources.get_resource("icons", "circle_red.png")
- )
- ITrayService._icon_running = QtGui.QIcon(
- resources.get_resource("icons", "circle_green.png")
- )
- ITrayService._icon_idle = QtGui.QIcon(
- resources.get_resource("icons", "circle_orange.png")
- )
-
- @staticmethod
- def get_icon_running():
- if ITrayService._icon_running is None:
- ITrayService._load_service_icons()
- return ITrayService._icon_running
-
- @staticmethod
- def get_icon_idle():
- if ITrayService._icon_idle is None:
- ITrayService._load_service_icons()
- return ITrayService._icon_idle
-
- @staticmethod
- def get_icon_failed():
- if ITrayService._failed_icon is None:
- ITrayService._load_service_icons()
- return ITrayService._failed_icon
-
- def tray_menu(self, tray_menu):
- from Qt import QtWidgets
- action = QtWidgets.QAction(
- self.label,
- self.services_submenu(tray_menu)
- )
- self.menu_action = action
-
- self.add_service_action(action)
-
- self.set_service_running_icon()
-
- def set_service_running_icon(self):
- """Change icon of an QAction to green circle."""
- if self.menu_action:
- self.menu_action.setIcon(self.get_icon_running())
-
- def set_service_failed_icon(self):
- """Change icon of an QAction to red circle."""
- if self.menu_action:
- self.menu_action.setIcon(self.get_icon_failed())
-
- def set_service_idle_icon(self):
- """Change icon of an QAction to orange circle."""
- if self.menu_action:
- self.menu_action.setIcon(self.get_icon_idle())
+class OpenPypeAddOn(OpenPypeModule):
+ pass
class ModulesManager:
@@ -357,6 +397,11 @@ class ModulesManager:
def initialize_modules(self):
"""Import and initialize modules."""
+ # Make sure modules are loaded
+ load_modules()
+
+ import openpype_modules
+
self.log.debug("*** Pype modules initialization.")
# Prepare settings for modules
system_settings = getattr(self, "_system_settings", None)
@@ -368,33 +413,43 @@ class ModulesManager:
time_start = time.time()
prev_start_time = time_start
- # Go through globals in `pype.modules`
- for name in dir(openpype.modules):
- modules_item = getattr(openpype.modules, name, None)
- # Filter globals that are not classes which inherit from PypeModule
- if (
- not inspect.isclass(modules_item)
- or modules_item is openpype.modules.PypeModule
- or not issubclass(modules_item, openpype.modules.PypeModule)
- ):
- continue
+ module_classes = []
+ for module in openpype_modules:
+ # Go through globals in `pype.modules`
+ for name in dir(module):
+ modules_item = getattr(module, name, None)
+ # Filter globals that are not classes which inherit from
+ # OpenPypeModule
+ if (
+ not inspect.isclass(modules_item)
+ or modules_item is OpenPypeModule
+ or not issubclass(modules_item, OpenPypeModule)
+ ):
+ continue
- # Check if class is abstract (Developing purpose)
- if inspect.isabstract(modules_item):
- # Find missing implementations by convetion on `abc` module
- not_implemented = []
- for attr_name in dir(modules_item):
- attr = getattr(modules_item, attr_name, None)
- if attr and getattr(attr, "__isabstractmethod__", None):
- not_implemented.append(attr_name)
+ # Check if class is abstract (Developing purpose)
+ if inspect.isabstract(modules_item):
+                # Find missing implementations by convention on `abc` module
+ not_implemented = []
+ for attr_name in dir(modules_item):
+ attr = getattr(modules_item, attr_name, None)
+ abs_method = getattr(
+ attr, "__isabstractmethod__", None
+ )
+ if attr and abs_method:
+ not_implemented.append(attr_name)
- # Log missing implementations
- self.log.warning((
- "Skipping abstract Class: {}. Missing implementations: {}"
- ).format(name, ", ".join(not_implemented)))
- continue
+ # Log missing implementations
+ self.log.warning((
+ "Skipping abstract Class: {}."
+ " Missing implementations: {}"
+ ).format(name, ", ".join(not_implemented)))
+ continue
+ module_classes.append(modules_item)
+ for modules_item in module_classes:
try:
+ name = modules_item.__name__
# Try initialize module
module = modules_item(self, modules_settings)
# Store initialized object
@@ -492,6 +547,8 @@ class ModulesManager:
and "actions" each containing list of paths.
"""
# Output structure
+ from openpype_interfaces import IPluginPaths
+
output = {
"publish": [],
"create": [],
@@ -544,6 +601,8 @@ class ModulesManager:
Returns:
list: Paths to launch hook directories.
"""
+ from openpype_interfaces import ILaunchHookPaths
+
str_type = type("")
expected_types = (list, tuple, set)
@@ -711,6 +770,7 @@ class TrayModulesManager(ModulesManager):
self.modules_by_id = {}
self.modules_by_name = {}
self._report = {}
+
self.tray_manager = None
self.doubleclick_callbacks = {}
@@ -743,6 +803,8 @@ class TrayModulesManager(ModulesManager):
self.tray_menu(tray_menu)
def get_enabled_tray_modules(self):
+ from openpype_interfaces import ITrayModule
+
output = []
for module in self.modules:
if module.enabled and isinstance(module, ITrayModule):
@@ -818,6 +880,8 @@ class TrayModulesManager(ModulesManager):
self._report["Tray menu"] = report
def start_modules(self):
+ from openpype_interfaces import ITrayService
+
report = {}
time_start = time.time()
prev_start_time = time_start
diff --git a/openpype/modules/deadline/deadline_module.py b/openpype/modules/deadline/deadline_module.py
deleted file mode 100644
index 2a2fba41d6..0000000000
--- a/openpype/modules/deadline/deadline_module.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import os
-from openpype.modules import (
- PypeModule, IPluginPaths)
-
-
-class DeadlineModule(PypeModule, IPluginPaths):
- name = "deadline"
-
- def initialize(self, modules_settings):
- # This module is always enabled
- deadline_settings = modules_settings[self.name]
- self.enabled = deadline_settings["enabled"]
- self.deadline_url = deadline_settings["DEADLINE_REST_URL"]
-
- def get_global_environments(self):
- """Deadline global environments for OpenPype implementation."""
- return {
- "DEADLINE_REST_URL": self.deadline_url
- }
-
- def connect_with_modules(self, *_a, **_kw):
- return
-
- def get_plugin_paths(self):
- """Deadline plugin paths."""
- current_dir = os.path.dirname(os.path.abspath(__file__))
- return {
- "publish": [os.path.join(current_dir, "plugins", "publish")]
- }
diff --git a/openpype/modules/avalon_apps/__init__.py b/openpype/modules/default_modules/avalon_apps/__init__.py
similarity index 100%
rename from openpype/modules/avalon_apps/__init__.py
rename to openpype/modules/default_modules/avalon_apps/__init__.py
diff --git a/openpype/modules/avalon_apps/avalon_app.py b/openpype/modules/default_modules/avalon_apps/avalon_app.py
similarity index 95%
rename from openpype/modules/avalon_apps/avalon_app.py
rename to openpype/modules/default_modules/avalon_apps/avalon_app.py
index 4e95f6e72b..53e06ec90a 100644
--- a/openpype/modules/avalon_apps/avalon_app.py
+++ b/openpype/modules/default_modules/avalon_apps/avalon_app.py
@@ -1,14 +1,14 @@
import os
import openpype
from openpype import resources
-from .. import (
- PypeModule,
+from openpype.modules import OpenPypeModule
+from openpype_interfaces import (
ITrayModule,
IWebServerRoutes
)
-class AvalonModule(PypeModule, ITrayModule, IWebServerRoutes):
+class AvalonModule(OpenPypeModule, ITrayModule, IWebServerRoutes):
name = "avalon"
def initialize(self, modules_settings):
diff --git a/openpype/modules/avalon_apps/rest_api.py b/openpype/modules/default_modules/avalon_apps/rest_api.py
similarity index 97%
rename from openpype/modules/avalon_apps/rest_api.py
rename to openpype/modules/default_modules/avalon_apps/rest_api.py
index b77c256398..533050fc0c 100644
--- a/openpype/modules/avalon_apps/rest_api.py
+++ b/openpype/modules/default_modules/avalon_apps/rest_api.py
@@ -1,16 +1,13 @@
import os
-import re
import json
import datetime
-import bson
from bson.objectid import ObjectId
-import bson.json_util
from aiohttp.web_response import Response
from avalon.api import AvalonMongoDB
-from openpype.modules.webserver.base_routes import RestApiEndpoint
+from openpype_modules.webserver.base_routes import RestApiEndpoint
class _RestApiEndpoint(RestApiEndpoint):
diff --git a/openpype/modules/clockify/__init__.py b/openpype/modules/default_modules/clockify/__init__.py
similarity index 100%
rename from openpype/modules/clockify/__init__.py
rename to openpype/modules/default_modules/clockify/__init__.py
diff --git a/openpype/modules/clockify/clockify_api.py b/openpype/modules/default_modules/clockify/clockify_api.py
similarity index 100%
rename from openpype/modules/clockify/clockify_api.py
rename to openpype/modules/default_modules/clockify/clockify_api.py
diff --git a/openpype/modules/clockify/clockify_module.py b/openpype/modules/default_modules/clockify/clockify_module.py
similarity index 98%
rename from openpype/modules/clockify/clockify_module.py
rename to openpype/modules/default_modules/clockify/clockify_module.py
index e3751c46b8..a9e989f4ec 100644
--- a/openpype/modules/clockify/clockify_module.py
+++ b/openpype/modules/default_modules/clockify/clockify_module.py
@@ -7,8 +7,8 @@ from .constants import (
CLOCKIFY_FTRACK_USER_PATH,
CLOCKIFY_FTRACK_SERVER_PATH
)
-from openpype.modules import (
- PypeModule,
+from openpype.modules import OpenPypeModule
+from openpype_interfaces import (
ITrayModule,
IPluginPaths,
IFtrackEventHandlerPaths,
@@ -17,7 +17,7 @@ from openpype.modules import (
class ClockifyModule(
- PypeModule,
+ OpenPypeModule,
ITrayModule,
IPluginPaths,
IFtrackEventHandlerPaths,
diff --git a/openpype/modules/clockify/constants.py b/openpype/modules/default_modules/clockify/constants.py
similarity index 100%
rename from openpype/modules/clockify/constants.py
rename to openpype/modules/default_modules/clockify/constants.py
diff --git a/openpype/modules/clockify/ftrack/server/action_clockify_sync_server.py b/openpype/modules/default_modules/clockify/ftrack/server/action_clockify_sync_server.py
similarity index 97%
rename from openpype/modules/clockify/ftrack/server/action_clockify_sync_server.py
rename to openpype/modules/default_modules/clockify/ftrack/server/action_clockify_sync_server.py
index 495f87dc7e..c6b55947da 100644
--- a/openpype/modules/clockify/ftrack/server/action_clockify_sync_server.py
+++ b/openpype/modules/default_modules/clockify/ftrack/server/action_clockify_sync_server.py
@@ -1,7 +1,7 @@
import os
import json
-from openpype.modules.ftrack.lib import ServerAction
-from openpype.modules.clockify.clockify_api import ClockifyAPI
+from openpype_modules.ftrack.lib import ServerAction
+from openpype_modules.clockify.clockify_api import ClockifyAPI
class SyncClocifyServer(ServerAction):
diff --git a/openpype/modules/clockify/ftrack/user/action_clockify_sync_local.py b/openpype/modules/default_modules/clockify/ftrack/user/action_clockify_sync_local.py
similarity index 96%
rename from openpype/modules/clockify/ftrack/user/action_clockify_sync_local.py
rename to openpype/modules/default_modules/clockify/ftrack/user/action_clockify_sync_local.py
index 4f4579a8bf..a430791906 100644
--- a/openpype/modules/clockify/ftrack/user/action_clockify_sync_local.py
+++ b/openpype/modules/default_modules/clockify/ftrack/user/action_clockify_sync_local.py
@@ -1,6 +1,6 @@
import json
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
-from openpype.modules.clockify.clockify_api import ClockifyAPI
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.clockify.clockify_api import ClockifyAPI
class SyncClocifyLocal(BaseAction):
diff --git a/openpype/modules/clockify/launcher_actions/ClockifyStart.py b/openpype/modules/default_modules/clockify/launcher_actions/ClockifyStart.py
similarity index 95%
rename from openpype/modules/clockify/launcher_actions/ClockifyStart.py
rename to openpype/modules/default_modules/clockify/launcher_actions/ClockifyStart.py
index c431ea240d..db51964eb7 100644
--- a/openpype/modules/clockify/launcher_actions/ClockifyStart.py
+++ b/openpype/modules/default_modules/clockify/launcher_actions/ClockifyStart.py
@@ -1,6 +1,6 @@
from avalon import api, io
from openpype.api import Logger
-from openpype.modules.clockify.clockify_api import ClockifyAPI
+from openpype_modules.clockify.clockify_api import ClockifyAPI
log = Logger().get_logger(__name__)
diff --git a/openpype/modules/clockify/launcher_actions/ClockifySync.py b/openpype/modules/default_modules/clockify/launcher_actions/ClockifySync.py
similarity index 97%
rename from openpype/modules/clockify/launcher_actions/ClockifySync.py
rename to openpype/modules/default_modules/clockify/launcher_actions/ClockifySync.py
index 1bb168a80b..02982d373a 100644
--- a/openpype/modules/clockify/launcher_actions/ClockifySync.py
+++ b/openpype/modules/default_modules/clockify/launcher_actions/ClockifySync.py
@@ -1,5 +1,5 @@
from avalon import api, io
-from openpype.modules.clockify.clockify_api import ClockifyAPI
+from openpype_modules.clockify.clockify_api import ClockifyAPI
from openpype.api import Logger
log = Logger().get_logger(__name__)
diff --git a/openpype/modules/clockify/widgets.py b/openpype/modules/default_modules/clockify/widgets.py
similarity index 100%
rename from openpype/modules/clockify/widgets.py
rename to openpype/modules/default_modules/clockify/widgets.py
diff --git a/openpype/modules/deadline/__init__.py b/openpype/modules/default_modules/deadline/__init__.py
similarity index 100%
rename from openpype/modules/deadline/__init__.py
rename to openpype/modules/default_modules/deadline/__init__.py
diff --git a/openpype/modules/default_modules/deadline/deadline_module.py b/openpype/modules/default_modules/deadline/deadline_module.py
new file mode 100644
index 0000000000..ada5e8225a
--- /dev/null
+++ b/openpype/modules/default_modules/deadline/deadline_module.py
@@ -0,0 +1,37 @@
+import os
+from openpype.modules import OpenPypeModule
+from openpype_interfaces import IPluginPaths
+
+
+class DeadlineModule(OpenPypeModule, IPluginPaths):
+ name = "deadline"
+
+ def __init__(self, manager, settings):
+ self.deadline_urls = {}
+ super(DeadlineModule, self).__init__(manager, settings)
+
+ def initialize(self, modules_settings):
+ # This module is always enabled
+ deadline_settings = modules_settings[self.name]
+ self.enabled = deadline_settings["enabled"]
+ deadline_url = deadline_settings.get("DEADLINE_REST_URL")
+ if deadline_url:
+ self.deadline_urls = {"default": deadline_url}
+ else:
+ self.deadline_urls = deadline_settings.get("deadline_urls") # noqa: E501
+
+ if not self.deadline_urls:
+ self.enabled = False
+ self.log.warning(("default Deadline Webservice URL "
+ "not specified. Disabling module."))
+ return
+
+ def connect_with_modules(self, *_a, **_kw):
+ return
+
+ def get_plugin_paths(self):
+ """Deadline plugin paths."""
+ current_dir = os.path.dirname(os.path.abspath(__file__))
+ return {
+ "publish": [os.path.join(current_dir, "plugins", "publish")]
+ }
diff --git a/openpype/modules/default_modules/deadline/plugins/publish/collect_deadline_server_from_instance.py b/openpype/modules/default_modules/deadline/plugins/publish/collect_deadline_server_from_instance.py
new file mode 100644
index 0000000000..784616615d
--- /dev/null
+++ b/openpype/modules/default_modules/deadline/plugins/publish/collect_deadline_server_from_instance.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+"""Collect Deadline servers from instance.
+
+This is resolving index of server lists stored in `deadlineServers` instance
+attribute or using default server if that attribute doesn't exist.
+
+"""
+import pyblish.api
+
+
+class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
+ """Collect Deadline Webservice URL from instance."""
+
+ order = pyblish.api.CollectorOrder
+ label = "Deadline Webservice from the Instance"
+ families = ["rendering"]
+
+ def process(self, instance):
+ instance.data["deadlineUrl"] = self._collect_deadline_url(instance)
+ self.log.info(
+ "Using {} for submission.".format(instance.data["deadlineUrl"]))
+
+ @staticmethod
+ def _collect_deadline_url(render_instance):
+ # type: (pyblish.api.Instance) -> str
+ """Get Deadline Webservice URL from render instance.
+
+ This will get all configured Deadline Webservice URLs and create
+ subset of them based upon project configuration. It will then take
+ `deadlineServers` from render instance that is now basically `int`
+ index of that list.
+
+ Args:
+ render_instance (pyblish.api.Instance): Render instance created
+ by Creator in Maya.
+
+ Returns:
+ str: Selected Deadline Webservice URL.
+
+ """
+
+ deadline_settings = (
+ render_instance.context.data
+ ["system_settings"]
+ ["modules"]
+ ["deadline"]
+ )
+
+ try:
+ default_servers = deadline_settings["deadline_urls"]
+ project_servers = (
+ render_instance.context.data
+ ["project_settings"]
+ ["deadline"]
+ ["deadline_servers"]
+ )
+ deadline_servers = {
+ k: default_servers[k]
+ for k in project_servers
+ if k in default_servers
+ }
+
+ except AttributeError:
+        # Handle situation where we had only one url for deadline.
+ return render_instance.context.data["defaultDeadline"]
+
+ return deadline_servers[
+ list(deadline_servers.keys())[
+ int(render_instance.data.get("deadlineServers"))
+ ]
+ ]
diff --git a/openpype/modules/default_modules/deadline/plugins/publish/collect_default_deadline_server.py b/openpype/modules/default_modules/deadline/plugins/publish/collect_default_deadline_server.py
new file mode 100644
index 0000000000..53231bd7e4
--- /dev/null
+++ b/openpype/modules/default_modules/deadline/plugins/publish/collect_default_deadline_server.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+"""Collect default Deadline server."""
+import pyblish.api
+
+
+class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
+ """Collect default Deadline Webservice URL."""
+
+ order = pyblish.api.CollectorOrder + 0.01
+ label = "Default Deadline Webservice"
+
+ def process(self, context):
+ try:
+ deadline_module = context.data.get("openPypeModules")["deadline"]
+ except AttributeError:
+ self.log.error("Cannot get OpenPype Deadline module.")
+ raise AssertionError("OpenPype Deadline module not found.")
+
+ # get default deadline webservice url from deadline module
+ self.log.debug(deadline_module.deadline_urls)
+ context.data["defaultDeadline"] = deadline_module.deadline_urls["default"] # noqa: E501
diff --git a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py b/openpype/modules/default_modules/deadline/plugins/publish/submit_aftereffects_deadline.py
similarity index 100%
rename from openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py
rename to openpype/modules/default_modules/deadline/plugins/publish/submit_aftereffects_deadline.py
diff --git a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py b/openpype/modules/default_modules/deadline/plugins/publish/submit_harmony_deadline.py
similarity index 100%
rename from openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py
rename to openpype/modules/default_modules/deadline/plugins/publish/submit_harmony_deadline.py
diff --git a/openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_remote_publish.py b/openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_remote_publish.py
new file mode 100644
index 0000000000..9ada437716
--- /dev/null
+++ b/openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_remote_publish.py
@@ -0,0 +1,153 @@
+import os
+import json
+
+import hou
+
+from avalon import api, io
+from avalon.vendor import requests
+
+import pyblish.api
+
+
+class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin):
+ """Submit Houdini scene to perform a local publish in Deadline.
+
+ Publishing in Deadline can be helpful for scenes that publish very slow.
+ This way it can process in the background on another machine without the
+ Artist having to wait for the publish to finish on their local machine.
+
+ Submission is done through the Deadline Web Service as
+ supplied via the environment variable AVALON_DEADLINE.
+
+ """
+
+ label = "Submit Scene to Deadline"
+ order = pyblish.api.IntegratorOrder
+ hosts = ["houdini"]
+ families = ["*"]
+ targets = ["deadline"]
+
+ def process(self, context):
+
+ # Ensure no errors so far
+ assert all(
+ result["success"] for result in context.data["results"]
+ ), "Errors found, aborting integration.."
+
+ # Deadline connection
+ AVALON_DEADLINE = api.Session.get(
+ "AVALON_DEADLINE", "http://localhost:8082"
+ )
+ assert AVALON_DEADLINE, "Requires AVALON_DEADLINE"
+
+ # Note that `publish` data member might change in the future.
+ # See: https://github.com/pyblish/pyblish-base/issues/307
+ actives = [i for i in context if i.data["publish"]]
+ instance_names = sorted(instance.name for instance in actives)
+
+ if not instance_names:
+ self.log.warning(
+ "No active instances found. " "Skipping submission.."
+ )
+ return
+
+ scene = context.data["currentFile"]
+ scenename = os.path.basename(scene)
+
+ # Get project code
+ project = io.find_one({"type": "project"})
+ code = project["data"].get("code", project["name"])
+
+ job_name = "{scene} [PUBLISH]".format(scene=scenename)
+ batch_name = "{code} - {scene}".format(code=code, scene=scenename)
+ deadline_user = "roy" # todo: get deadline user dynamically
+
+ # Get only major.minor version of Houdini, ignore patch version
+ version = hou.applicationVersionString()
+ version = ".".join(version.split(".")[:2])
+
+ # Generate the payload for Deadline submission
+ payload = {
+ "JobInfo": {
+ "Plugin": "Houdini",
+ "Pool": "houdini", # todo: remove hardcoded pool
+ "BatchName": batch_name,
+ "Comment": context.data.get("comment", ""),
+ "Priority": 50,
+ "Frames": "1-1", # Always trigger a single frame
+ "IsFrameDependent": False,
+ "Name": job_name,
+ "UserName": deadline_user,
+ # "Comment": instance.context.data.get("comment", ""),
+ # "InitialStatus": state
+ },
+ "PluginInfo": {
+ "Build": None, # Don't force build
+ "IgnoreInputs": True,
+ # Inputs
+ "SceneFile": scene,
+ "OutputDriver": "/out/REMOTE_PUBLISH",
+ # Mandatory for Deadline
+ "Version": version,
+ },
+ # Mandatory for Deadline, may be empty
+ "AuxFiles": [],
+ }
+
+ # Process submission per individual instance if the submission
+ # is set to publish each instance as a separate job. Else submit
+ # a single job to process all instances.
+ per_instance = context.data.get("separateJobPerInstance", False)
+ if per_instance:
+ # Submit a job per instance
+ job_name = payload["JobInfo"]["Name"]
+ for instance in instance_names:
+ # Clarify job name per submission (include instance name)
+ payload["JobInfo"]["Name"] = job_name + " - %s" % instance
+ self.submit_job(
+ payload, instances=[instance], deadline=AVALON_DEADLINE
+ )
+ else:
+ # Submit a single job
+ self.submit_job(
+ payload, instances=instance_names, deadline=AVALON_DEADLINE
+ )
+
+ def submit_job(self, payload, instances, deadline):
+
+ # Ensure we operate on a copy, a shallow copy is fine.
+ payload = payload.copy()
+
+ # Include critical environment variables with submission + api.Session
+ keys = [
+ # Submit along the current Avalon tool setup that we launched
+ # this application with so the Render Slave can build its own
+ # similar environment using it, e.g. "houdini17.5;pluginx2.3"
+ "AVALON_TOOLS",
+ ]
+
+ environment = dict(
+ {key: os.environ[key] for key in keys if key in os.environ},
+ **api.Session
+ )
+ environment["PYBLISH_ACTIVE_INSTANCES"] = ",".join(instances)
+
+ payload["JobInfo"].update(
+ {
+ "EnvironmentKeyValue%d"
+ % index: "{key}={value}".format(
+ key=key, value=environment[key]
+ )
+ for index, key in enumerate(environment)
+ }
+ )
+
+ # Submit
+ self.log.info("Submitting..")
+ self.log.debug(json.dumps(payload, indent=4, sort_keys=True))
+
+ # E.g. http://192.168.0.1:8082/api/jobs
+ url = "{}/api/jobs".format(deadline)
+ response = requests.post(url, json=payload)
+ if not response.ok:
+ raise Exception(response.text)
diff --git a/openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_render_deadline.py
new file mode 100644
index 0000000000..f471d788b6
--- /dev/null
+++ b/openpype/modules/default_modules/deadline/plugins/publish/submit_houdini_render_deadline.py
@@ -0,0 +1,158 @@
+import os
+import json
+import getpass
+
+from avalon import api
+from avalon.vendor import requests
+
+import pyblish.api
+
+import hou
+
+
+class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin):
+ """Submit Solaris USD Render ROPs to Deadline.
+
+ Renders are submitted to a Deadline Web Service as
+ supplied via the environment variable AVALON_DEADLINE.
+
+ Target "local":
+ Even though this does *not* render locally this is seen as
+ a 'local' submission as it is the regular way of submitting
+ a Houdini render locally.
+
+ """
+
+ label = "Submit Render to Deadline"
+ order = pyblish.api.IntegratorOrder
+ hosts = ["houdini"]
+ families = ["usdrender",
+ "redshift_rop"]
+ targets = ["local"]
+
+ def process(self, instance):
+
+ context = instance.context
+ code = context.data["code"]
+ filepath = context.data["currentFile"]
+ filename = os.path.basename(filepath)
+ comment = context.data.get("comment", "")
+ deadline_user = context.data.get("deadlineUser", getpass.getuser())
+ jobname = "%s - %s" % (filename, instance.name)
+
+ # Support code prefix label for batch name
+ batch_name = filename
+ if code:
+ batch_name = "{0} - {1}".format(code, batch_name)
+
+ # Output driver to render
+ driver = instance[0]
+
+ # StartFrame to EndFrame by byFrameStep
+ frames = "{start}-{end}x{step}".format(
+ start=int(instance.data["startFrame"]),
+ end=int(instance.data["endFrame"]),
+ step=int(instance.data["byFrameStep"]),
+ )
+
+ # Documentation for keys available at:
+ # https://docs.thinkboxsoftware.com
+ # /products/deadline/8.0/1_User%20Manual/manual
+ # /manual-submission.html#job-info-file-options
+ payload = {
+ "JobInfo": {
+ # Top-level group name
+ "BatchName": batch_name,
+
+ # Job name, as seen in Monitor
+ "Name": jobname,
+
+ # Arbitrary username, for visualisation in Monitor
+ "UserName": deadline_user,
+
+ "Plugin": "Houdini",
+ "Pool": "houdini_redshift", # todo: remove hardcoded pool
+ "Frames": frames,
+
+ "ChunkSize": instance.data.get("chunkSize", 10),
+
+ "Comment": comment
+ },
+ "PluginInfo": {
+ # Input
+ "SceneFile": filepath,
+ "OutputDriver": driver.path(),
+
+ # Mandatory for Deadline
+ # Houdini version without patch number
+ "Version": hou.applicationVersionString().rsplit(".", 1)[0],
+
+ "IgnoreInputs": True
+ },
+
+ # Mandatory for Deadline, may be empty
+ "AuxFiles": []
+ }
+
+ # Include critical environment variables with submission + api.Session
+ keys = [
+ # Submit along the current Avalon tool setup that we launched
+ # this application with so the Render Slave can build its own
+ # similar environment using it, e.g. "maya2018;vray4.x;yeti3.1.9"
+ "AVALON_TOOLS",
+ ]
+ environment = dict({key: os.environ[key] for key in keys
+ if key in os.environ}, **api.Session)
+
+ payload["JobInfo"].update({
+ "EnvironmentKeyValue%d" % index: "{key}={value}".format(
+ key=key,
+ value=environment[key]
+ ) for index, key in enumerate(environment)
+ })
+
+ # Include OutputFilename entries
+ # The first entry also enables double-click to preview rendered
+ # frames from Deadline Monitor
+ output_data = {}
+ for i, filepath in enumerate(instance.data["files"]):
+ dirname = os.path.dirname(filepath)
+ fname = os.path.basename(filepath)
+ output_data["OutputDirectory%d" % i] = dirname.replace("\\", "/")
+ output_data["OutputFilename%d" % i] = fname
+
+ # For now ensure destination folder exists otherwise HUSK
+ # will fail to render the output image. This is supposedly fixed
+ # in new production builds of Houdini
+ # TODO Remove this workaround with Houdini 18.0.391+
+ if not os.path.exists(dirname):
+ self.log.info("Ensuring output directory exists: %s" %
+ dirname)
+ os.makedirs(dirname)
+
+ payload["JobInfo"].update(output_data)
+
+ self.submit(instance, payload)
+
+ def submit(self, instance, payload):
+
+ AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE",
+ "http://localhost:8082")
+ assert AVALON_DEADLINE, "Requires AVALON_DEADLINE"
+
+ plugin = payload["JobInfo"]["Plugin"]
+ self.log.info("Using Render Plugin : {}".format(plugin))
+
+ self.log.info("Submitting..")
+ self.log.debug(json.dumps(payload, indent=4, sort_keys=True))
+
+ # E.g. http://192.168.0.1:8082/api/jobs
+ url = "{}/api/jobs".format(AVALON_DEADLINE)
+ response = requests.post(url, json=payload)
+ if not response.ok:
+ raise Exception(response.text)
+
+ # Store output dir for unified publisher (filesequence)
+ output_dir = os.path.dirname(instance.data["files"][0])
+ instance.data["outputDir"] = output_dir
+ instance.data["deadlineSubmissionJob"] = response.json()
diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/default_modules/deadline/plugins/publish/submit_maya_deadline.py
similarity index 98%
rename from openpype/modules/deadline/plugins/publish/submit_maya_deadline.py
rename to openpype/modules/default_modules/deadline/plugins/publish/submit_maya_deadline.py
index 6b52e4b387..1ab3dc2554 100644
--- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py
+++ b/openpype/modules/default_modules/deadline/plugins/publish/submit_maya_deadline.py
@@ -264,12 +264,13 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
self._instance = instance
self.payload_skeleton = copy.deepcopy(payload_skeleton_template)
- self._deadline_url = (
- context.data["system_settings"]
- ["modules"]
- ["deadline"]
- ["DEADLINE_REST_URL"]
- )
+
+ # get default deadline webservice url from deadline module
+ self.deadline_url = instance.context.data.get("defaultDeadline")
+ # if custom one is set in instance, use that
+ if instance.data.get("deadlineUrl"):
+ self.deadline_url = instance.data.get("deadlineUrl")
+ assert self.deadline_url, "Requires Deadline Webservice URL"
self._job_info = (
context.data["project_settings"].get(
@@ -287,8 +288,6 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
"pluginInfo", {})
)
- assert self._deadline_url, "Requires DEADLINE_REST_URL"
-
context = instance.context
workspace = context.data["workspaceDir"]
anatomy = context.data['anatomy']
@@ -683,7 +682,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
self.log.info(
"Submitting tile job(s) [{}] ...".format(len(frame_payloads)))
- url = "{}/api/jobs".format(self._deadline_url)
+ url = "{}/api/jobs".format(self.deadline_url)
tiles_count = instance.data.get("tilesX") * instance.data.get("tilesY") # noqa: E501
for tile_job in frame_payloads:
@@ -767,7 +766,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
self.log.debug(json.dumps(payload, indent=4, sort_keys=True))
# E.g. http://192.168.0.1:8082/api/jobs
- url = "{}/api/jobs".format(self._deadline_url)
+ url = "{}/api/jobs".format(self.deadline_url)
response = self._requests_post(url, json=payload)
if not response.ok:
raise Exception(response.text)
@@ -975,7 +974,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
payload = self._get_arnold_export_payload(data)
self.log.info("Submitting ass export job.")
- url = "{}/api/jobs".format(self._deadline_url)
+ url = "{}/api/jobs".format(self.deadline_url)
response = self._requests_post(url, json=payload)
if not response.ok:
self.log.error("Submition failed!")
diff --git a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/default_modules/deadline/plugins/publish/submit_nuke_deadline.py
similarity index 90%
rename from openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py
rename to openpype/modules/default_modules/deadline/plugins/publish/submit_nuke_deadline.py
index fed98d8a08..4cba35963c 100644
--- a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py
+++ b/openpype/modules/default_modules/deadline/plugins/publish/submit_nuke_deadline.py
@@ -42,13 +42,12 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
node = instance[0]
context = instance.context
- deadline_url = (
- context.data["system_settings"]
- ["modules"]
- ["deadline"]
- ["DEADLINE_REST_URL"]
- )
- assert deadline_url, "Requires DEADLINE_REST_URL"
+ # get default deadline webservice url from deadline module
+ deadline_url = instance.context.data["defaultDeadline"]
+ # if custom one is set in instance, use that
+ if instance.data.get("deadlineUrl"):
+ deadline_url = instance.data.get("deadlineUrl")
+ assert deadline_url, "Requires Deadline Webservice URL"
self.deadline_url = "{}/api/jobs".format(deadline_url)
self._comment = context.data.get("comment", "")
@@ -252,39 +251,11 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **api.Session)
- # self.log.debug("enviro: {}".format(pprint(environment)))
for _path in os.environ:
if _path.lower().startswith('openpype_'):
environment[_path] = os.environ[_path]
- clean_environment = {}
- for key, value in environment.items():
- clean_path = ""
- self.log.debug("key: {}".format(key))
- if "://" in value:
- clean_path = value
- else:
- valid_paths = []
- for path in value.split(os.pathsep):
- if not path:
- continue
- try:
- path.decode('UTF-8', 'strict')
- valid_paths.append(os.path.normpath(path))
- except UnicodeDecodeError:
- print('path contains non UTF characters')
-
- if valid_paths:
- clean_path = os.pathsep.join(valid_paths)
-
- if key == "PYTHONPATH":
- clean_path = clean_path.replace('python2', 'python3')
-
- self.log.debug("clean path: {}".format(clean_path))
- clean_environment[key] = clean_path
-
- environment = clean_environment
# to recognize job from PYPE for turning Event On/Off
environment["OPENPYPE_RENDER_JOB"] = "1"
diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/default_modules/deadline/plugins/publish/submit_publish_job.py
similarity index 98%
rename from openpype/modules/deadline/plugins/publish/submit_publish_job.py
rename to openpype/modules/default_modules/deadline/plugins/publish/submit_publish_job.py
index 41f8337fd8..19e3174384 100644
--- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py
+++ b/openpype/modules/default_modules/deadline/plugins/publish/submit_publish_job.py
@@ -5,7 +5,6 @@ import os
import json
import re
from copy import copy, deepcopy
-import sys
import openpype.api
from avalon import api, io
@@ -615,14 +614,16 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
instance["families"] = families
def process(self, instance):
+ # type: (pyblish.api.Instance) -> None
"""Process plugin.
Detect type of renderfarm submission and create and post dependend job
in case of Deadline. It creates json file with metadata needed for
publishing in directory of render.
- :param instance: Instance data
- :type instance: dict
+ Args:
+ instance (pyblish.api.Instance): Instance data.
+
"""
data = instance.data.copy()
context = instance.context
@@ -908,13 +909,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
}
if submission_type == "deadline":
- self.deadline_url = (
- context.data["system_settings"]
- ["modules"]
- ["deadline"]
- ["DEADLINE_REST_URL"]
- )
- assert self.deadline_url, "Requires DEADLINE_REST_URL"
+ # get default deadline webservice url from deadline module
+ self.deadline_url = instance.context.data["defaultDeadline"]
+ # if custom one is set in instance, use that
+ if instance.data.get("deadlineUrl"):
+ self.deadline_url = instance.data.get("deadlineUrl")
+ assert self.deadline_url, "Requires Deadline Webservice URL"
self._submit_deadline_post_job(instance, render_job, instances)
diff --git a/openpype/modules/deadline/plugins/publish/validate_deadline_connection.py b/openpype/modules/default_modules/deadline/plugins/publish/validate_deadline_connection.py
similarity index 68%
rename from openpype/modules/deadline/plugins/publish/validate_deadline_connection.py
rename to openpype/modules/default_modules/deadline/plugins/publish/validate_deadline_connection.py
index 9b10619c0b..ff664d9f83 100644
--- a/openpype/modules/deadline/plugins/publish/validate_deadline_connection.py
+++ b/openpype/modules/default_modules/deadline/plugins/publish/validate_deadline_connection.py
@@ -1,11 +1,10 @@
import pyblish.api
from avalon.vendor import requests
-from openpype.plugin import contextplugin_should_run
import os
-class ValidateDeadlineConnection(pyblish.api.ContextPlugin):
+class ValidateDeadlineConnection(pyblish.api.InstancePlugin):
"""Validate Deadline Web Service is running"""
label = "Validate Deadline Web Service"
@@ -13,18 +12,16 @@ class ValidateDeadlineConnection(pyblish.api.ContextPlugin):
hosts = ["maya", "nuke"]
families = ["renderlayer"]
- def process(self, context):
-
- # Workaround bug pyblish-base#250
- if not contextplugin_should_run(self, context):
- return
-
- deadline_url = (
- context.data["system_settings"]
- ["modules"]
- ["deadline"]
- ["DEADLINE_REST_URL"]
- )
+ def process(self, instance):
+ # get default deadline webservice url from deadline module
+ deadline_url = instance.context.data["defaultDeadline"]
+ # if custom one is set in instance, use that
+ if instance.data.get("deadlineUrl"):
+ deadline_url = instance.data.get("deadlineUrl")
+ self.log.info(
+ "We have deadline URL on instance {}".format(
+ deadline_url))
+ assert deadline_url, "Requires Deadline Webservice URL"
# Check response
response = self._requests_get(deadline_url)
diff --git a/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py b/openpype/modules/default_modules/deadline/plugins/publish/validate_expected_and_rendered_files.py
similarity index 94%
rename from openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py
rename to openpype/modules/default_modules/deadline/plugins/publish/validate_expected_and_rendered_files.py
index 305c71b035..addd4a2e80 100644
--- a/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py
+++ b/openpype/modules/default_modules/deadline/plugins/publish/validate_expected_and_rendered_files.py
@@ -4,7 +4,6 @@ import pyblish.api
from avalon.vendor import requests
-from openpype.api import get_system_settings
from openpype.lib.abstract_submit_deadline import requests_get
from openpype.lib.delivery import collect_frames
@@ -22,6 +21,7 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):
allow_user_override = True
def process(self, instance):
+ self.instance = instance
frame_list = self._get_frame_list(instance.data["render_job_id"])
for repre in instance.data["representations"]:
@@ -129,13 +129,12 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):
Might be different than job info saved in metadata.json if user
manually changes job pre/during rendering.
"""
- deadline_url = (
- get_system_settings()
- ["modules"]
- ["deadline"]
- ["DEADLINE_REST_URL"]
- )
- assert deadline_url, "Requires DEADLINE_REST_URL"
+ # get default deadline webservice url from deadline module
+ deadline_url = self.instance.context.data["defaultDeadline"]
+ # if custom one is set in instance, use that
+ if self.instance.data.get("deadlineUrl"):
+ deadline_url = self.instance.data.get("deadlineUrl")
+ assert deadline_url, "Requires Deadline Webservice URL"
url = "{}/api/jobs?JobID={}".format(deadline_url, job_id)
try:
diff --git a/openpype/modules/ftrack/__init__.py b/openpype/modules/default_modules/ftrack/__init__.py
similarity index 67%
rename from openpype/modules/ftrack/__init__.py
rename to openpype/modules/default_modules/ftrack/__init__.py
index c1a557812c..7261254c6f 100644
--- a/openpype/modules/ftrack/__init__.py
+++ b/openpype/modules/default_modules/ftrack/__init__.py
@@ -1,11 +1,9 @@
from .ftrack_module import (
FtrackModule,
- IFtrackEventHandlerPaths,
FTRACK_MODULE_DIR
)
__all__ = (
"FtrackModule",
- "IFtrackEventHandlerPaths",
"FTRACK_MODULE_DIR"
)
diff --git a/openpype/modules/ftrack/event_handlers_server/action_clone_review_session.py b/openpype/modules/default_modules/ftrack/event_handlers_server/action_clone_review_session.py
similarity index 98%
rename from openpype/modules/ftrack/event_handlers_server/action_clone_review_session.py
rename to openpype/modules/default_modules/ftrack/event_handlers_server/action_clone_review_session.py
index 59c8bffb75..1ad7a17785 100644
--- a/openpype/modules/ftrack/event_handlers_server/action_clone_review_session.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_server/action_clone_review_session.py
@@ -1,6 +1,6 @@
import json
-from openpype.modules.ftrack.lib import ServerAction
+from openpype_modules.ftrack.lib import ServerAction
def clone_review_session(session, entity):
diff --git a/openpype/modules/ftrack/event_handlers_server/action_multiple_notes.py b/openpype/modules/default_modules/ftrack/event_handlers_server/action_multiple_notes.py
similarity index 98%
rename from openpype/modules/ftrack/event_handlers_server/action_multiple_notes.py
rename to openpype/modules/default_modules/ftrack/event_handlers_server/action_multiple_notes.py
index 9ad7b1a969..f9aac2c80a 100644
--- a/openpype/modules/ftrack/event_handlers_server/action_multiple_notes.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_server/action_multiple_notes.py
@@ -1,4 +1,4 @@
-from openpype.modules.ftrack.lib import ServerAction
+from openpype_modules.ftrack.lib import ServerAction
class MultipleNotesServer(ServerAction):
diff --git a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py b/openpype/modules/default_modules/ftrack/event_handlers_server/action_prepare_project.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_server/action_prepare_project.py
rename to openpype/modules/default_modules/ftrack/event_handlers_server/action_prepare_project.py
index 3a96ae3311..85317031b2 100644
--- a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_server/action_prepare_project.py
@@ -4,7 +4,7 @@ from avalon.api import AvalonMongoDB
from openpype.api import ProjectSettings
from openpype.lib import create_project
-from openpype.modules.ftrack.lib import (
+from openpype_modules.ftrack.lib import (
ServerAction,
get_openpype_attr,
CUST_ATTR_AUTO_SYNC
diff --git a/openpype/modules/ftrack/event_handlers_server/action_private_project_detection.py b/openpype/modules/default_modules/ftrack/event_handlers_server/action_private_project_detection.py
similarity index 97%
rename from openpype/modules/ftrack/event_handlers_server/action_private_project_detection.py
rename to openpype/modules/default_modules/ftrack/event_handlers_server/action_private_project_detection.py
index 5213e10ba3..62772740cd 100644
--- a/openpype/modules/ftrack/event_handlers_server/action_private_project_detection.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_server/action_private_project_detection.py
@@ -1,4 +1,4 @@
-from openpype.modules.ftrack.lib import ServerAction
+from openpype_modules.ftrack.lib import ServerAction
class PrivateProjectDetectionAction(ServerAction):
diff --git a/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py b/openpype/modules/default_modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py
rename to openpype/modules/default_modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py
index b38e18d089..3f63ce6fac 100644
--- a/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py
@@ -2,7 +2,7 @@ import sys
import json
import collections
import ftrack_api
-from openpype.modules.ftrack.lib import ServerAction
+from openpype_modules.ftrack.lib import ServerAction
class PushHierValuesToNonHier(ServerAction):
diff --git a/openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py b/openpype/modules/default_modules/ftrack/event_handlers_server/action_sync_to_avalon.py
similarity index 98%
rename from openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py
rename to openpype/modules/default_modules/ftrack/event_handlers_server/action_sync_to_avalon.py
index 8f78f998ac..d449c4b7df 100644
--- a/openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_server/action_sync_to_avalon.py
@@ -1,8 +1,8 @@
import time
import traceback
-from openpype.modules.ftrack.lib import ServerAction
-from openpype.modules.ftrack.lib.avalon_sync import SyncEntitiesFactory
+from openpype_modules.ftrack.lib import ServerAction
+from openpype_modules.ftrack.lib.avalon_sync import SyncEntitiesFactory
class SyncToAvalonServer(ServerAction):
diff --git a/openpype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py b/openpype/modules/default_modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py
similarity index 90%
rename from openpype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py
rename to openpype/modules/default_modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py
index 078596cc2e..35b5d809fd 100644
--- a/openpype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py
@@ -1,6 +1,6 @@
-from openpype.modules.ftrack.lib import BaseEvent
-from openpype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
-from openpype.modules.ftrack.event_handlers_server.event_sync_to_avalon import (
+from openpype_modules.ftrack.lib import BaseEvent
+from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
+from openpype_modules.ftrack.event_handlers_server.event_sync_to_avalon import (
SyncToAvalonEvent
)
diff --git a/openpype/modules/ftrack/event_handlers_server/event_first_version_status.py b/openpype/modules/default_modules/ftrack/event_handlers_server/event_first_version_status.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_server/event_first_version_status.py
rename to openpype/modules/default_modules/ftrack/event_handlers_server/event_first_version_status.py
index 511f62a207..ecc6c95d90 100644
--- a/openpype/modules/ftrack/event_handlers_server/event_first_version_status.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_server/event_first_version_status.py
@@ -1,4 +1,4 @@
-from openpype.modules.ftrack.lib import BaseEvent
+from openpype_modules.ftrack.lib import BaseEvent
class FirstVersionStatus(BaseEvent):
diff --git a/openpype/modules/ftrack/event_handlers_server/event_next_task_update.py b/openpype/modules/default_modules/ftrack/event_handlers_server/event_next_task_update.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_server/event_next_task_update.py
rename to openpype/modules/default_modules/ftrack/event_handlers_server/event_next_task_update.py
index ad62beb296..a65ae46545 100644
--- a/openpype/modules/ftrack/event_handlers_server/event_next_task_update.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_server/event_next_task_update.py
@@ -1,5 +1,5 @@
import collections
-from openpype.modules.ftrack.lib import BaseEvent
+from openpype_modules.ftrack.lib import BaseEvent
class NextTaskUpdate(BaseEvent):
diff --git a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py b/openpype/modules/default_modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py
rename to openpype/modules/default_modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py
index 81719258e1..10b165e7f6 100644
--- a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py
@@ -2,7 +2,7 @@ import collections
import datetime
import ftrack_api
-from openpype.modules.ftrack.lib import (
+from openpype_modules.ftrack.lib import (
BaseEvent,
query_custom_attributes
)
diff --git a/openpype/modules/ftrack/event_handlers_server/event_radio_buttons.py b/openpype/modules/default_modules/ftrack/event_handlers_server/event_radio_buttons.py
similarity index 96%
rename from openpype/modules/ftrack/event_handlers_server/event_radio_buttons.py
rename to openpype/modules/default_modules/ftrack/event_handlers_server/event_radio_buttons.py
index 1ebd7b68d2..99ad3aec37 100644
--- a/openpype/modules/ftrack/event_handlers_server/event_radio_buttons.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_server/event_radio_buttons.py
@@ -1,5 +1,5 @@
import ftrack_api
-from openpype.modules.ftrack.lib import BaseEvent
+from openpype_modules.ftrack.lib import BaseEvent
class RadioButtons(BaseEvent):
diff --git a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/openpype/modules/default_modules/ftrack/event_handlers_server/event_sync_to_avalon.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py
rename to openpype/modules/default_modules/ftrack/event_handlers_server/event_sync_to_avalon.py
index 1dd056adee..93a0404c0b 100644
--- a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_server/event_sync_to_avalon.py
@@ -17,7 +17,7 @@ import ftrack_api
from avalon import schema
from avalon.api import AvalonMongoDB
-from openpype.modules.ftrack.lib import (
+from openpype_modules.ftrack.lib import (
get_openpype_attr,
CUST_ATTR_ID_KEY,
CUST_ATTR_AUTO_SYNC,
diff --git a/openpype/modules/ftrack/event_handlers_server/event_task_to_parent_status.py b/openpype/modules/default_modules/ftrack/event_handlers_server/event_task_to_parent_status.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_server/event_task_to_parent_status.py
rename to openpype/modules/default_modules/ftrack/event_handlers_server/event_task_to_parent_status.py
index 4192a4bed0..a0e039926e 100644
--- a/openpype/modules/ftrack/event_handlers_server/event_task_to_parent_status.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_server/event_task_to_parent_status.py
@@ -1,5 +1,5 @@
import collections
-from openpype.modules.ftrack.lib import BaseEvent
+from openpype_modules.ftrack.lib import BaseEvent
class TaskStatusToParent(BaseEvent):
diff --git a/openpype/modules/ftrack/event_handlers_server/event_task_to_version_status.py b/openpype/modules/default_modules/ftrack/event_handlers_server/event_task_to_version_status.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_server/event_task_to_version_status.py
rename to openpype/modules/default_modules/ftrack/event_handlers_server/event_task_to_version_status.py
index f2d3723021..b77849c678 100644
--- a/openpype/modules/ftrack/event_handlers_server/event_task_to_version_status.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_server/event_task_to_version_status.py
@@ -1,5 +1,5 @@
import collections
-from openpype.modules.ftrack.lib import BaseEvent
+from openpype_modules.ftrack.lib import BaseEvent
class TaskToVersionStatus(BaseEvent):
diff --git a/openpype/modules/ftrack/event_handlers_server/event_thumbnail_updates.py b/openpype/modules/default_modules/ftrack/event_handlers_server/event_thumbnail_updates.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_server/event_thumbnail_updates.py
rename to openpype/modules/default_modules/ftrack/event_handlers_server/event_thumbnail_updates.py
index cbeeeee5c5..64673f792c 100644
--- a/openpype/modules/ftrack/event_handlers_server/event_thumbnail_updates.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_server/event_thumbnail_updates.py
@@ -1,5 +1,5 @@
import collections
-from openpype.modules.ftrack.lib import BaseEvent
+from openpype_modules.ftrack.lib import BaseEvent
class ThumbnailEvents(BaseEvent):
diff --git a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py b/openpype/modules/default_modules/ftrack/event_handlers_server/event_user_assigment.py
similarity index 98%
rename from openpype/modules/ftrack/event_handlers_server/event_user_assigment.py
rename to openpype/modules/default_modules/ftrack/event_handlers_server/event_user_assigment.py
index a0734e14a1..efc1e76775 100644
--- a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_server/event_user_assigment.py
@@ -2,8 +2,8 @@ import os
import re
import subprocess
-from openpype.modules.ftrack.lib import BaseEvent
-from openpype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
+from openpype_modules.ftrack.lib import BaseEvent
+from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
from avalon.api import AvalonMongoDB
from bson.objectid import ObjectId
diff --git a/openpype/modules/ftrack/event_handlers_server/event_version_to_task_statuses.py b/openpype/modules/default_modules/ftrack/event_handlers_server/event_version_to_task_statuses.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_server/event_version_to_task_statuses.py
rename to openpype/modules/default_modules/ftrack/event_handlers_server/event_version_to_task_statuses.py
index f215bedcc2..e36c3eecd9 100644
--- a/openpype/modules/ftrack/event_handlers_server/event_version_to_task_statuses.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_server/event_version_to_task_statuses.py
@@ -1,4 +1,4 @@
-from openpype.modules.ftrack.lib import BaseEvent
+from openpype_modules.ftrack.lib import BaseEvent
class VersionToTaskStatus(BaseEvent):
diff --git a/openpype/modules/ftrack/event_handlers_user/action_applications.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_applications.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_user/action_applications.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_applications.py
index 74d14c2fc4..6d45d43958 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_applications.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_applications.py
@@ -1,7 +1,7 @@
import os
from uuid import uuid4
-from openpype.modules.ftrack.lib import BaseAction
+from openpype_modules.ftrack.lib import BaseAction
from openpype.lib import (
ApplicationManager,
ApplicationLaunchFailed,
diff --git a/openpype/modules/ftrack/event_handlers_user/action_batch_task_creation.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_batch_task_creation.py
similarity index 98%
rename from openpype/modules/ftrack/event_handlers_user/action_batch_task_creation.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_batch_task_creation.py
index b9f0e7c5d3..c7fb1af98b 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_batch_task_creation.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_batch_task_creation.py
@@ -2,7 +2,7 @@
Taken from https://github.com/tokejepsen/ftrack-hooks/tree/master/batch_tasks
"""
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
class BatchTasksAction(BaseAction):
diff --git a/openpype/modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py
similarity index 98%
rename from openpype/modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py
index 45cc9adf55..dc97ed972d 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py
@@ -1,6 +1,6 @@
import collections
import ftrack_api
-from openpype.modules.ftrack.lib import (
+from openpype_modules.ftrack.lib import (
BaseAction,
statics_icon,
get_openpype_attr
diff --git a/openpype/modules/ftrack/event_handlers_user/action_client_review_sort.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_client_review_sort.py
similarity index 97%
rename from openpype/modules/ftrack/event_handlers_user/action_client_review_sort.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_client_review_sort.py
index 7c9a2881d6..5ad5f10e8e 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_client_review_sort.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_client_review_sort.py
@@ -1,4 +1,4 @@
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
try:
from functools import cmp_to_key
except Exception:
diff --git a/openpype/modules/ftrack/event_handlers_user/action_component_open.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_component_open.py
similarity index 96%
rename from openpype/modules/ftrack/event_handlers_user/action_component_open.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_component_open.py
index b3cdac0722..c731713c10 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_component_open.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_component_open.py
@@ -1,7 +1,7 @@
import os
import sys
import subprocess
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
class ComponentOpen(BaseAction):
diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_create_cust_attrs.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_create_cust_attrs.py
index 63605eda5e..3869d8ad08 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_create_cust_attrs.py
@@ -2,7 +2,7 @@ import collections
import json
import arrow
import ftrack_api
-from openpype.modules.ftrack.lib import (
+from openpype_modules.ftrack.lib import (
BaseAction,
statics_icon,
@@ -43,7 +43,7 @@ dictionary level, task's attributes are nested more.
group (string)
- name of group
- - based on attribute `openpype.modules.ftrack.lib.CUST_ATTR_GROUP`
+ - based on attribute `openpype_modules.ftrack.lib.CUST_ATTR_GROUP`
- "pype" by default
*** Required ***************************************************************
diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_create_folders.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_user/action_create_folders.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_create_folders.py
index 075b8d3d25..994dbd90e4 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_create_folders.py
@@ -1,5 +1,5 @@
import os
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
from avalon import lib as avalonlib
from openpype.api import (
Anatomy,
diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_create_project_structure.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_create_project_structure.py
index 035a1c60de..121c9f652b 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_create_project_structure.py
@@ -2,7 +2,7 @@ import os
import re
import json
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
from openpype.api import Anatomy, get_project_settings
diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_delete_asset.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_user/action_delete_asset.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_delete_asset.py
index c20491349f..f860065b26 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_delete_asset.py
@@ -4,7 +4,7 @@ from datetime import datetime
from queue import Queue
from bson.objectid import ObjectId
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
from avalon.api import AvalonMongoDB
diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_delete_old_versions.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_delete_old_versions.py
index dbddc7a95e..063f086e9c 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_delete_old_versions.py
@@ -5,7 +5,7 @@ import uuid
import clique
from pymongo import UpdateOne
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
from avalon.api import AvalonMongoDB
from openpype.api import Anatomy
diff --git a/openpype/modules/ftrack/event_handlers_user/action_delivery.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_delivery.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_user/action_delivery.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_delivery.py
index 2e7599647a..1f28b18900 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_delivery.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_delivery.py
@@ -6,8 +6,8 @@ import collections
from bson.objectid import ObjectId
from openpype.api import Anatomy, config
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
-from openpype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
from openpype.lib.delivery import (
path_from_representation,
get_format_dict,
diff --git a/openpype/modules/ftrack/event_handlers_user/action_djvview.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_djvview.py
similarity index 98%
rename from openpype/modules/ftrack/event_handlers_user/action_djvview.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_djvview.py
index c05fbed2d0..c603a2d200 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_djvview.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_djvview.py
@@ -1,7 +1,7 @@
import os
import subprocess
from operator import itemgetter
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
class DJVViewAction(BaseAction):
diff --git a/openpype/modules/ftrack/event_handlers_user/action_job_killer.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_job_killer.py
similarity index 98%
rename from openpype/modules/ftrack/event_handlers_user/action_job_killer.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_job_killer.py
index 47ed1e7895..af24e0280d 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_job_killer.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_job_killer.py
@@ -1,5 +1,5 @@
import json
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
class JobKiller(BaseAction):
diff --git a/openpype/modules/ftrack/event_handlers_user/action_multiple_notes.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_multiple_notes.py
similarity index 98%
rename from openpype/modules/ftrack/event_handlers_user/action_multiple_notes.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_multiple_notes.py
index f5af044de0..825fd97b06 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_multiple_notes.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_multiple_notes.py
@@ -1,4 +1,4 @@
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
class MultipleNotes(BaseAction):
diff --git a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_prepare_project.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_user/action_prepare_project.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_prepare_project.py
index 4b42500e8f..87d3329179 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_prepare_project.py
@@ -4,7 +4,7 @@ from avalon.api import AvalonMongoDB
from openpype.api import ProjectSettings
from openpype.lib import create_project
-from openpype.modules.ftrack.lib import (
+from openpype_modules.ftrack.lib import (
BaseAction,
statics_icon,
get_openpype_attr,
diff --git a/openpype/modules/ftrack/event_handlers_user/action_rv.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_rv.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_user/action_rv.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_rv.py
index 3172b74261..71d790f7e7 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_rv.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_rv.py
@@ -3,7 +3,7 @@ import subprocess
import traceback
import json
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
import ftrack_api
from avalon import io, api
diff --git a/openpype/modules/ftrack/event_handlers_user/action_seed.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_seed.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_user/action_seed.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_seed.py
index 1f01f0af1d..4021d70c0a 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_seed.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_seed.py
@@ -1,6 +1,6 @@
import os
from operator import itemgetter
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
class SeedDebugProject(BaseAction):
diff --git a/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py
similarity index 99%
rename from openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py
index 4464e51d3d..4820925844 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py
@@ -4,11 +4,11 @@ import json
import requests
from bson.objectid import ObjectId
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
from openpype.api import Anatomy
from avalon.api import AvalonMongoDB
-from openpype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
+from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
class StoreThumbnailsToAvalon(BaseAction):
diff --git a/openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_sync_to_avalon.py
similarity index 98%
rename from openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_sync_to_avalon.py
index 89fac7cf80..d6ca561bbe 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_sync_to_avalon.py
@@ -1,8 +1,8 @@
import time
import traceback
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
-from openpype.modules.ftrack.lib.avalon_sync import SyncEntitiesFactory
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib.avalon_sync import SyncEntitiesFactory
class SyncToAvalonLocal(BaseAction):
diff --git a/openpype/modules/ftrack/event_handlers_user/action_test.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_test.py
similarity index 89%
rename from openpype/modules/ftrack/event_handlers_user/action_test.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_test.py
index 206c67de50..bd71ba5bf9 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_test.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_test.py
@@ -1,4 +1,4 @@
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
class TestAction(BaseAction):
diff --git a/openpype/modules/ftrack/event_handlers_user/action_thumbnail_to_childern.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_thumbnail_to_childern.py
similarity index 96%
rename from openpype/modules/ftrack/event_handlers_user/action_thumbnail_to_childern.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_thumbnail_to_childern.py
index a12f25b57d..3b90960160 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_thumbnail_to_childern.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_thumbnail_to_childern.py
@@ -1,5 +1,5 @@
import json
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
class ThumbToChildren(BaseAction):
diff --git a/openpype/modules/ftrack/event_handlers_user/action_thumbnail_to_parent.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_thumbnail_to_parent.py
similarity index 97%
rename from openpype/modules/ftrack/event_handlers_user/action_thumbnail_to_parent.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_thumbnail_to_parent.py
index 284723bb0f..2f0110b7aa 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_thumbnail_to_parent.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_thumbnail_to_parent.py
@@ -1,5 +1,5 @@
import json
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
class ThumbToParent(BaseAction):
diff --git a/openpype/modules/ftrack/event_handlers_user/action_where_run_ask.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_where_run_ask.py
similarity index 97%
rename from openpype/modules/ftrack/event_handlers_user/action_where_run_ask.py
rename to openpype/modules/default_modules/ftrack/event_handlers_user/action_where_run_ask.py
index 2c427cfff7..0d69913996 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_where_run_ask.py
+++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_where_run_ask.py
@@ -2,7 +2,7 @@ import platform
import socket
import getpass
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
+from openpype_modules.ftrack.lib import BaseAction
class ActionWhereIRun(BaseAction):
diff --git a/openpype/modules/ftrack/ftrack_module.py b/openpype/modules/default_modules/ftrack/ftrack_module.py
similarity index 96%
rename from openpype/modules/ftrack/ftrack_module.py
rename to openpype/modules/default_modules/ftrack/ftrack_module.py
index ee139a500e..1de152535c 100644
--- a/openpype/modules/ftrack/ftrack_module.py
+++ b/openpype/modules/default_modules/ftrack/ftrack_module.py
@@ -1,35 +1,24 @@
import os
import json
import collections
-from abc import ABCMeta, abstractmethod
-import six
import openpype
-from openpype.modules import (
- PypeModule,
+from openpype.modules import OpenPypeModule
+
+from openpype_interfaces import (
ITrayModule,
IPluginPaths,
ITimersManager,
ILaunchHookPaths,
- ISettingsChangeListener
+ ISettingsChangeListener,
+ IFtrackEventHandlerPaths
)
from openpype.settings import SaveWarningExc
FTRACK_MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
-@six.add_metaclass(ABCMeta)
-class IFtrackEventHandlerPaths:
- """Other modules interface to return paths to ftrack event handlers.
-
- Expected output is dictionary with "server" and "user" keys.
- """
- @abstractmethod
- def get_event_handler_paths(self):
- pass
-
-
class FtrackModule(
- PypeModule,
+ OpenPypeModule,
ITrayModule,
IPluginPaths,
ITimersManager,
@@ -242,7 +231,7 @@ class FtrackModule(
return
import ftrack_api
- from openpype.modules.ftrack.lib import get_openpype_attr
+ from openpype_modules.ftrack.lib import get_openpype_attr
try:
session = self.create_ftrack_session()
diff --git a/openpype/modules/ftrack/ftrack_server/__init__.py b/openpype/modules/default_modules/ftrack/ftrack_server/__init__.py
similarity index 100%
rename from openpype/modules/ftrack/ftrack_server/__init__.py
rename to openpype/modules/default_modules/ftrack/ftrack_server/__init__.py
diff --git a/openpype/modules/ftrack/ftrack_server/event_server_cli.py b/openpype/modules/default_modules/ftrack/ftrack_server/event_server_cli.py
similarity index 97%
rename from openpype/modules/ftrack/ftrack_server/event_server_cli.py
rename to openpype/modules/default_modules/ftrack/ftrack_server/event_server_cli.py
index 8bba22b475..d8e4d05580 100644
--- a/openpype/modules/ftrack/ftrack_server/event_server_cli.py
+++ b/openpype/modules/default_modules/ftrack/ftrack_server/event_server_cli.py
@@ -18,17 +18,10 @@ from openpype.lib import (
get_pype_execute_args,
OpenPypeMongoConnection
)
-from openpype.modules.ftrack import FTRACK_MODULE_DIR
-from openpype.modules.ftrack.lib import (
- credentials,
- get_ftrack_url_from_settings
-)
-from openpype.modules.ftrack.ftrack_server.lib import (
- check_ftrack_url,
- get_ftrack_event_mongo_info
-)
-
-from openpype.modules.ftrack.ftrack_server import socket_thread
+from openpype_modules.ftrack import FTRACK_MODULE_DIR
+from openpype_modules.ftrack.lib import credentials
+from openpype_modules.ftrack.ftrack_server.lib import check_ftrack_url
+from openpype_modules.ftrack.ftrack_server import socket_thread
class MongoPermissionsError(Exception):
diff --git a/openpype/modules/ftrack/ftrack_server/ftrack_server.py b/openpype/modules/default_modules/ftrack/ftrack_server/ftrack_server.py
similarity index 100%
rename from openpype/modules/ftrack/ftrack_server/ftrack_server.py
rename to openpype/modules/default_modules/ftrack/ftrack_server/ftrack_server.py
diff --git a/openpype/modules/ftrack/ftrack_server/lib.py b/openpype/modules/default_modules/ftrack/ftrack_server/lib.py
similarity index 99%
rename from openpype/modules/ftrack/ftrack_server/lib.py
rename to openpype/modules/default_modules/ftrack/ftrack_server/lib.py
index 88f849e765..e80d6a3a6b 100644
--- a/openpype/modules/ftrack/ftrack_server/lib.py
+++ b/openpype/modules/default_modules/ftrack/ftrack_server/lib.py
@@ -22,7 +22,7 @@ try:
from weakref import WeakMethod
except ImportError:
from ftrack_api._weakref import WeakMethod
-from openpype.modules.ftrack.lib import get_ftrack_event_mongo_info
+from openpype_modules.ftrack.lib import get_ftrack_event_mongo_info
from openpype.lib import OpenPypeMongoConnection
from openpype.api import Logger
diff --git a/openpype/modules/ftrack/ftrack_server/socket_thread.py b/openpype/modules/default_modules/ftrack/ftrack_server/socket_thread.py
similarity index 100%
rename from openpype/modules/ftrack/ftrack_server/socket_thread.py
rename to openpype/modules/default_modules/ftrack/ftrack_server/socket_thread.py
diff --git a/openpype/modules/default_modules/ftrack/interfaces.py b/openpype/modules/default_modules/ftrack/interfaces.py
new file mode 100644
index 0000000000..16ce0d2e62
--- /dev/null
+++ b/openpype/modules/default_modules/ftrack/interfaces.py
@@ -0,0 +1,12 @@
+from abc import abstractmethod
+from openpype.modules import OpenPypeInterface
+
+
+class IFtrackEventHandlerPaths(OpenPypeInterface):
+ """Other modules interface to return paths to ftrack event handlers.
+
+ Expected output is dictionary with "server" and "user" keys.
+ """
+ @abstractmethod
+ def get_event_handler_paths(self):
+ pass
diff --git a/openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py b/openpype/modules/default_modules/ftrack/launch_hooks/post_ftrack_changes.py
similarity index 100%
rename from openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py
rename to openpype/modules/default_modules/ftrack/launch_hooks/post_ftrack_changes.py
diff --git a/openpype/modules/ftrack/launch_hooks/pre_python2_vendor.py b/openpype/modules/default_modules/ftrack/launch_hooks/pre_python2_vendor.py
similarity index 96%
rename from openpype/modules/ftrack/launch_hooks/pre_python2_vendor.py
rename to openpype/modules/default_modules/ftrack/launch_hooks/pre_python2_vendor.py
index d34b6533fb..0dd894bebf 100644
--- a/openpype/modules/ftrack/launch_hooks/pre_python2_vendor.py
+++ b/openpype/modules/default_modules/ftrack/launch_hooks/pre_python2_vendor.py
@@ -1,6 +1,6 @@
import os
from openpype.lib import PreLaunchHook
-from openpype.modules.ftrack import FTRACK_MODULE_DIR
+from openpype_modules.ftrack import FTRACK_MODULE_DIR
class PrePython2Support(PreLaunchHook):
diff --git a/openpype/modules/ftrack/lib/__init__.py b/openpype/modules/default_modules/ftrack/lib/__init__.py
similarity index 100%
rename from openpype/modules/ftrack/lib/__init__.py
rename to openpype/modules/default_modules/ftrack/lib/__init__.py
diff --git a/openpype/modules/ftrack/lib/avalon_sync.py b/openpype/modules/default_modules/ftrack/lib/avalon_sync.py
similarity index 100%
rename from openpype/modules/ftrack/lib/avalon_sync.py
rename to openpype/modules/default_modules/ftrack/lib/avalon_sync.py
diff --git a/openpype/modules/ftrack/lib/constants.py b/openpype/modules/default_modules/ftrack/lib/constants.py
similarity index 100%
rename from openpype/modules/ftrack/lib/constants.py
rename to openpype/modules/default_modules/ftrack/lib/constants.py
diff --git a/openpype/modules/ftrack/lib/credentials.py b/openpype/modules/default_modules/ftrack/lib/credentials.py
similarity index 100%
rename from openpype/modules/ftrack/lib/credentials.py
rename to openpype/modules/default_modules/ftrack/lib/credentials.py
diff --git a/openpype/modules/ftrack/lib/custom_attributes.json b/openpype/modules/default_modules/ftrack/lib/custom_attributes.json
similarity index 100%
rename from openpype/modules/ftrack/lib/custom_attributes.json
rename to openpype/modules/default_modules/ftrack/lib/custom_attributes.json
diff --git a/openpype/modules/ftrack/lib/custom_attributes.py b/openpype/modules/default_modules/ftrack/lib/custom_attributes.py
similarity index 100%
rename from openpype/modules/ftrack/lib/custom_attributes.py
rename to openpype/modules/default_modules/ftrack/lib/custom_attributes.py
diff --git a/openpype/modules/ftrack/lib/ftrack_action_handler.py b/openpype/modules/default_modules/ftrack/lib/ftrack_action_handler.py
similarity index 100%
rename from openpype/modules/ftrack/lib/ftrack_action_handler.py
rename to openpype/modules/default_modules/ftrack/lib/ftrack_action_handler.py
diff --git a/openpype/modules/ftrack/lib/ftrack_base_handler.py b/openpype/modules/default_modules/ftrack/lib/ftrack_base_handler.py
similarity index 99%
rename from openpype/modules/ftrack/lib/ftrack_base_handler.py
rename to openpype/modules/default_modules/ftrack/lib/ftrack_base_handler.py
index 7b7ebfb099..7027154d86 100644
--- a/openpype/modules/ftrack/lib/ftrack_base_handler.py
+++ b/openpype/modules/default_modules/ftrack/lib/ftrack_base_handler.py
@@ -10,7 +10,7 @@ from openpype.api import Logger
from openpype.settings import get_project_settings
import ftrack_api
-from openpype.modules.ftrack import ftrack_server
+from openpype_modules.ftrack import ftrack_server
class MissingPermision(Exception):
diff --git a/openpype/modules/ftrack/lib/ftrack_event_handler.py b/openpype/modules/default_modules/ftrack/lib/ftrack_event_handler.py
similarity index 100%
rename from openpype/modules/ftrack/lib/ftrack_event_handler.py
rename to openpype/modules/default_modules/ftrack/lib/ftrack_event_handler.py
diff --git a/openpype/modules/ftrack/lib/settings.py b/openpype/modules/default_modules/ftrack/lib/settings.py
similarity index 100%
rename from openpype/modules/ftrack/lib/settings.py
rename to openpype/modules/default_modules/ftrack/lib/settings.py
diff --git a/openpype/modules/ftrack/plugins/_unused_publish/integrate_ftrack_comments.py b/openpype/modules/default_modules/ftrack/plugins/_unused_publish/integrate_ftrack_comments.py
similarity index 100%
rename from openpype/modules/ftrack/plugins/_unused_publish/integrate_ftrack_comments.py
rename to openpype/modules/default_modules/ftrack/plugins/_unused_publish/integrate_ftrack_comments.py
diff --git a/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py b/openpype/modules/default_modules/ftrack/plugins/publish/collect_ftrack_api.py
similarity index 100%
rename from openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py
rename to openpype/modules/default_modules/ftrack/plugins/publish/collect_ftrack_api.py
diff --git a/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py b/openpype/modules/default_modules/ftrack/plugins/publish/collect_ftrack_family.py
similarity index 96%
rename from openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py
rename to openpype/modules/default_modules/ftrack/plugins/publish/collect_ftrack_family.py
index 8464a43ef7..cc2a5b7d37 100644
--- a/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py
+++ b/openpype/modules/default_modules/ftrack/plugins/publish/collect_ftrack_family.py
@@ -63,8 +63,9 @@ class CollectFtrackFamily(pyblish.api.InstancePlugin):
self.log.debug("Adding ftrack family for '{}'".
format(instance.data.get("family")))
- if families and "ftrack" not in families:
- instance.data["families"].append("ftrack")
+ if families:
+ if "ftrack" not in families:
+ instance.data["families"].append("ftrack")
else:
instance.data["families"] = ["ftrack"]
else:
diff --git a/openpype/modules/default_modules/ftrack/plugins/publish/collect_username.py b/openpype/modules/default_modules/ftrack/plugins/publish/collect_username.py
new file mode 100644
index 0000000000..39b7433e11
--- /dev/null
+++ b/openpype/modules/default_modules/ftrack/plugins/publish/collect_username.py
@@ -0,0 +1,49 @@
+"""Loads publishing context from json and continues in publish process.
+
+Requires:
+ anatomy -> context["anatomy"] *(pyblish.api.CollectorOrder - 0.11)
+
+Provides:
+ context, instances -> All data from previous publishing process.
+"""
+
+import ftrack_api
+import os
+
+import pyblish.api
+
+
+class CollectUsername(pyblish.api.ContextPlugin):
+ """
+ Translates user email to Ftrack username.
+
+ Emails in Ftrack are same as company's Slack, username is needed to
+ load data to Ftrack.
+
+ Expects "pype.club" user created on Ftrack and FTRACK_BOT_API_KEY env
+ var set up.
+
+ """
+ order = pyblish.api.CollectorOrder - 0.488
+ label = "Collect ftrack username"
+ hosts = ["webpublisher"]
+
+ _context = None
+
+ def process(self, context):
+ os.environ["FTRACK_API_USER"] = os.environ["FTRACK_BOT_API_USER"]
+ os.environ["FTRACK_API_KEY"] = os.environ["FTRACK_BOT_API_KEY"]
+ self.log.info("CollectUsername")
+ for instance in context:
+ email = instance.data["user_email"]
+ self.log.info("email:: {}".format(email))
+ session = ftrack_api.Session(auto_connect_event_hub=False)
+ user = session.query("User where email like '{}'".format(
+ email))
+
+ if not user:
+ raise ValueError(
+ "Couldnt find user with {} email".format(email))
+
+ os.environ["FTRACK_API_USER"] = user[0].get("username")
+ break
diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py b/openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_api.py
similarity index 100%
rename from openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py
rename to openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_api.py
diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py b/openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py
similarity index 100%
rename from openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py
rename to openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py
diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py b/openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_instances.py
similarity index 100%
rename from openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py
rename to openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_instances.py
diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py b/openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_note.py
similarity index 100%
rename from openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py
rename to openpype/modules/default_modules/ftrack/plugins/publish/integrate_ftrack_note.py
diff --git a/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py b/openpype/modules/default_modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py
similarity index 99%
rename from openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py
rename to openpype/modules/default_modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py
index 118a73a636..fbd64d9f70 100644
--- a/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py
+++ b/openpype/modules/default_modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py
@@ -4,12 +4,12 @@ import six
import pyblish.api
from avalon import io
-# Copy of constant `openpype.modules.ftrack.lib.avalon_sync.CUST_ATTR_AUTO_SYNC`
+# Copy of constant `openpype_modules.ftrack.lib.avalon_sync.CUST_ATTR_AUTO_SYNC`
CUST_ATTR_AUTO_SYNC = "avalon_auto_sync"
CUST_ATTR_GROUP = "openpype"
-# Copy of `get_pype_attr` from openpype.modules.ftrack.lib
+# Copy of `get_pype_attr` from openpype_modules.ftrack.lib
# TODO import from openpype's ftrack module when possible to not break Python 2
def get_pype_attr(session, split_hierarchical=True):
custom_attributes = []
diff --git a/openpype/modules/ftrack/plugins/publish/integrate_remove_components.py b/openpype/modules/default_modules/ftrack/plugins/publish/integrate_remove_components.py
similarity index 100%
rename from openpype/modules/ftrack/plugins/publish/integrate_remove_components.py
rename to openpype/modules/default_modules/ftrack/plugins/publish/integrate_remove_components.py
diff --git a/openpype/modules/ftrack/plugins/publish/validate_custom_ftrack_attributes.py b/openpype/modules/default_modules/ftrack/plugins/publish/validate_custom_ftrack_attributes.py
similarity index 100%
rename from openpype/modules/ftrack/plugins/publish/validate_custom_ftrack_attributes.py
rename to openpype/modules/default_modules/ftrack/plugins/publish/validate_custom_ftrack_attributes.py
diff --git a/openpype/modules/default_modules/ftrack/python2_vendor/arrow b/openpype/modules/default_modules/ftrack/python2_vendor/arrow
new file mode 160000
index 0000000000..b746fedf72
--- /dev/null
+++ b/openpype/modules/default_modules/ftrack/python2_vendor/arrow
@@ -0,0 +1 @@
+Subproject commit b746fedf7286c3755a46f07ab72f4c414cd41fc0
diff --git a/openpype/modules/default_modules/ftrack/python2_vendor/backports.functools_lru_cache/backports/__init__.py b/openpype/modules/default_modules/ftrack/python2_vendor/backports.functools_lru_cache/backports/__init__.py
new file mode 100644
index 0000000000..69e3be50da
--- /dev/null
+++ b/openpype/modules/default_modules/ftrack/python2_vendor/backports.functools_lru_cache/backports/__init__.py
@@ -0,0 +1 @@
+__path__ = __import__('pkgutil').extend_path(__path__, __name__)
diff --git a/openpype/modules/ftrack/python2_vendor/backports.functools_lru_cache/backports/configparser/__init__.py b/openpype/modules/default_modules/ftrack/python2_vendor/backports.functools_lru_cache/backports/configparser/__init__.py
similarity index 100%
rename from openpype/modules/ftrack/python2_vendor/backports.functools_lru_cache/backports/configparser/__init__.py
rename to openpype/modules/default_modules/ftrack/python2_vendor/backports.functools_lru_cache/backports/configparser/__init__.py
diff --git a/openpype/modules/ftrack/python2_vendor/backports.functools_lru_cache/backports/configparser/helpers.py b/openpype/modules/default_modules/ftrack/python2_vendor/backports.functools_lru_cache/backports/configparser/helpers.py
similarity index 100%
rename from openpype/modules/ftrack/python2_vendor/backports.functools_lru_cache/backports/configparser/helpers.py
rename to openpype/modules/default_modules/ftrack/python2_vendor/backports.functools_lru_cache/backports/configparser/helpers.py
diff --git a/openpype/modules/ftrack/python2_vendor/backports.functools_lru_cache/backports/functools_lru_cache.py b/openpype/modules/default_modules/ftrack/python2_vendor/backports.functools_lru_cache/backports/functools_lru_cache.py
similarity index 100%
rename from openpype/modules/ftrack/python2_vendor/backports.functools_lru_cache/backports/functools_lru_cache.py
rename to openpype/modules/default_modules/ftrack/python2_vendor/backports.functools_lru_cache/backports/functools_lru_cache.py
diff --git a/openpype/modules/ftrack/python2_vendor/builtins/builtins/__init__.py b/openpype/modules/default_modules/ftrack/python2_vendor/builtins/builtins/__init__.py
similarity index 100%
rename from openpype/modules/ftrack/python2_vendor/builtins/builtins/__init__.py
rename to openpype/modules/default_modules/ftrack/python2_vendor/builtins/builtins/__init__.py
diff --git a/openpype/modules/default_modules/ftrack/python2_vendor/ftrack-python-api b/openpype/modules/default_modules/ftrack/python2_vendor/ftrack-python-api
new file mode 160000
index 0000000000..d277f474ab
--- /dev/null
+++ b/openpype/modules/default_modules/ftrack/python2_vendor/ftrack-python-api
@@ -0,0 +1 @@
+Subproject commit d277f474ab016e7b53479c36af87cb861d0cc53e
diff --git a/openpype/modules/ftrack/scripts/sub_event_processor.py b/openpype/modules/default_modules/ftrack/scripts/sub_event_processor.py
similarity index 95%
rename from openpype/modules/ftrack/scripts/sub_event_processor.py
rename to openpype/modules/default_modules/ftrack/scripts/sub_event_processor.py
index 0d94fa7264..51b45eb93b 100644
--- a/openpype/modules/ftrack/scripts/sub_event_processor.py
+++ b/openpype/modules/default_modules/ftrack/scripts/sub_event_processor.py
@@ -4,8 +4,8 @@ import signal
import socket
import datetime
-from openpype.modules.ftrack.ftrack_server.ftrack_server import FtrackServer
-from openpype.modules.ftrack.ftrack_server.lib import (
+from openpype_modules.ftrack.ftrack_server.ftrack_server import FtrackServer
+from openpype_modules.ftrack.ftrack_server.lib import (
SocketSession,
ProcessEventHub,
TOPIC_STATUS_SERVER
diff --git a/openpype/modules/ftrack/scripts/sub_event_status.py b/openpype/modules/default_modules/ftrack/scripts/sub_event_status.py
similarity index 98%
rename from openpype/modules/ftrack/scripts/sub_event_status.py
rename to openpype/modules/default_modules/ftrack/scripts/sub_event_status.py
index 24b9bfb789..8a2733b635 100644
--- a/openpype/modules/ftrack/scripts/sub_event_status.py
+++ b/openpype/modules/default_modules/ftrack/scripts/sub_event_status.py
@@ -7,8 +7,8 @@ import socket
import datetime
import ftrack_api
-from openpype.modules.ftrack.ftrack_server.ftrack_server import FtrackServer
-from openpype.modules.ftrack.ftrack_server.lib import (
+from openpype_modules.ftrack.ftrack_server.ftrack_server import FtrackServer
+from openpype_modules.ftrack.ftrack_server.lib import (
SocketSession,
StatusEventHub,
TOPIC_STATUS_SERVER,
diff --git a/openpype/modules/ftrack/scripts/sub_event_storer.py b/openpype/modules/default_modules/ftrack/scripts/sub_event_storer.py
similarity index 96%
rename from openpype/modules/ftrack/scripts/sub_event_storer.py
rename to openpype/modules/default_modules/ftrack/scripts/sub_event_storer.py
index 6e2990ef0b..a8649e0ccc 100644
--- a/openpype/modules/ftrack/scripts/sub_event_storer.py
+++ b/openpype/modules/default_modules/ftrack/scripts/sub_event_storer.py
@@ -6,14 +6,14 @@ import socket
import pymongo
import ftrack_api
-from openpype.modules.ftrack.ftrack_server.ftrack_server import FtrackServer
-from openpype.modules.ftrack.ftrack_server.lib import (
+from openpype_modules.ftrack.ftrack_server.ftrack_server import FtrackServer
+from openpype_modules.ftrack.ftrack_server.lib import (
SocketSession,
StorerEventHub,
TOPIC_STATUS_SERVER,
TOPIC_STATUS_SERVER_RESULT
)
-from openpype.modules.ftrack.lib import get_ftrack_event_mongo_info
+from openpype_modules.ftrack.lib import get_ftrack_event_mongo_info
from openpype.lib import OpenPypeMongoConnection
from openpype.api import Logger
diff --git a/openpype/modules/ftrack/scripts/sub_legacy_server.py b/openpype/modules/default_modules/ftrack/scripts/sub_legacy_server.py
similarity index 97%
rename from openpype/modules/ftrack/scripts/sub_legacy_server.py
rename to openpype/modules/default_modules/ftrack/scripts/sub_legacy_server.py
index ae6aefa908..e3a623c376 100644
--- a/openpype/modules/ftrack/scripts/sub_legacy_server.py
+++ b/openpype/modules/default_modules/ftrack/scripts/sub_legacy_server.py
@@ -7,7 +7,7 @@ import threading
import ftrack_api
from openpype.api import Logger
from openpype.modules import ModulesManager
-from openpype.modules.ftrack.ftrack_server.ftrack_server import FtrackServer
+from openpype_modules.ftrack.ftrack_server.ftrack_server import FtrackServer
log = Logger().get_logger("Event Server Legacy")
diff --git a/openpype/modules/ftrack/scripts/sub_user_server.py b/openpype/modules/default_modules/ftrack/scripts/sub_user_server.py
similarity index 93%
rename from openpype/modules/ftrack/scripts/sub_user_server.py
rename to openpype/modules/default_modules/ftrack/scripts/sub_user_server.py
index 971a31b703..a3701a0950 100644
--- a/openpype/modules/ftrack/scripts/sub_user_server.py
+++ b/openpype/modules/default_modules/ftrack/scripts/sub_user_server.py
@@ -2,8 +2,8 @@ import sys
import signal
import socket
-from openpype.modules.ftrack.ftrack_server.ftrack_server import FtrackServer
-from openpype.modules.ftrack.ftrack_server.lib import (
+from openpype_modules.ftrack.ftrack_server.ftrack_server import FtrackServer
+from openpype_modules.ftrack.ftrack_server.lib import (
SocketSession,
SocketBaseEventHub
)
diff --git a/openpype/modules/ftrack/tray/__init__.py b/openpype/modules/default_modules/ftrack/tray/__init__.py
similarity index 100%
rename from openpype/modules/ftrack/tray/__init__.py
rename to openpype/modules/default_modules/ftrack/tray/__init__.py
diff --git a/openpype/modules/ftrack/tray/ftrack_tray.py b/openpype/modules/default_modules/ftrack/tray/ftrack_tray.py
similarity index 100%
rename from openpype/modules/ftrack/tray/ftrack_tray.py
rename to openpype/modules/default_modules/ftrack/tray/ftrack_tray.py
diff --git a/openpype/modules/ftrack/tray/login_dialog.py b/openpype/modules/default_modules/ftrack/tray/login_dialog.py
similarity index 99%
rename from openpype/modules/ftrack/tray/login_dialog.py
rename to openpype/modules/default_modules/ftrack/tray/login_dialog.py
index cc5689bee5..6384621c8e 100644
--- a/openpype/modules/ftrack/tray/login_dialog.py
+++ b/openpype/modules/default_modules/ftrack/tray/login_dialog.py
@@ -1,7 +1,7 @@
import os
import requests
from openpype import style
-from openpype.modules.ftrack.lib import credentials
+from openpype_modules.ftrack.lib import credentials
from . import login_tools
from openpype import resources
from Qt import QtCore, QtGui, QtWidgets
diff --git a/openpype/modules/ftrack/tray/login_tools.py b/openpype/modules/default_modules/ftrack/tray/login_tools.py
similarity index 100%
rename from openpype/modules/ftrack/tray/login_tools.py
rename to openpype/modules/default_modules/ftrack/tray/login_tools.py
diff --git a/openpype/modules/idle_manager/__init__.py b/openpype/modules/default_modules/idle_manager/__init__.py
similarity index 54%
rename from openpype/modules/idle_manager/__init__.py
rename to openpype/modules/default_modules/idle_manager/__init__.py
index 651f360c50..9d6e10bf39 100644
--- a/openpype/modules/idle_manager/__init__.py
+++ b/openpype/modules/default_modules/idle_manager/__init__.py
@@ -1,10 +1,8 @@
from .idle_module import (
- IdleManager,
- IIdleManager
+ IdleManager
)
__all__ = (
"IdleManager",
- "IIdleManager"
)
diff --git a/openpype/modules/idle_manager/idle_module.py b/openpype/modules/default_modules/idle_manager/idle_module.py
similarity index 74%
rename from openpype/modules/idle_manager/idle_module.py
rename to openpype/modules/default_modules/idle_manager/idle_module.py
index 5dd5160aa7..1a6d71a961 100644
--- a/openpype/modules/idle_manager/idle_module.py
+++ b/openpype/modules/default_modules/idle_manager/idle_module.py
@@ -1,38 +1,14 @@
import platform
import collections
-from abc import ABCMeta, abstractmethod
-import six
-
-from openpype.modules import PypeModule, ITrayService
+from openpype.modules import OpenPypeModule
+from openpype_interfaces import (
+ ITrayService,
+ IIdleManager
+)
-@six.add_metaclass(ABCMeta)
-class IIdleManager:
- """Other modules interface to return callbacks by idle time in seconds.
-
- Expected output is dictionary with seconds as keys and callback/s
- as value, value may be callback of list of callbacks.
- EXAMPLE:
- ```
- {
- 60: self.on_minute_idle
- }
- ```
- """
- idle_manager = None
-
- @abstractmethod
- def callbacks_by_idle_time(self):
- pass
-
- @property
- def idle_time(self):
- if self.idle_manager:
- return self.idle_manager.idle_time
-
-
-class IdleManager(PypeModule, ITrayService):
+class IdleManager(OpenPypeModule, ITrayService):
""" Measure user's idle time in seconds.
Idle time resets on keyboard/mouse input.
Is able to emit signals at specific time idle.
diff --git a/openpype/modules/idle_manager/idle_threads.py b/openpype/modules/default_modules/idle_manager/idle_threads.py
similarity index 100%
rename from openpype/modules/idle_manager/idle_threads.py
rename to openpype/modules/default_modules/idle_manager/idle_threads.py
diff --git a/openpype/modules/default_modules/idle_manager/interfaces.py b/openpype/modules/default_modules/idle_manager/interfaces.py
new file mode 100644
index 0000000000..71cd17a64a
--- /dev/null
+++ b/openpype/modules/default_modules/idle_manager/interfaces.py
@@ -0,0 +1,26 @@
+from abc import abstractmethod
+from openpype.modules import OpenPypeInterface
+
+
+class IIdleManager(OpenPypeInterface):
+ """Other modules interface to return callbacks by idle time in seconds.
+
+ Expected output is dictionary with seconds as keys and callback/s
+ as value, value may be callback of list of callbacks.
+ EXAMPLE:
+ ```
+ {
+ 60: self.on_minute_idle
+ }
+ ```
+ """
+ idle_manager = None
+
+ @abstractmethod
+ def callbacks_by_idle_time(self):
+ pass
+
+ @property
+ def idle_time(self):
+ if self.idle_manager:
+ return self.idle_manager.idle_time
diff --git a/openpype/modules/default_modules/interfaces.py b/openpype/modules/default_modules/interfaces.py
new file mode 100644
index 0000000000..a60c5fa606
--- /dev/null
+++ b/openpype/modules/default_modules/interfaces.py
@@ -0,0 +1,265 @@
+from abc import abstractmethod
+
+from openpype import resources
+
+from openpype.modules import OpenPypeInterface
+
+
+class IPluginPaths(OpenPypeInterface):
+ """Module has plugin paths to return.
+
+ Expected result is dictionary with keys "publish", "create", "load" or
+ "actions" and values as list or string.
+ {
+ "publish": ["path/to/publish_plugins"]
+ }
+ """
+ # TODO validation of an output
+ @abstractmethod
+ def get_plugin_paths(self):
+ pass
+
+
+class ILaunchHookPaths(OpenPypeInterface):
+ """Module has launch hook paths to return.
+
+ Expected result is list of paths.
+ ["path/to/launch_hooks_dir"]
+ """
+
+ @abstractmethod
+ def get_launch_hook_paths(self):
+ pass
+
+
+class ITrayModule(OpenPypeInterface):
+ """Module has special procedures when used in Pype Tray.
+
+ IMPORTANT:
+ The module still must be usable if is not used in tray even if
+ would do nothing.
+ """
+ tray_initialized = False
+ _tray_manager = None
+
+ @abstractmethod
+ def tray_init(self):
+ """Initialization part of tray implementation.
+
+ Triggered between `initialization` and `connect_with_modules`.
+
+ This is where GUIs should be loaded or tray specific parts should be
+ prepared.
+ """
+ pass
+
+ @abstractmethod
+ def tray_menu(self, tray_menu):
+ """Add module's action to tray menu."""
+ pass
+
+ @abstractmethod
+ def tray_start(self):
+ """Start procedure in Pype tray."""
+ pass
+
+ @abstractmethod
+ def tray_exit(self):
+ """Cleanup method which is executed on tray shutdown.
+
+ This is place where all threads should be shut.
+ """
+ pass
+
+ def execute_in_main_thread(self, callback):
+ """ Pushes callback to the queue or process 'callback' on a main thread
+
+ Some callbacks need to be processed on main thread (menu actions
+ must be added on main thread or they won't get triggered etc.)
+ """
+ if not self.tray_initialized:
+ # TODO Called without initialized tray, still main thread needed
+ try:
+ callback()
+
+ except Exception:
+ self.log.warning(
+ "Failed to execute {} in main thread".format(callback),
+ exc_info=True)
+
+ return
+ self.manager.tray_manager.execute_in_main_thread(callback)
+
+ def show_tray_message(self, title, message, icon=None, msecs=None):
+ """Show tray message.
+
+ Args:
+ title (str): Title of message.
+ message (str): Content of message.
+ icon (QSystemTrayIcon.MessageIcon): Message's icon. Default is
+ Information icon, may differ by Qt version.
+ msecs (int): Duration of message visibility in miliseconds.
+ Default is 10000 msecs, may differ by Qt version.
+ """
+ if self._tray_manager:
+ self._tray_manager.show_tray_message(title, message, icon, msecs)
+
+ def add_doubleclick_callback(self, callback):
+ if hasattr(self.manager, "add_doubleclick_callback"):
+ self.manager.add_doubleclick_callback(self, callback)
+
+
+class ITrayAction(ITrayModule):
+ """Implementation of Tray action.
+
+ Add action to tray menu which will trigger `on_action_trigger`.
+ It is expected to be used for showing tools.
+
+ Methods `tray_start`, `tray_exit` and `connect_with_modules` are overriden
+ as it's not expected that action will use them. But it is possible if
+ necessary.
+ """
+
+ admin_action = False
+ _admin_submenu = None
+
+ @property
+ @abstractmethod
+ def label(self):
+ """Service label showed in menu."""
+ pass
+
+ @abstractmethod
+ def on_action_trigger(self):
+ """What happens on actions click."""
+ pass
+
+ def tray_menu(self, tray_menu):
+ from Qt import QtWidgets
+
+ if self.admin_action:
+ menu = self.admin_submenu(tray_menu)
+ action = QtWidgets.QAction(self.label, menu)
+ menu.addAction(action)
+ if not menu.menuAction().isVisible():
+ menu.menuAction().setVisible(True)
+
+ else:
+ action = QtWidgets.QAction(self.label, tray_menu)
+ tray_menu.addAction(action)
+
+ action.triggered.connect(self.on_action_trigger)
+
+ def tray_start(self):
+ return
+
+ def tray_exit(self):
+ return
+
+ @staticmethod
+ def admin_submenu(tray_menu):
+ if ITrayAction._admin_submenu is None:
+ from Qt import QtWidgets
+
+ admin_submenu = QtWidgets.QMenu("Admin", tray_menu)
+ admin_submenu.menuAction().setVisible(False)
+ ITrayAction._admin_submenu = admin_submenu
+ return ITrayAction._admin_submenu
+
+
+class ITrayService(ITrayModule):
+ # Module's property
+ menu_action = None
+
+ # Class properties
+ _services_submenu = None
+ _icon_failed = None
+ _icon_running = None
+ _icon_idle = None
+
+ @property
+ @abstractmethod
+ def label(self):
+ """Service label showed in menu."""
+ pass
+
+ # TODO be able to get any sort of information to show/print
+ # @abstractmethod
+ # def get_service_info(self):
+ # pass
+
+ @staticmethod
+ def services_submenu(tray_menu):
+ if ITrayService._services_submenu is None:
+ from Qt import QtWidgets
+
+ services_submenu = QtWidgets.QMenu("Services", tray_menu)
+ services_submenu.menuAction().setVisible(False)
+ ITrayService._services_submenu = services_submenu
+ return ITrayService._services_submenu
+
+ @staticmethod
+ def add_service_action(action):
+ ITrayService._services_submenu.addAction(action)
+ if not ITrayService._services_submenu.menuAction().isVisible():
+ ITrayService._services_submenu.menuAction().setVisible(True)
+
+ @staticmethod
+ def _load_service_icons():
+ from Qt import QtGui
+
+ ITrayService._failed_icon = QtGui.QIcon(
+ resources.get_resource("icons", "circle_red.png")
+ )
+ ITrayService._icon_running = QtGui.QIcon(
+ resources.get_resource("icons", "circle_green.png")
+ )
+ ITrayService._icon_idle = QtGui.QIcon(
+ resources.get_resource("icons", "circle_orange.png")
+ )
+
+ @staticmethod
+ def get_icon_running():
+ if ITrayService._icon_running is None:
+ ITrayService._load_service_icons()
+ return ITrayService._icon_running
+
+ @staticmethod
+ def get_icon_idle():
+ if ITrayService._icon_idle is None:
+ ITrayService._load_service_icons()
+ return ITrayService._icon_idle
+
+ @staticmethod
+ def get_icon_failed():
+ if ITrayService._failed_icon is None:
+ ITrayService._load_service_icons()
+ return ITrayService._failed_icon
+
+ def tray_menu(self, tray_menu):
+ from Qt import QtWidgets
+
+ action = QtWidgets.QAction(
+ self.label,
+ self.services_submenu(tray_menu)
+ )
+ self.menu_action = action
+
+ self.add_service_action(action)
+
+ self.set_service_running_icon()
+
+ def set_service_running_icon(self):
+ """Change icon of an QAction to green circle."""
+ if self.menu_action:
+ self.menu_action.setIcon(self.get_icon_running())
+
+ def set_service_failed_icon(self):
+ """Change icon of an QAction to red circle."""
+ if self.menu_action:
+ self.menu_action.setIcon(self.get_icon_failed())
+
+ def set_service_idle_icon(self):
+ """Change icon of an QAction to orange circle."""
+ if self.menu_action:
+ self.menu_action.setIcon(self.get_icon_idle())
diff --git a/openpype/modules/launcher_action.py b/openpype/modules/default_modules/launcher_action.py
similarity index 89%
rename from openpype/modules/launcher_action.py
rename to openpype/modules/default_modules/launcher_action.py
index 0059ff021b..e3252e3842 100644
--- a/openpype/modules/launcher_action.py
+++ b/openpype/modules/default_modules/launcher_action.py
@@ -1,7 +1,8 @@
-from . import PypeModule, ITrayAction
+from openpype.modules import OpenPypeModule
+from openpype_interfaces import ITrayAction
-class LauncherAction(PypeModule, ITrayAction):
+class LauncherAction(OpenPypeModule, ITrayAction):
label = "Launcher"
name = "launcher_tool"
diff --git a/openpype/modules/log_viewer/__init__.py b/openpype/modules/default_modules/log_viewer/__init__.py
similarity index 100%
rename from openpype/modules/log_viewer/__init__.py
rename to openpype/modules/default_modules/log_viewer/__init__.py
diff --git a/openpype/modules/log_viewer/log_view_module.py b/openpype/modules/default_modules/log_viewer/log_view_module.py
similarity index 89%
rename from openpype/modules/log_viewer/log_view_module.py
rename to openpype/modules/default_modules/log_viewer/log_view_module.py
index dde482b04c..bc1a98f4ad 100644
--- a/openpype/modules/log_viewer/log_view_module.py
+++ b/openpype/modules/default_modules/log_viewer/log_view_module.py
@@ -1,8 +1,9 @@
from openpype.api import Logger
-from .. import PypeModule, ITrayModule
+from openpype.modules import OpenPypeModule
+from openpype_interfaces import ITrayModule
-class LogViewModule(PypeModule, ITrayModule):
+class LogViewModule(OpenPypeModule, ITrayModule):
name = "log_viewer"
def initialize(self, modules_settings):
diff --git a/openpype/modules/sync_server/providers/__init__.py b/openpype/modules/default_modules/log_viewer/tray/__init__.py
similarity index 100%
rename from openpype/modules/sync_server/providers/__init__.py
rename to openpype/modules/default_modules/log_viewer/tray/__init__.py
diff --git a/openpype/modules/log_viewer/tray/app.py b/openpype/modules/default_modules/log_viewer/tray/app.py
similarity index 100%
rename from openpype/modules/log_viewer/tray/app.py
rename to openpype/modules/default_modules/log_viewer/tray/app.py
diff --git a/openpype/modules/log_viewer/tray/models.py b/openpype/modules/default_modules/log_viewer/tray/models.py
similarity index 100%
rename from openpype/modules/log_viewer/tray/models.py
rename to openpype/modules/default_modules/log_viewer/tray/models.py
diff --git a/openpype/modules/log_viewer/tray/widgets.py b/openpype/modules/default_modules/log_viewer/tray/widgets.py
similarity index 100%
rename from openpype/modules/log_viewer/tray/widgets.py
rename to openpype/modules/default_modules/log_viewer/tray/widgets.py
diff --git a/openpype/modules/muster/__init__.py b/openpype/modules/default_modules/muster/__init__.py
similarity index 100%
rename from openpype/modules/muster/__init__.py
rename to openpype/modules/default_modules/muster/__init__.py
diff --git a/openpype/modules/muster/muster.py b/openpype/modules/default_modules/muster/muster.py
similarity index 97%
rename from openpype/modules/muster/muster.py
rename to openpype/modules/default_modules/muster/muster.py
index 1a82926802..a0e72006af 100644
--- a/openpype/modules/muster/muster.py
+++ b/openpype/modules/default_modules/muster/muster.py
@@ -2,14 +2,14 @@ import os
import json
import appdirs
import requests
-from .. import (
- PypeModule,
+from openpype.modules import OpenPypeModule
+from openpype_interfaces import (
ITrayModule,
IWebServerRoutes
)
-class MusterModule(PypeModule, ITrayModule, IWebServerRoutes):
+class MusterModule(OpenPypeModule, ITrayModule, IWebServerRoutes):
"""
Module handling Muster Render credentials. This will display dialog
asking for user credentials for Muster if not already specified.
diff --git a/openpype/modules/muster/rest_api.py b/openpype/modules/default_modules/muster/rest_api.py
similarity index 100%
rename from openpype/modules/muster/rest_api.py
rename to openpype/modules/default_modules/muster/rest_api.py
diff --git a/openpype/modules/muster/widget_login.py b/openpype/modules/default_modules/muster/widget_login.py
similarity index 100%
rename from openpype/modules/muster/widget_login.py
rename to openpype/modules/default_modules/muster/widget_login.py
diff --git a/openpype/modules/project_manager_action.py b/openpype/modules/default_modules/project_manager_action.py
similarity index 92%
rename from openpype/modules/project_manager_action.py
rename to openpype/modules/default_modules/project_manager_action.py
index 1387aa258c..c1f984a8cb 100644
--- a/openpype/modules/project_manager_action.py
+++ b/openpype/modules/default_modules/project_manager_action.py
@@ -1,7 +1,8 @@
-from . import PypeModule, ITrayAction
+from openpype.modules import OpenPypeModule
+from openpype_interfaces import ITrayAction
-class ProjectManagerAction(PypeModule, ITrayAction):
+class ProjectManagerAction(OpenPypeModule, ITrayAction):
label = "Project Manager (beta)"
name = "project_manager"
admin_action = True
diff --git a/openpype/modules/default_modules/python_console_interpreter/__init__.py b/openpype/modules/default_modules/python_console_interpreter/__init__.py
new file mode 100644
index 0000000000..5f54ac497b
--- /dev/null
+++ b/openpype/modules/default_modules/python_console_interpreter/__init__.py
@@ -0,0 +1,8 @@
+from .module import (
+ PythonInterpreterAction
+)
+
+
+__all__ = (
+ "PythonInterpreterAction",
+)
diff --git a/openpype/modules/default_modules/python_console_interpreter/module.py b/openpype/modules/default_modules/python_console_interpreter/module.py
new file mode 100644
index 0000000000..f4df3fb6d8
--- /dev/null
+++ b/openpype/modules/default_modules/python_console_interpreter/module.py
@@ -0,0 +1,46 @@
+from openpype.modules import OpenPypeModule
+from openpype_interfaces import ITrayAction
+
+
+class PythonInterpreterAction(OpenPypeModule, ITrayAction):
+ label = "Console"
+ name = "python_interpreter"
+ admin_action = True
+
+ def initialize(self, modules_settings):
+ self.enabled = True
+ self._interpreter_window = None
+
+ def tray_init(self):
+ self.create_interpreter_window()
+
+ def tray_exit(self):
+ if self._interpreter_window is not None:
+ self._interpreter_window.save_registry()
+
+ def connect_with_modules(self, *args, **kwargs):
+ pass
+
+ def create_interpreter_window(self):
+ """Initializa Settings Qt window."""
+ if self._interpreter_window:
+ return
+
+ from openpype_modules.python_console_interpreter.window import (
+ PythonInterpreterWidget
+ )
+
+ self._interpreter_window = PythonInterpreterWidget()
+
+ def on_action_trigger(self):
+ self.show_interpreter_window()
+
+ def show_interpreter_window(self):
+ self.create_interpreter_window()
+
+ if self._interpreter_window.isVisible():
+ self._interpreter_window.activateWindow()
+ self._interpreter_window.raise_()
+ return
+
+ self._interpreter_window.show()
diff --git a/openpype/modules/default_modules/python_console_interpreter/window/__init__.py b/openpype/modules/default_modules/python_console_interpreter/window/__init__.py
new file mode 100644
index 0000000000..92fd6f1df2
--- /dev/null
+++ b/openpype/modules/default_modules/python_console_interpreter/window/__init__.py
@@ -0,0 +1,8 @@
+from .widgets import (
+ PythonInterpreterWidget
+)
+
+
+__all__ = (
+ "PythonInterpreterWidget",
+)
diff --git a/openpype/modules/default_modules/python_console_interpreter/window/widgets.py b/openpype/modules/default_modules/python_console_interpreter/window/widgets.py
new file mode 100644
index 0000000000..975decf4f4
--- /dev/null
+++ b/openpype/modules/default_modules/python_console_interpreter/window/widgets.py
@@ -0,0 +1,583 @@
+import os
+import re
+import sys
+import collections
+from code import InteractiveInterpreter
+
+import appdirs
+from Qt import QtCore, QtWidgets, QtGui
+
+from openpype import resources
+from openpype.style import load_stylesheet
+from openpype.lib import JSONSettingRegistry
+
+
+openpype_art = """
+ . . .. . ..
+ _oOOP3OPP3Op_. .
+ .PPpo~. .. ~2p. .. .... . .
+ .Ppo . .pPO3Op.. . O:. . . .
+ .3Pp . oP3'. 'P33. . 4 .. . . . .. . . .
+ .~OP 3PO. .Op3 : . .. _____ _____ _____
+ .P3O . oP3oP3O3P' . . . . / /./ /./ /
+ O3:. O3p~ . .:. . ./____/./____/ /____/
+ 'P . 3p3. oP3~. ..P:. . . .. . . .. . . .
+ . ': . Po' .Opo'. .3O. . o[ by Pype Club ]]]==- - - . .
+ . '_ .. . . _OP3.. . .https://openpype.io.. .
+ ~P3.OPPPO3OP~ . .. .
+ . ' '. . .. . . . .. .
+
+
+"""
+
+
+class PythonInterpreterRegistry(JSONSettingRegistry):
+ """Class handling OpenPype general settings registry.
+
+ Attributes:
+ vendor (str): Name used for path construction.
+ product (str): Additional name used for path construction.
+
+ """
+
+ def __init__(self):
+ self.vendor = "pypeclub"
+ self.product = "openpype"
+ name = "python_interpreter_tool"
+ path = appdirs.user_data_dir(self.product, self.vendor)
+ super(PythonInterpreterRegistry, self).__init__(name, path)
+
+
+class StdOEWrap:
+ def __init__(self):
+ self._origin_stdout_write = None
+ self._origin_stderr_write = None
+ self._listening = False
+ self.lines = collections.deque()
+
+ if not sys.stdout:
+ sys.stdout = open(os.devnull, "w")
+
+ if not sys.stderr:
+ sys.stderr = open(os.devnull, "w")
+
+ if self._origin_stdout_write is None:
+ self._origin_stdout_write = sys.stdout.write
+
+ if self._origin_stderr_write is None:
+ self._origin_stderr_write = sys.stderr.write
+
+ self._listening = True
+ sys.stdout.write = self._stdout_listener
+ sys.stderr.write = self._stderr_listener
+
+ def stop_listen(self):
+ self._listening = False
+
+ def _stdout_listener(self, text):
+ if self._listening:
+ self.lines.append(text)
+ if self._origin_stdout_write is not None:
+ self._origin_stdout_write(text)
+
+ def _stderr_listener(self, text):
+ if self._listening:
+ self.lines.append(text)
+ if self._origin_stderr_write is not None:
+ self._origin_stderr_write(text)
+
+
+class PythonCodeEditor(QtWidgets.QPlainTextEdit):
+ execute_requested = QtCore.Signal()
+
+ def __init__(self, parent):
+ super(PythonCodeEditor, self).__init__(parent)
+
+ self.setObjectName("PythonCodeEditor")
+
+ self._indent = 4
+
+ def _tab_shift_right(self):
+ cursor = self.textCursor()
+ selected_text = cursor.selectedText()
+ if not selected_text:
+ cursor.insertText(" " * self._indent)
+ return
+
+ sel_start = cursor.selectionStart()
+ sel_end = cursor.selectionEnd()
+ cursor.setPosition(sel_end)
+ end_line = cursor.blockNumber()
+ cursor.setPosition(sel_start)
+ while True:
+ cursor.movePosition(QtGui.QTextCursor.StartOfLine)
+ text = cursor.block().text()
+ spaces = len(text) - len(text.lstrip(" "))
+ new_spaces = spaces % self._indent
+ if not new_spaces:
+ new_spaces = self._indent
+
+ cursor.insertText(" " * new_spaces)
+ if cursor.blockNumber() == end_line:
+ break
+
+ cursor.movePosition(QtGui.QTextCursor.NextBlock)
+
+ def _tab_shift_left(self):
+ tmp_cursor = self.textCursor()
+ sel_start = tmp_cursor.selectionStart()
+ sel_end = tmp_cursor.selectionEnd()
+
+ cursor = QtGui.QTextCursor(self.document())
+ cursor.setPosition(sel_end)
+ end_line = cursor.blockNumber()
+ cursor.setPosition(sel_start)
+ while True:
+ cursor.movePosition(QtGui.QTextCursor.StartOfLine)
+ text = cursor.block().text()
+ spaces = len(text) - len(text.lstrip(" "))
+ if spaces:
+ spaces_to_remove = (spaces % self._indent) or self._indent
+ if spaces_to_remove > spaces:
+ spaces_to_remove = spaces
+
+ cursor.setPosition(
+ cursor.position() + spaces_to_remove,
+ QtGui.QTextCursor.KeepAnchor
+ )
+ cursor.removeSelectedText()
+
+ if cursor.blockNumber() == end_line:
+ break
+
+ cursor.movePosition(QtGui.QTextCursor.NextBlock)
+
+ def keyPressEvent(self, event):
+ if event.key() == QtCore.Qt.Key_Backtab:
+ self._tab_shift_left()
+ event.accept()
+ return
+
+ if event.key() == QtCore.Qt.Key_Tab:
+ if event.modifiers() == QtCore.Qt.NoModifier:
+ self._tab_shift_right()
+ event.accept()
+ return
+
+ if (
+ event.key() == QtCore.Qt.Key_Return
+ and event.modifiers() == QtCore.Qt.ControlModifier
+ ):
+ self.execute_requested.emit()
+ event.accept()
+ return
+
+ super(PythonCodeEditor, self).keyPressEvent(event)
+
+
+class PythonTabWidget(QtWidgets.QWidget):
+ before_execute = QtCore.Signal(str)
+
+ def __init__(self, parent):
+ super(PythonTabWidget, self).__init__(parent)
+
+ code_input = PythonCodeEditor(self)
+
+ self.setFocusProxy(code_input)
+
+ execute_btn = QtWidgets.QPushButton("Execute", self)
+ execute_btn.setToolTip("Execute command (Ctrl + Enter)")
+
+ btns_layout = QtWidgets.QHBoxLayout()
+ btns_layout.setContentsMargins(0, 0, 0, 0)
+ btns_layout.addStretch(1)
+ btns_layout.addWidget(execute_btn)
+
+ layout = QtWidgets.QVBoxLayout(self)
+ layout.setContentsMargins(0, 0, 0, 0)
+ layout.addWidget(code_input, 1)
+ layout.addLayout(btns_layout, 0)
+
+ execute_btn.clicked.connect(self._on_execute_clicked)
+ code_input.execute_requested.connect(self.execute)
+
+ self._code_input = code_input
+ self._interpreter = InteractiveInterpreter()
+
+ def _on_execute_clicked(self):
+ self.execute()
+
+ def get_code(self):
+ return self._code_input.toPlainText()
+
+ def set_code(self, code_text):
+ self._code_input.setPlainText(code_text)
+
+ def execute(self):
+ code_text = self._code_input.toPlainText()
+ self.before_execute.emit(code_text)
+ self._interpreter.runcode(code_text)
+
+
+class TabNameDialog(QtWidgets.QDialog):
+ default_width = 330
+ default_height = 85
+
+ def __init__(self, parent):
+ super(TabNameDialog, self).__init__(parent)
+
+ self.setWindowTitle("Enter tab name")
+
+ name_label = QtWidgets.QLabel("Tab name:", self)
+ name_input = QtWidgets.QLineEdit(self)
+
+ inputs_layout = QtWidgets.QHBoxLayout()
+ inputs_layout.addWidget(name_label)
+ inputs_layout.addWidget(name_input)
+
+ ok_btn = QtWidgets.QPushButton("Ok", self)
+ cancel_btn = QtWidgets.QPushButton("Cancel", self)
+ btns_layout = QtWidgets.QHBoxLayout()
+ btns_layout.addStretch(1)
+ btns_layout.addWidget(ok_btn)
+ btns_layout.addWidget(cancel_btn)
+
+ layout = QtWidgets.QVBoxLayout(self)
+ layout.addLayout(inputs_layout)
+ layout.addStretch(1)
+ layout.addLayout(btns_layout)
+
+ ok_btn.clicked.connect(self._on_ok_clicked)
+ cancel_btn.clicked.connect(self._on_cancel_clicked)
+
+ self._name_input = name_input
+ self._ok_btn = ok_btn
+ self._cancel_btn = cancel_btn
+
+ self._result = None
+
+ self.resize(self.default_width, self.default_height)
+
+ def set_tab_name(self, name):
+ self._name_input.setText(name)
+
+ def result(self):
+ return self._result
+
+ def showEvent(self, event):
+ super(TabNameDialog, self).showEvent(event)
+ btns_width = max(
+ self._ok_btn.width(),
+ self._cancel_btn.width()
+ )
+
+ self._ok_btn.setMinimumWidth(btns_width)
+ self._cancel_btn.setMinimumWidth(btns_width)
+
+ def _on_ok_clicked(self):
+ self._result = self._name_input.text()
+ self.accept()
+
+ def _on_cancel_clicked(self):
+ self._result = None
+ self.reject()
+
+
+class OutputTextWidget(QtWidgets.QTextEdit):
+ v_max_offset = 4
+
+ def vertical_scroll_at_max(self):
+ v_scroll = self.verticalScrollBar()
+ return v_scroll.value() > v_scroll.maximum() - self.v_max_offset
+
+ def scroll_to_bottom(self):
+ v_scroll = self.verticalScrollBar()
+ return v_scroll.setValue(v_scroll.maximum())
+
+
+class EnhancedTabBar(QtWidgets.QTabBar):
+ double_clicked = QtCore.Signal(QtCore.QPoint)
+ right_clicked = QtCore.Signal(QtCore.QPoint)
+ mid_clicked = QtCore.Signal(QtCore.QPoint)
+
+ def __init__(self, parent):
+ super(EnhancedTabBar, self).__init__(parent)
+
+ self.setDrawBase(False)
+
+ def mouseDoubleClickEvent(self, event):
+ self.double_clicked.emit(event.globalPos())
+ event.accept()
+
+ def mouseReleaseEvent(self, event):
+ if event.button() == QtCore.Qt.RightButton:
+ self.right_clicked.emit(event.globalPos())
+ event.accept()
+ return
+
+ elif event.button() == QtCore.Qt.MidButton:
+ self.mid_clicked.emit(event.globalPos())
+ event.accept()
+
+ else:
+ super(EnhancedTabBar, self).mouseReleaseEvent(event)
+
+
+class PythonInterpreterWidget(QtWidgets.QWidget):
+ default_width = 1000
+ default_height = 600
+
+ def __init__(self, parent=None):
+ super(PythonInterpreterWidget, self).__init__(parent)
+
+ self.setWindowTitle("OpenPype Console")
+ self.setWindowIcon(QtGui.QIcon(resources.pype_icon_filepath()))
+
+ self.ansi_escape = re.compile(
+ r"(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]"
+ )
+
+ self._tabs = []
+
+ self._stdout_err_wrapper = StdOEWrap()
+
+ output_widget = OutputTextWidget(self)
+ output_widget.setObjectName("PythonInterpreterOutput")
+ output_widget.setLineWrapMode(QtWidgets.QTextEdit.NoWrap)
+ output_widget.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction)
+
+ tab_widget = QtWidgets.QTabWidget(self)
+ tab_bar = EnhancedTabBar(tab_widget)
+ tab_widget.setTabBar(tab_bar)
+ tab_widget.setTabsClosable(False)
+ tab_widget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
+
+ add_tab_btn = QtWidgets.QPushButton("+", tab_widget)
+ tab_widget.setCornerWidget(add_tab_btn, QtCore.Qt.TopLeftCorner)
+
+ widgets_splitter = QtWidgets.QSplitter(self)
+ widgets_splitter.setOrientation(QtCore.Qt.Vertical)
+ widgets_splitter.addWidget(output_widget)
+ widgets_splitter.addWidget(tab_widget)
+ widgets_splitter.setStretchFactor(0, 1)
+ widgets_splitter.setStretchFactor(1, 1)
+ height = int(self.default_height / 2)
+ widgets_splitter.setSizes([height, self.default_height - height])
+
+ layout = QtWidgets.QVBoxLayout(self)
+ layout.addWidget(widgets_splitter)
+
+ line_check_timer = QtCore.QTimer()
+ line_check_timer.setInterval(200)
+
+ line_check_timer.timeout.connect(self._on_timer_timeout)
+ add_tab_btn.clicked.connect(self._on_add_clicked)
+ tab_bar.right_clicked.connect(self._on_tab_right_click)
+ tab_bar.double_clicked.connect(self._on_tab_double_click)
+ tab_bar.mid_clicked.connect(self._on_tab_mid_click)
+ tab_widget.tabCloseRequested.connect(self._on_tab_close_req)
+
+ self._widgets_splitter = widgets_splitter
+ self._add_tab_btn = add_tab_btn
+ self._output_widget = output_widget
+ self._tab_widget = tab_widget
+ self._line_check_timer = line_check_timer
+
+ self._append_lines([openpype_art])
+
+ self.setStyleSheet(load_stylesheet())
+
+ self.resize(self.default_width, self.default_height)
+
+ self._init_from_registry()
+
+ if self._tab_widget.count() < 1:
+ self.add_tab("Python")
+
+ def _init_from_registry(self):
+ setting_registry = PythonInterpreterRegistry()
+
+ try:
+ width = setting_registry.get_item("width")
+ height = setting_registry.get_item("height")
+ if width is not None and height is not None:
+ self.resize(width, height)
+
+ except ValueError:
+ pass
+
+ try:
+ sizes = setting_registry.get_item("splitter_sizes")
+ if len(sizes) == len(self._widgets_splitter.sizes()):
+ self._widgets_splitter.setSizes(sizes)
+
+ except ValueError:
+ pass
+
+ try:
+ tab_defs = setting_registry.get_item("tabs") or []
+ for tab_def in tab_defs:
+ widget = self.add_tab(tab_def["name"])
+ widget.set_code(tab_def["code"])
+
+ except ValueError:
+ pass
+
+ def save_registry(self):
+ setting_registry = PythonInterpreterRegistry()
+
+ setting_registry.set_item("width", self.width())
+ setting_registry.set_item("height", self.height())
+
+ setting_registry.set_item(
+ "splitter_sizes", self._widgets_splitter.sizes()
+ )
+
+ tabs = []
+ for tab_idx in range(self._tab_widget.count()):
+ widget = self._tab_widget.widget(tab_idx)
+ tab_code = widget.get_code()
+ tab_name = self._tab_widget.tabText(tab_idx)
+ tabs.append({
+ "name": tab_name,
+ "code": tab_code
+ })
+
+ setting_registry.set_item("tabs", tabs)
+
+ def _on_tab_right_click(self, global_point):
+ point = self._tab_widget.mapFromGlobal(global_point)
+ tab_bar = self._tab_widget.tabBar()
+ tab_idx = tab_bar.tabAt(point)
+ last_index = tab_bar.count() - 1
+ if tab_idx < 0 or tab_idx > last_index:
+ return
+
+ menu = QtWidgets.QMenu(self._tab_widget)
+ menu.addAction("Rename")
+ result = menu.exec_(global_point)
+ if result is None:
+ return
+
+ if result.text() == "Rename":
+ self._rename_tab_req(tab_idx)
+
+ def _rename_tab_req(self, tab_idx):
+ dialog = TabNameDialog(self)
+ dialog.set_tab_name(self._tab_widget.tabText(tab_idx))
+ dialog.exec_()
+ tab_name = dialog.result()
+ if tab_name:
+ self._tab_widget.setTabText(tab_idx, tab_name)
+
+ def _on_tab_mid_click(self, global_point):
+ point = self._tab_widget.mapFromGlobal(global_point)
+ tab_bar = self._tab_widget.tabBar()
+ tab_idx = tab_bar.tabAt(point)
+ last_index = tab_bar.count() - 1
+ if tab_idx < 0 or tab_idx > last_index:
+ return
+
+ self._on_tab_close_req(tab_idx)
+
+ def _on_tab_double_click(self, global_point):
+ point = self._tab_widget.mapFromGlobal(global_point)
+ tab_bar = self._tab_widget.tabBar()
+ tab_idx = tab_bar.tabAt(point)
+ last_index = tab_bar.count() - 1
+ if tab_idx < 0 or tab_idx > last_index:
+ return
+
+ self._rename_tab_req(tab_idx)
+
+ def _on_tab_close_req(self, tab_index):
+ if self._tab_widget.count() == 1:
+ return
+
+ widget = self._tab_widget.widget(tab_index)
+ if widget in self._tabs:
+ self._tabs.remove(widget)
+ self._tab_widget.removeTab(tab_index)
+
+ if self._tab_widget.count() == 1:
+ self._tab_widget.setTabsClosable(False)
+
+ def _append_lines(self, lines):
+ at_max = self._output_widget.vertical_scroll_at_max()
+ tmp_cursor = QtGui.QTextCursor(self._output_widget.document())
+ tmp_cursor.movePosition(QtGui.QTextCursor.End)
+ for line in lines:
+ tmp_cursor.insertText(line)
+
+ if at_max:
+ self._output_widget.scroll_to_bottom()
+
+ def _on_timer_timeout(self):
+ if self._stdout_err_wrapper.lines:
+ lines = []
+ while self._stdout_err_wrapper.lines:
+ line = self._stdout_err_wrapper.lines.popleft()
+ lines.append(self.ansi_escape.sub("", line))
+ self._append_lines(lines)
+
+ def _on_add_clicked(self):
+ dialog = TabNameDialog(self)
+ dialog.exec_()
+ tab_name = dialog.result()
+ if tab_name:
+ self.add_tab(tab_name)
+
+ def _on_before_execute(self, code_text):
+ at_max = self._output_widget.vertical_scroll_at_max()
+ document = self._output_widget.document()
+ tmp_cursor = QtGui.QTextCursor(document)
+ tmp_cursor.movePosition(QtGui.QTextCursor.End)
+ tmp_cursor.insertText("{}\nExecuting command:\n".format(20 * "-"))
+
+ code_block_format = QtGui.QTextFrameFormat()
+ code_block_format.setBackground(QtGui.QColor(27, 27, 27))
+ code_block_format.setPadding(4)
+
+ tmp_cursor.insertFrame(code_block_format)
+ char_format = tmp_cursor.charFormat()
+ char_format.setForeground(
+ QtGui.QBrush(QtGui.QColor(114, 224, 198))
+ )
+ tmp_cursor.setCharFormat(char_format)
+ tmp_cursor.insertText(code_text)
+
+ # Create new cursor
+ tmp_cursor = QtGui.QTextCursor(document)
+ tmp_cursor.movePosition(QtGui.QTextCursor.End)
+ tmp_cursor.insertText("{}\n".format(20 * "-"))
+
+ if at_max:
+ self._output_widget.scroll_to_bottom()
+
+ def add_tab(self, tab_name, index=None):
+ widget = PythonTabWidget(self)
+ widget.before_execute.connect(self._on_before_execute)
+ if index is None:
+ if self._tab_widget.count() > 0:
+ index = self._tab_widget.currentIndex() + 1
+ else:
+ index = 0
+
+ self._tabs.append(widget)
+ self._tab_widget.insertTab(index, widget, tab_name)
+ self._tab_widget.setCurrentIndex(index)
+
+ if self._tab_widget.count() > 1:
+ self._tab_widget.setTabsClosable(True)
+ widget.setFocus()
+ return widget
+
+ def showEvent(self, event):
+ self._line_check_timer.start()
+ super(PythonInterpreterWidget, self).showEvent(event)
+ self._output_widget.scroll_to_bottom()
+
+ def closeEvent(self, event):
+ self.save_registry()
+ super(PythonInterpreterWidget, self).closeEvent(event)
+ self._line_check_timer.stop()
diff --git a/openpype/modules/default_modules/settings_module/__init__.py b/openpype/modules/default_modules/settings_module/__init__.py
new file mode 100644
index 0000000000..95510eba9d
--- /dev/null
+++ b/openpype/modules/default_modules/settings_module/__init__.py
@@ -0,0 +1,9 @@
+from .settings_action import (
+ LocalSettingsAction,
+ SettingsAction
+)
+
+__all__ = (
+ "LocalSettingsAction",
+ "SettingsAction"
+)
diff --git a/openpype/modules/default_modules/settings_module/interfaces.py b/openpype/modules/default_modules/settings_module/interfaces.py
new file mode 100644
index 0000000000..42db395649
--- /dev/null
+++ b/openpype/modules/default_modules/settings_module/interfaces.py
@@ -0,0 +1,30 @@
+from abc import abstractmethod
+from openpype.modules import OpenPypeInterface
+
+
+class ISettingsChangeListener(OpenPypeInterface):
+ """Module has plugin paths to return.
+
+ Expected result is dictionary with keys "publish", "create", "load" or
+ "actions" and values as list or string.
+ {
+ "publish": ["path/to/publish_plugins"]
+ }
+ """
+ @abstractmethod
+ def on_system_settings_save(
+ self, old_value, new_value, changes, new_value_metadata
+ ):
+ pass
+
+ @abstractmethod
+ def on_project_settings_save(
+ self, old_value, new_value, changes, project_name, new_value_metadata
+ ):
+ pass
+
+ @abstractmethod
+ def on_project_anatomy_save(
+ self, old_value, new_value, changes, project_name, new_value_metadata
+ ):
+ pass
diff --git a/openpype/modules/settings_action.py b/openpype/modules/default_modules/settings_module/settings_action.py
similarity index 80%
rename from openpype/modules/settings_action.py
rename to openpype/modules/default_modules/settings_module/settings_action.py
index 9db4a252bc..7140c57bab 100644
--- a/openpype/modules/settings_action.py
+++ b/openpype/modules/default_modules/settings_module/settings_action.py
@@ -1,40 +1,8 @@
-from abc import ABCMeta, abstractmethod
-
-import six
-
-from . import PypeModule, ITrayAction
+from openpype.modules import OpenPypeModule
+from openpype_interfaces import ITrayAction
-@six.add_metaclass(ABCMeta)
-class ISettingsChangeListener:
- """Module has plugin paths to return.
-
- Expected result is dictionary with keys "publish", "create", "load" or
- "actions" and values as list or string.
- {
- "publish": ["path/to/publish_plugins"]
- }
- """
- @abstractmethod
- def on_system_settings_save(
- self, old_value, new_value, changes, new_value_metadata
- ):
- pass
-
- @abstractmethod
- def on_project_settings_save(
- self, old_value, new_value, changes, project_name, new_value_metadata
- ):
- pass
-
- @abstractmethod
- def on_project_anatomy_save(
- self, old_value, new_value, changes, project_name, new_value_metadata
- ):
- pass
-
-
-class SettingsAction(PypeModule, ITrayAction):
+class SettingsAction(OpenPypeModule, ITrayAction):
"""Action to show Setttings tool."""
name = "settings"
label = "Studio Settings"
@@ -103,7 +71,7 @@ class SettingsAction(PypeModule, ITrayAction):
self.settings_window.reset()
-class LocalSettingsAction(PypeModule, ITrayAction):
+class LocalSettingsAction(OpenPypeModule, ITrayAction):
"""Action to show Setttings tool."""
name = "local_settings"
label = "Settings"
diff --git a/openpype/modules/slack/README.md b/openpype/modules/default_modules/slack/README.md
similarity index 100%
rename from openpype/modules/slack/README.md
rename to openpype/modules/default_modules/slack/README.md
diff --git a/openpype/modules/slack/__init__.py b/openpype/modules/default_modules/slack/__init__.py
similarity index 100%
rename from openpype/modules/slack/__init__.py
rename to openpype/modules/default_modules/slack/__init__.py
diff --git a/openpype/modules/slack/launch_hooks/pre_python2_vendor.py b/openpype/modules/default_modules/slack/launch_hooks/pre_python2_vendor.py
similarity index 95%
rename from openpype/modules/slack/launch_hooks/pre_python2_vendor.py
rename to openpype/modules/default_modules/slack/launch_hooks/pre_python2_vendor.py
index a2c1f8a9e0..0f4bc22a34 100644
--- a/openpype/modules/slack/launch_hooks/pre_python2_vendor.py
+++ b/openpype/modules/default_modules/slack/launch_hooks/pre_python2_vendor.py
@@ -1,6 +1,6 @@
import os
from openpype.lib import PreLaunchHook
-from openpype.modules.slack import SLACK_MODULE_DIR
+from openpype_modules.slack import SLACK_MODULE_DIR
class PrePython2Support(PreLaunchHook):
diff --git a/openpype/modules/slack/manifest.yml b/openpype/modules/default_modules/slack/manifest.yml
similarity index 100%
rename from openpype/modules/slack/manifest.yml
rename to openpype/modules/default_modules/slack/manifest.yml
diff --git a/openpype/modules/slack/plugins/publish/collect_slack_family.py b/openpype/modules/default_modules/slack/plugins/publish/collect_slack_family.py
similarity index 100%
rename from openpype/modules/slack/plugins/publish/collect_slack_family.py
rename to openpype/modules/default_modules/slack/plugins/publish/collect_slack_family.py
diff --git a/openpype/modules/slack/plugins/publish/integrate_slack_api.py b/openpype/modules/default_modules/slack/plugins/publish/integrate_slack_api.py
similarity index 100%
rename from openpype/modules/slack/plugins/publish/integrate_slack_api.py
rename to openpype/modules/default_modules/slack/plugins/publish/integrate_slack_api.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/.appveyor.yml b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/.appveyor.yml
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/.appveyor.yml
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/.appveyor.yml
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/.coveragerc b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/.coveragerc
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/.coveragerc
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/.coveragerc
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/.flake8 b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/.flake8
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/.flake8
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/.flake8
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/.github/contributing.md b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/.github/contributing.md
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/.github/contributing.md
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/.github/contributing.md
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/.github/issue_template.md b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/.github/issue_template.md
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/.github/issue_template.md
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/.github/issue_template.md
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/.github/maintainers_guide.md b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/.github/maintainers_guide.md
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/.github/maintainers_guide.md
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/.github/maintainers_guide.md
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/.github/pull_request_template.md b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/.github/pull_request_template.md
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/.github/pull_request_template.md
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/.github/pull_request_template.md
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/.gitignore b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/.gitignore
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/.gitignore
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/.gitignore
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/.travis.yml b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/.travis.yml
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/.travis.yml
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/.travis.yml
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/LICENSE b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/LICENSE
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/LICENSE
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/LICENSE
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/MANIFEST.in b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/MANIFEST.in
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/MANIFEST.in
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/MANIFEST.in
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/README.rst b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/README.rst
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/README.rst
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/README.rst
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/.gitignore b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/.gitignore
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/.gitignore
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/.gitignore
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/Makefile b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/Makefile
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/Makefile
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/Makefile
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/conf.py b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/conf.py
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/conf.py
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/conf.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/layout.html b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/layout.html
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/layout.html
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/layout.html
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/localtoc.html b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/localtoc.html
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/localtoc.html
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/localtoc.html
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/relations.html b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/relations.html
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/relations.html
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/relations.html
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/sidebar.html b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/sidebar.html
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/sidebar.html
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/sidebar.html
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/static/default.css_t b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/static/default.css_t
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/static/default.css_t
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/static/default.css_t
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/static/docs.css_t b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/static/docs.css_t
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/static/docs.css_t
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/static/docs.css_t
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/static/pygments.css_t b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/static/pygments.css_t
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/static/pygments.css_t
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/static/pygments.css_t
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/theme.conf b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/theme.conf
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/theme.conf
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/_themes/slack/theme.conf
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/about.rst b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/about.rst
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/about.rst
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/about.rst
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/auth.rst b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/auth.rst
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/auth.rst
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/auth.rst
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/basic_usage.rst b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/basic_usage.rst
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/basic_usage.rst
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/basic_usage.rst
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/changelog.rst b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/changelog.rst
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/changelog.rst
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/changelog.rst
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/conf.py b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/conf.py
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/conf.py
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/conf.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/conversations.rst b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/conversations.rst
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/conversations.rst
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/conversations.rst
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/faq.rst b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/faq.rst
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/faq.rst
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/faq.rst
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/index.rst b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/index.rst
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/index.rst
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/index.rst
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/make.bat b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/make.bat
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/make.bat
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/make.bat
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/metadata.rst b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/metadata.rst
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/metadata.rst
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/metadata.rst
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/real_time_messaging.rst b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/real_time_messaging.rst
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs-src/real_time_messaging.rst
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs-src/real_time_messaging.rst
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs.sh b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs.sh
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs.sh
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs.sh
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/.buildinfo b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/.buildinfo
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/.buildinfo
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/.buildinfo
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/.nojekyll b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/.nojekyll
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/.nojekyll
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/.nojekyll
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/ajax-loader.gif b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/ajax-loader.gif
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/ajax-loader.gif
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/ajax-loader.gif
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/basic.css b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/basic.css
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/basic.css
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/basic.css
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/classic.css b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/classic.css
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/classic.css
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/classic.css
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/comment-bright.png b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/comment-bright.png
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/comment-bright.png
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/comment-bright.png
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/comment-close.png b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/comment-close.png
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/comment-close.png
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/comment-close.png
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/comment.png b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/comment.png
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/comment.png
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/comment.png
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/default.css b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/default.css
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/default.css
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/default.css
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/docs.css b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/docs.css
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/docs.css
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/docs.css
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/doctools.js b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/doctools.js
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/doctools.js
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/doctools.js
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/documentation_options.js b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/documentation_options.js
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/documentation_options.js
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/documentation_options.js
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/down-pressed.png b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/down-pressed.png
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/down-pressed.png
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/down-pressed.png
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/down.png b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/down.png
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/down.png
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/down.png
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/file.png b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/file.png
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/file.png
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/file.png
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/jquery-3.2.1.js b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/jquery-3.2.1.js
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/jquery-3.2.1.js
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/jquery-3.2.1.js
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/jquery.js b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/jquery.js
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/jquery.js
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/jquery.js
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/language_data.js b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/language_data.js
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/language_data.js
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/language_data.js
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/minus.png b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/minus.png
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/minus.png
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/minus.png
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/plus.png b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/plus.png
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/plus.png
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/plus.png
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/pygments.css b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/pygments.css
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/pygments.css
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/pygments.css
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/searchtools.js b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/searchtools.js
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/searchtools.js
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/searchtools.js
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/sidebar.js b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/sidebar.js
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/sidebar.js
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/sidebar.js
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/underscore-1.3.1.js b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/underscore-1.3.1.js
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/underscore-1.3.1.js
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/underscore-1.3.1.js
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/underscore.js b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/underscore.js
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/underscore.js
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/underscore.js
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/up-pressed.png b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/up-pressed.png
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/up-pressed.png
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/up-pressed.png
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/up.png b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/up.png
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/up.png
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/up.png
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/websupport.js b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/websupport.js
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/websupport.js
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/_static/websupport.js
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/about.html b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/about.html
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/about.html
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/about.html
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/auth.html b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/auth.html
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/auth.html
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/auth.html
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/basic_usage.html b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/basic_usage.html
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/basic_usage.html
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/basic_usage.html
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/changelog.html b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/changelog.html
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/changelog.html
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/changelog.html
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/conversations.html b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/conversations.html
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/conversations.html
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/conversations.html
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/faq.html b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/faq.html
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/faq.html
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/faq.html
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/genindex.html b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/genindex.html
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/genindex.html
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/genindex.html
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/index.html b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/index.html
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/index.html
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/index.html
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/metadata.html b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/metadata.html
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/metadata.html
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/metadata.html
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/objects.inv b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/objects.inv
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/objects.inv
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/objects.inv
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/real_time_messaging.html b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/real_time_messaging.html
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/real_time_messaging.html
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/real_time_messaging.html
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/search.html b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/search.html
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/search.html
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/search.html
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/searchindex.js b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/searchindex.js
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/docs/searchindex.js
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/docs/searchindex.js
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/requirements.txt b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/requirements.txt
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/requirements.txt
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/requirements.txt
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/setup.cfg b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/setup.cfg
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/setup.cfg
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/setup.cfg
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/setup.py b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/setup.py
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/setup.py
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/setup.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/__init__.py b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/__init__.py
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/__init__.py
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/__init__.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/channel.py b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/channel.py
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/channel.py
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/channel.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/client.py b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/client.py
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/client.py
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/client.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/exceptions.py b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/exceptions.py
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/exceptions.py
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/exceptions.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/im.py b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/im.py
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/im.py
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/im.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/server.py b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/server.py
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/server.py
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/server.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/slackrequest.py b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/slackrequest.py
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/slackrequest.py
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/slackrequest.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/user.py b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/user.py
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/user.py
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/user.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/util.py b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/util.py
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/util.py
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/util.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/version.py b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/version.py
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/slackclient/version.py
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/slackclient/version.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/test_requirements.txt b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/test_requirements.txt
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/test_requirements.txt
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/test_requirements.txt
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/tests/conftest.py b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tests/conftest.py
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/tests/conftest.py
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tests/conftest.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/tests/data/channel.created.json b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tests/data/channel.created.json
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/tests/data/channel.created.json
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tests/data/channel.created.json
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/tests/data/im.created.json b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tests/data/im.created.json
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/tests/data/im.created.json
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tests/data/im.created.json
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/tests/data/rtm.start.json b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tests/data/rtm.start.json
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/tests/data/rtm.start.json
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tests/data/rtm.start.json
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/tests/data/slack_logo.png b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tests/data/slack_logo.png
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/tests/data/slack_logo.png
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tests/data/slack_logo.png
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/tests/test_channel.py b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tests/test_channel.py
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/tests/test_channel.py
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tests/test_channel.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/tests/test_server.py b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tests/test_server.py
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/tests/test_server.py
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tests/test_server.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/tests/test_slackclient.py b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tests/test_slackclient.py
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/tests/test_slackclient.py
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tests/test_slackclient.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/tests/test_slackrequest.py b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tests/test_slackrequest.py
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/tests/test_slackrequest.py
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tests/test_slackrequest.py
diff --git a/openpype/modules/slack/python2_vendor/python-slack-sdk-1/tox.ini b/openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tox.ini
similarity index 100%
rename from openpype/modules/slack/python2_vendor/python-slack-sdk-1/tox.ini
rename to openpype/modules/default_modules/slack/python2_vendor/python-slack-sdk-1/tox.ini
diff --git a/openpype/modules/slack/slack_module.py b/openpype/modules/default_modules/slack/slack_module.py
similarity index 80%
rename from openpype/modules/slack/slack_module.py
rename to openpype/modules/default_modules/slack/slack_module.py
index 9dd5a3d02b..e3f7b4ad19 100644
--- a/openpype/modules/slack/slack_module.py
+++ b/openpype/modules/default_modules/slack/slack_module.py
@@ -1,11 +1,14 @@
import os
-from openpype.modules import (
- PypeModule, IPluginPaths, ILaunchHookPaths)
+from openpype.modules import OpenPypeModule
+from openpype_interfaces import (
+ IPluginPaths,
+ ILaunchHookPaths
+)
SLACK_MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
-class SlackIntegrationModule(PypeModule, IPluginPaths, ILaunchHookPaths):
+class SlackIntegrationModule(OpenPypeModule, IPluginPaths, ILaunchHookPaths):
"""Allows sending notification to Slack channels during publishing."""
name = "slack"
diff --git a/openpype/modules/standalonepublish_action.py b/openpype/modules/default_modules/standalonepublish_action.py
similarity index 90%
rename from openpype/modules/standalonepublish_action.py
rename to openpype/modules/default_modules/standalonepublish_action.py
index 4f87f9704c..9321a415a9 100644
--- a/openpype/modules/standalonepublish_action.py
+++ b/openpype/modules/default_modules/standalonepublish_action.py
@@ -2,10 +2,11 @@ import os
import platform
import subprocess
from openpype.lib import get_pype_execute_args
-from . import PypeModule, ITrayAction
+from openpype.modules import OpenPypeModule
+from openpype_interfaces import ITrayAction
-class StandAlonePublishAction(PypeModule, ITrayAction):
+class StandAlonePublishAction(OpenPypeModule, ITrayAction):
label = "Publish"
name = "standalonepublish_tool"
diff --git a/openpype/modules/sync_server/README.md b/openpype/modules/default_modules/sync_server/README.md
similarity index 100%
rename from openpype/modules/sync_server/README.md
rename to openpype/modules/default_modules/sync_server/README.md
diff --git a/openpype/modules/default_modules/sync_server/__init__.py b/openpype/modules/default_modules/sync_server/__init__.py
new file mode 100644
index 0000000000..430ab53c91
--- /dev/null
+++ b/openpype/modules/default_modules/sync_server/__init__.py
@@ -0,0 +1,6 @@
+from .sync_server_module import SyncServerModule
+
+
+__all__ = (
+ "SyncServerModule",
+)
diff --git a/openpype/modules/default_modules/sync_server/providers/__init__.py b/openpype/modules/default_modules/sync_server/providers/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/openpype/modules/sync_server/providers/abstract_provider.py b/openpype/modules/default_modules/sync_server/providers/abstract_provider.py
similarity index 100%
rename from openpype/modules/sync_server/providers/abstract_provider.py
rename to openpype/modules/default_modules/sync_server/providers/abstract_provider.py
diff --git a/openpype/modules/sync_server/providers/gdrive.py b/openpype/modules/default_modules/sync_server/providers/gdrive.py
similarity index 100%
rename from openpype/modules/sync_server/providers/gdrive.py
rename to openpype/modules/default_modules/sync_server/providers/gdrive.py
diff --git a/openpype/modules/sync_server/providers/lib.py b/openpype/modules/default_modules/sync_server/providers/lib.py
similarity index 100%
rename from openpype/modules/sync_server/providers/lib.py
rename to openpype/modules/default_modules/sync_server/providers/lib.py
diff --git a/openpype/modules/sync_server/providers/local_drive.py b/openpype/modules/default_modules/sync_server/providers/local_drive.py
similarity index 100%
rename from openpype/modules/sync_server/providers/local_drive.py
rename to openpype/modules/default_modules/sync_server/providers/local_drive.py
diff --git a/openpype/modules/sync_server/providers/resources/folder.png b/openpype/modules/default_modules/sync_server/providers/resources/folder.png
similarity index 100%
rename from openpype/modules/sync_server/providers/resources/folder.png
rename to openpype/modules/default_modules/sync_server/providers/resources/folder.png
diff --git a/openpype/modules/sync_server/providers/resources/gdrive.png b/openpype/modules/default_modules/sync_server/providers/resources/gdrive.png
similarity index 100%
rename from openpype/modules/sync_server/providers/resources/gdrive.png
rename to openpype/modules/default_modules/sync_server/providers/resources/gdrive.png
diff --git a/openpype/modules/sync_server/providers/resources/local_drive.png b/openpype/modules/default_modules/sync_server/providers/resources/local_drive.png
similarity index 100%
rename from openpype/modules/sync_server/providers/resources/local_drive.png
rename to openpype/modules/default_modules/sync_server/providers/resources/local_drive.png
diff --git a/openpype/modules/sync_server/providers/resources/studio.png b/openpype/modules/default_modules/sync_server/providers/resources/studio.png
similarity index 100%
rename from openpype/modules/sync_server/providers/resources/studio.png
rename to openpype/modules/default_modules/sync_server/providers/resources/studio.png
diff --git a/openpype/modules/sync_server/resources/paused.png b/openpype/modules/default_modules/sync_server/resources/paused.png
similarity index 100%
rename from openpype/modules/sync_server/resources/paused.png
rename to openpype/modules/default_modules/sync_server/resources/paused.png
diff --git a/openpype/modules/sync_server/resources/synced.png b/openpype/modules/default_modules/sync_server/resources/synced.png
similarity index 100%
rename from openpype/modules/sync_server/resources/synced.png
rename to openpype/modules/default_modules/sync_server/resources/synced.png
diff --git a/openpype/modules/sync_server/sync_server.py b/openpype/modules/default_modules/sync_server/sync_server.py
similarity index 100%
rename from openpype/modules/sync_server/sync_server.py
rename to openpype/modules/default_modules/sync_server/sync_server.py
diff --git a/openpype/modules/sync_server/sync_server_module.py b/openpype/modules/default_modules/sync_server/sync_server_module.py
similarity index 99%
rename from openpype/modules/sync_server/sync_server_module.py
rename to openpype/modules/default_modules/sync_server/sync_server_module.py
index 15de4b12e9..e65a410551 100644
--- a/openpype/modules/sync_server/sync_server_module.py
+++ b/openpype/modules/default_modules/sync_server/sync_server_module.py
@@ -7,7 +7,8 @@ import copy
from avalon.api import AvalonMongoDB
-from .. import PypeModule, ITrayModule
+from openpype.modules import OpenPypeModule
+from openpype_interfaces import ITrayModule
from openpype.api import (
Anatomy,
get_project_settings,
@@ -28,7 +29,7 @@ from .utils import time_function, SyncStatus, EditableScopes
log = PypeLogger().get_logger("SyncServer")
-class SyncServerModule(PypeModule, ITrayModule):
+class SyncServerModule(OpenPypeModule, ITrayModule):
"""
Synchronization server that is syncing published files from local to
any of implemented providers (like GDrive, S3 etc.)
diff --git a/openpype/modules/sync_server/tray/app.py b/openpype/modules/default_modules/sync_server/tray/app.py
similarity index 98%
rename from openpype/modules/sync_server/tray/app.py
rename to openpype/modules/default_modules/sync_server/tray/app.py
index dd2b4be749..106076d81c 100644
--- a/openpype/modules/sync_server/tray/app.py
+++ b/openpype/modules/default_modules/sync_server/tray/app.py
@@ -5,7 +5,7 @@ from openpype.tools.settings import style
from openpype.lib import PypeLogger
from openpype import resources
-from openpype.modules.sync_server.tray.widgets import (
+from .widgets import (
SyncProjectListWidget,
SyncRepresentationSummaryWidget
)
diff --git a/openpype/modules/sync_server/tray/delegates.py b/openpype/modules/default_modules/sync_server/tray/delegates.py
similarity index 98%
rename from openpype/modules/sync_server/tray/delegates.py
rename to openpype/modules/default_modules/sync_server/tray/delegates.py
index 9316ec2c3e..461b9fffb3 100644
--- a/openpype/modules/sync_server/tray/delegates.py
+++ b/openpype/modules/default_modules/sync_server/tray/delegates.py
@@ -2,7 +2,7 @@ import os
from Qt import QtCore, QtWidgets, QtGui
from openpype.lib import PypeLogger
-from openpype.modules.sync_server.tray import lib
+from . import lib
log = PypeLogger().get_logger("SyncServer")
diff --git a/openpype/modules/sync_server/tray/lib.py b/openpype/modules/default_modules/sync_server/tray/lib.py
similarity index 100%
rename from openpype/modules/sync_server/tray/lib.py
rename to openpype/modules/default_modules/sync_server/tray/lib.py
diff --git a/openpype/modules/sync_server/tray/models.py b/openpype/modules/default_modules/sync_server/tray/models.py
similarity index 99%
rename from openpype/modules/sync_server/tray/models.py
rename to openpype/modules/default_modules/sync_server/tray/models.py
index efef039b8b..8c86d3b98f 100644
--- a/openpype/modules/sync_server/tray/models.py
+++ b/openpype/modules/default_modules/sync_server/tray/models.py
@@ -11,7 +11,7 @@ from avalon.vendor import qtawesome
from openpype.lib import PypeLogger
from openpype.api import get_local_site_id
-from openpype.modules.sync_server.tray import lib
+from . import lib
log = PypeLogger().get_logger("SyncServer")
diff --git a/openpype/modules/sync_server/tray/widgets.py b/openpype/modules/default_modules/sync_server/tray/widgets.py
similarity index 99%
rename from openpype/modules/sync_server/tray/widgets.py
rename to openpype/modules/default_modules/sync_server/tray/widgets.py
index d38416fbce..c9160733a0 100644
--- a/openpype/modules/sync_server/tray/widgets.py
+++ b/openpype/modules/default_modules/sync_server/tray/widgets.py
@@ -17,13 +17,13 @@ from openpype.lib import PypeLogger
from avalon.tools.delegates import pretty_timestamp
from avalon.vendor import qtawesome
-from openpype.modules.sync_server.tray.models import (
+from .models import (
SyncRepresentationSummaryModel,
SyncRepresentationDetailModel
)
-from openpype.modules.sync_server.tray import lib
-from openpype.modules.sync_server.tray import delegates
+from . import lib
+from . import delegates
log = PypeLogger().get_logger("SyncServer")
@@ -187,7 +187,7 @@ class _SyncRepresentationWidget(QtWidgets.QWidget):
detail_window = SyncServerDetailWindow(
self.sync_server, _id, self.model.project, parent=self)
detail_window.exec()
-
+
def _on_context_menu(self, point):
"""
Shows menu with loader actions on Right-click.
diff --git a/openpype/modules/sync_server/utils.py b/openpype/modules/default_modules/sync_server/utils.py
similarity index 100%
rename from openpype/modules/sync_server/utils.py
rename to openpype/modules/default_modules/sync_server/utils.py
diff --git a/openpype/modules/timers_manager/__init__.py b/openpype/modules/default_modules/timers_manager/__init__.py
similarity index 51%
rename from openpype/modules/timers_manager/__init__.py
rename to openpype/modules/default_modules/timers_manager/__init__.py
index 1b565cc59a..5d7a4166d3 100644
--- a/openpype/modules/timers_manager/__init__.py
+++ b/openpype/modules/default_modules/timers_manager/__init__.py
@@ -1,9 +1,7 @@
from .timers_manager import (
- ITimersManager,
TimersManager
)
__all__ = (
- "ITimersManager",
- "TimersManager"
+ "TimersManager",
)
diff --git a/openpype/modules/default_modules/timers_manager/interfaces.py b/openpype/modules/default_modules/timers_manager/interfaces.py
new file mode 100644
index 0000000000..179013cffe
--- /dev/null
+++ b/openpype/modules/default_modules/timers_manager/interfaces.py
@@ -0,0 +1,26 @@
+from abc import abstractmethod
+from openpype.modules import OpenPypeInterface
+
+
+class ITimersManager(OpenPypeInterface):
+ timer_manager_module = None
+
+ @abstractmethod
+ def stop_timer(self):
+ pass
+
+ @abstractmethod
+ def start_timer(self, data):
+ pass
+
+ def timer_started(self, data):
+ if not self.timer_manager_module:
+ return
+
+ self.timer_manager_module.timer_started(self.id, data)
+
+ def timer_stopped(self):
+ if not self.timer_manager_module:
+ return
+
+ self.timer_manager_module.timer_stopped(self.id)
diff --git a/openpype/modules/timers_manager/rest_api.py b/openpype/modules/default_modules/timers_manager/rest_api.py
similarity index 100%
rename from openpype/modules/timers_manager/rest_api.py
rename to openpype/modules/default_modules/timers_manager/rest_api.py
diff --git a/openpype/modules/timers_manager/timers_manager.py b/openpype/modules/default_modules/timers_manager/timers_manager.py
similarity index 90%
rename from openpype/modules/timers_manager/timers_manager.py
rename to openpype/modules/default_modules/timers_manager/timers_manager.py
index 92edd5aeaa..80f448095f 100644
--- a/openpype/modules/timers_manager/timers_manager.py
+++ b/openpype/modules/default_modules/timers_manager/timers_manager.py
@@ -1,37 +1,18 @@
import os
import collections
-from abc import ABCMeta, abstractmethod
-import six
-from .. import PypeModule, ITrayService, IIdleManager, IWebServerRoutes
+from openpype.modules import OpenPypeModule
+from openpype_interfaces import (
+ ITimersManager,
+ ITrayService,
+ IIdleManager,
+ IWebServerRoutes
+)
from avalon.api import AvalonMongoDB
-@six.add_metaclass(ABCMeta)
-class ITimersManager:
- timer_manager_module = None
-
- @abstractmethod
- def stop_timer(self):
- pass
-
- @abstractmethod
- def start_timer(self, data):
- pass
-
- def timer_started(self, data):
- if not self.timer_manager_module:
- return
-
- self.timer_manager_module.timer_started(self.id, data)
-
- def timer_stopped(self):
- if not self.timer_manager_module:
- return
-
- self.timer_manager_module.timer_stopped(self.id)
-
-
-class TimersManager(PypeModule, ITrayService, IIdleManager, IWebServerRoutes):
+class TimersManager(
+ OpenPypeModule, ITrayService, IIdleManager, IWebServerRoutes
+):
""" Handles about Timers.
Should be able to start/stop all timers at once.
diff --git a/openpype/modules/timers_manager/widget_user_idle.py b/openpype/modules/default_modules/timers_manager/widget_user_idle.py
similarity index 100%
rename from openpype/modules/timers_manager/widget_user_idle.py
rename to openpype/modules/default_modules/timers_manager/widget_user_idle.py
diff --git a/openpype/modules/webserver/__init__.py b/openpype/modules/default_modules/webserver/__init__.py
similarity index 52%
rename from openpype/modules/webserver/__init__.py
rename to openpype/modules/default_modules/webserver/__init__.py
index defd115e57..899b97d6d4 100644
--- a/openpype/modules/webserver/__init__.py
+++ b/openpype/modules/default_modules/webserver/__init__.py
@@ -1,10 +1,8 @@
from .webserver_module import (
- WebServerModule,
- IWebServerRoutes
+ WebServerModule
)
__all__ = (
"WebServerModule",
- "IWebServerRoutes"
)
diff --git a/openpype/modules/webserver/base_routes.py b/openpype/modules/default_modules/webserver/base_routes.py
similarity index 100%
rename from openpype/modules/webserver/base_routes.py
rename to openpype/modules/default_modules/webserver/base_routes.py
diff --git a/openpype/modules/webserver/host_console_listener.py b/openpype/modules/default_modules/webserver/host_console_listener.py
similarity index 99%
rename from openpype/modules/webserver/host_console_listener.py
rename to openpype/modules/default_modules/webserver/host_console_listener.py
index 01a8af643e..bcf4cadf6a 100644
--- a/openpype/modules/webserver/host_console_listener.py
+++ b/openpype/modules/default_modules/webserver/host_console_listener.py
@@ -5,7 +5,7 @@ import logging
from concurrent.futures import CancelledError
from Qt import QtWidgets
-from openpype.modules import ITrayService
+from openpype_interfaces import ITrayService
log = logging.getLogger(__name__)
diff --git a/openpype/modules/default_modules/webserver/interfaces.py b/openpype/modules/default_modules/webserver/interfaces.py
new file mode 100644
index 0000000000..779361a9ec
--- /dev/null
+++ b/openpype/modules/default_modules/webserver/interfaces.py
@@ -0,0 +1,9 @@
+from abc import abstractmethod
+from openpype.modules import OpenPypeInterface
+
+
+class IWebServerRoutes(OpenPypeInterface):
+ """Other modules interface to register their routes."""
+ @abstractmethod
+ def webserver_initialization(self, server_manager):
+ pass
diff --git a/openpype/modules/webserver/server.py b/openpype/modules/default_modules/webserver/server.py
similarity index 92%
rename from openpype/modules/webserver/server.py
rename to openpype/modules/default_modules/webserver/server.py
index 65c5795995..83a29e074e 100644
--- a/openpype/modules/webserver/server.py
+++ b/openpype/modules/default_modules/webserver/server.py
@@ -10,8 +10,9 @@ log = PypeLogger.get_logger("WebServer")
class WebServerManager:
"""Manger that care about web server thread."""
- def __init__(self, module):
- self.module = module
+ def __init__(self, port=None, host=None):
+ self.port = port or 8079
+ self.host = host or "localhost"
self.client = None
self.handlers = {}
@@ -24,8 +25,8 @@ class WebServerManager:
self.webserver_thread = WebServerThread(self)
@property
- def port(self):
- return self.module.port
+ def url(self):
+ return "http://{}:{}".format(self.host, self.port)
def add_route(self, *args, **kwargs):
self.app.router.add_route(*args, **kwargs)
@@ -78,6 +79,10 @@ class WebServerThread(threading.Thread):
def port(self):
return self.manager.port
+ @property
+ def host(self):
+ return self.manager.host
+
def run(self):
self.is_running = True
@@ -110,7 +115,7 @@ class WebServerThread(threading.Thread):
""" Starts runner and TCPsite """
self.runner = web.AppRunner(self.manager.app)
await self.runner.setup()
- self.site = web.TCPSite(self.runner, 'localhost', self.port)
+ self.site = web.TCPSite(self.runner, self.host, self.port)
await self.site.start()
def stop(self):
diff --git a/openpype/modules/webserver/webserver_module.py b/openpype/modules/default_modules/webserver/webserver_module.py
similarity index 77%
rename from openpype/modules/webserver/webserver_module.py
rename to openpype/modules/default_modules/webserver/webserver_module.py
index b61619acde..5bfb2d6390 100644
--- a/openpype/modules/webserver/webserver_module.py
+++ b/openpype/modules/default_modules/webserver/webserver_module.py
@@ -1,31 +1,27 @@
import os
import socket
-from abc import ABCMeta, abstractmethod
-
-import six
from openpype import resources
-from .. import PypeModule, ITrayService
+from openpype.modules import OpenPypeModule
+from openpype_interfaces import (
+ ITrayService,
+ IWebServerRoutes
+)
-@six.add_metaclass(ABCMeta)
-class IWebServerRoutes:
- """Other modules interface to register their routes."""
- @abstractmethod
- def webserver_initialization(self, server_manager):
- pass
-
-
-class WebServerModule(PypeModule, ITrayService):
+class WebServerModule(OpenPypeModule, ITrayService):
name = "webserver"
label = "WebServer"
+ webserver_url_env = "OPENPYPE_WEBSERVER_URL"
+
def initialize(self, _module_settings):
self.enabled = True
self.server_manager = None
self._host_listener = None
self.port = self.find_free_port()
+ self.webserver_url = None
def connect_with_modules(self, enabled_modules):
if not self.server_manager:
@@ -50,14 +46,12 @@ class WebServerModule(PypeModule, ITrayService):
static_prefix = "/res"
self.server_manager.add_static(static_prefix, resources.RESOURCES_DIR)
- webserver_url = "http://localhost:{}".format(self.port)
- os.environ["OPENPYPE_WEBSERVER_URL"] = webserver_url
os.environ["OPENPYPE_STATICS_SERVER"] = "{}{}".format(
- webserver_url, static_prefix
+ self.webserver_url, static_prefix
)
def _add_listeners(self):
- from openpype.modules.webserver import host_console_listener
+ from openpype_modules.webserver import host_console_listener
self._host_listener = host_console_listener.HostListener(
self.server_manager, self
@@ -71,17 +65,34 @@ class WebServerModule(PypeModule, ITrayService):
if self.server_manager:
self.server_manager.stop_server()
+ @staticmethod
+ def create_new_server_manager(port=None, host=None):
+ """Create webserver manager for passed port and host.
+
+ Args:
+ port(int): Port on which wil webserver listen.
+ host(str): Host name or IP address. Default is 'localhost'.
+
+ Returns:
+ WebServerManager: Prepared manager.
+ """
+ from .server import WebServerManager
+
+ return WebServerManager(port, host)
+
def create_server_manager(self):
if self.server_manager:
return
- from .server import WebServerManager
-
- self.server_manager = WebServerManager(self)
+ self.server_manager = self.create_new_server_manager(self.port)
self.server_manager.on_stop_callbacks.append(
self.set_service_failed_icon
)
+ webserver_url = self.server_manager.url
+ os.environ[self.webserver_url_env] = str(webserver_url)
+ self.webserver_url = webserver_url
+
@staticmethod
def find_free_port(
port_from=None, port_to=None, exclude_ports=None, host=None
diff --git a/openpype/modules/sync_server/__init__.py b/openpype/modules/sync_server/__init__.py
deleted file mode 100644
index a814f0db62..0000000000
--- a/openpype/modules/sync_server/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from openpype.modules.sync_server.sync_server_module import SyncServerModule
-
-
-def tray_init(tray_widget, main_widget):
- return SyncServerModule()
diff --git a/openpype/plugins/publish/collect_anatomy_context_data.py b/openpype/plugins/publish/collect_anatomy_context_data.py
index f121760e27..ec88d5669d 100644
--- a/openpype/plugins/publish/collect_anatomy_context_data.py
+++ b/openpype/plugins/publish/collect_anatomy_context_data.py
@@ -62,23 +62,10 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin):
"asset": asset_entity["name"],
"hierarchy": hierarchy.replace("\\", "/"),
"task": task_name,
- "username": context.data["user"]
+ "username": context.data["user"],
+ "app": context.data["hostName"]
}
- # Use AVALON_APP as first if available it is the same as host name
- # - only if is not defined use AVALON_APP_NAME (e.g. on Farm) and
- # set it back to AVALON_APP env variable
- host_name = os.environ.get("AVALON_APP")
- if not host_name:
- app_manager = ApplicationManager()
- app_name = os.environ.get("AVALON_APP_NAME")
- if app_name:
- app = app_manager.applications.get(app_name)
- if app:
- host_name = app.host_name
- os.environ["AVALON_APP"] = host_name
- context_data["app"] = host_name
-
datetime_data = context.data.get("datetimeData") or {}
context_data.update(datetime_data)
diff --git a/openpype/plugins/publish/collect_host_name.py b/openpype/plugins/publish/collect_host_name.py
new file mode 100644
index 0000000000..b731e3ed26
--- /dev/null
+++ b/openpype/plugins/publish/collect_host_name.py
@@ -0,0 +1,37 @@
+"""
+Requires:
+ None
+Provides:
+ context -> host (str)
+"""
+import os
+import pyblish.api
+
+from openpype.lib import ApplicationManager
+
+
+class CollectHostName(pyblish.api.ContextPlugin):
+ """Collect avalon host name to context."""
+
+ label = "Collect Host Name"
+ order = pyblish.api.CollectorOrder - 0.5
+
+ def process(self, context):
+ host_name = context.data.get("hostName")
+ # Don't override value if is already set
+ if host_name:
+ return
+
+ # Use AVALON_APP as first if available it is the same as host name
+ # - only if is not defined use AVALON_APP_NAME (e.g. on Farm) and
+ # set it back to AVALON_APP env variable
+ host_name = os.environ.get("AVALON_APP")
+ if not host_name:
+ app_name = os.environ.get("AVALON_APP_NAME")
+ if app_name:
+ app_manager = ApplicationManager()
+ app = app_manager.applications.get(app_name)
+ if app:
+ host_name = app.host_name
+
+ context.data["hostName"] = host_name
diff --git a/openpype/plugins/publish/collect_modules.py b/openpype/plugins/publish/collect_modules.py
new file mode 100644
index 0000000000..bec0c2b436
--- /dev/null
+++ b/openpype/plugins/publish/collect_modules.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+"""Collect OpenPype modules."""
+from openpype.modules import ModulesManager
+import pyblish.api
+
+
+class CollectModules(pyblish.api.ContextPlugin):
+ """Collect OpenPype modules."""
+
+ order = pyblish.api.CollectorOrder
+ label = "OpenPype Modules"
+
+ def process(self, context):
+ manager = ModulesManager()
+ context.data["openPypeModules"] = manager.modules_by_name
diff --git a/openpype/plugins/publish/collect_scene_version.py b/openpype/plugins/publish/collect_scene_version.py
index 669e6752f3..ca12f2900c 100644
--- a/openpype/plugins/publish/collect_scene_version.py
+++ b/openpype/plugins/publish/collect_scene_version.py
@@ -11,14 +11,22 @@ class CollectSceneVersion(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder
label = 'Collect Version'
+ hosts = [
+ "aftereffects",
+ "blender",
+ "celaction",
+ "fusion",
+ "harmony",
+ "hiero",
+ "houdini",
+ "maya",
+ "nuke",
+ "photoshop",
+ "resolve",
+ "tvpaint"
+ ]
def process(self, context):
- if "standalonepublisher" in context.data.get("host", []):
- return
-
- if "unreal" in pyblish.api.registered_hosts():
- return
-
assert context.data.get('currentFile'), "Cannot get current file"
filename = os.path.basename(context.data.get('currentFile'))
diff --git a/openpype/plugins/publish/extract_burnin.py b/openpype/plugins/publish/extract_burnin.py
index 91e0a0f3ec..625125321c 100644
--- a/openpype/plugins/publish/extract_burnin.py
+++ b/openpype/plugins/publish/extract_burnin.py
@@ -45,6 +45,7 @@ class ExtractBurnin(openpype.api.Extractor):
"fusion",
"aftereffects",
"tvpaint",
+ "webpublisher",
"aftereffects"
# "resolve"
]
@@ -96,7 +97,7 @@ class ExtractBurnin(openpype.api.Extractor):
def main_process(self, instance):
# TODO get these data from context
- host_name = os.environ["AVALON_APP"]
+ host_name = instance.context.data["hostName"]
task_name = os.environ["AVALON_TASK"]
family = self.main_family_from_instance(instance)
diff --git a/openpype/plugins/publish/extract_jpeg.py b/openpype/plugins/publish/extract_jpeg_exr.py
similarity index 96%
rename from openpype/plugins/publish/extract_jpeg.py
rename to openpype/plugins/publish/extract_jpeg_exr.py
index b1289217e6..ae691285b5 100644
--- a/openpype/plugins/publish/extract_jpeg.py
+++ b/openpype/plugins/publish/extract_jpeg_exr.py
@@ -17,7 +17,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
"imagesequence", "render", "render2d",
"source", "plate", "take"
]
- hosts = ["shell", "fusion", "resolve"]
+ hosts = ["shell", "fusion", "resolve", "webpublisher"]
enabled = False
# presetable attribute
@@ -95,7 +95,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
# use same input args like with mov
jpeg_items.extend(ffmpeg_args.get("input") or [])
# input file
- jpeg_items.append("-i {}".format(full_input_path))
+ jpeg_items.append("-i \"{}\"".format(full_input_path))
# output arguments from presets
jpeg_items.extend(ffmpeg_args.get("output") or [])
@@ -104,7 +104,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
jpeg_items.append("-vframes 1")
# output file
- jpeg_items.append(full_output_path)
+ jpeg_items.append("\"{}\"".format(full_output_path))
subprocess_jpeg = " ".join(jpeg_items)
diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py
index bdcd3b8e60..78cbea10be 100644
--- a/openpype/plugins/publish/extract_review.py
+++ b/openpype/plugins/publish/extract_review.py
@@ -45,6 +45,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
"fusion",
"tvpaint",
"resolve",
+ "webpublisher",
"aftereffects"
]
@@ -89,7 +90,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
instance.data["representations"].remove(repre)
def main_process(self, instance):
- host_name = os.environ["AVALON_APP"]
+ host_name = instance.context.data["hostName"]
task_name = os.environ["AVALON_TASK"]
family = self.main_family_from_instance(instance)
diff --git a/openpype/plugins/publish/integrate_new.py b/openpype/plugins/publish/integrate_new.py
index 3504206fe1..f9e9b43f08 100644
--- a/openpype/plugins/publish/integrate_new.py
+++ b/openpype/plugins/publish/integrate_new.py
@@ -97,7 +97,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"background",
"camerarig",
"redshiftproxy",
- "effect"
+ "effect",
+ "xgen"
]
exclude_families = ["clip"]
db_representation_context_keys = [
@@ -300,7 +301,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
task_name = io.Session.get("AVALON_TASK")
family = self.main_family_from_instance(instance)
- key_values = {"families": family, "tasks": task_name}
+ key_values = {"families": family,
+ "tasks": task_name,
+ "hosts": instance.data["anatomyData"]["app"]}
profile = filter_profiles(self.template_name_profiles, key_values,
logger=self.log)
diff --git a/openpype/plugins/publish/start_timer.py b/openpype/plugins/publish/start_timer.py
new file mode 100644
index 0000000000..6312294bf1
--- /dev/null
+++ b/openpype/plugins/publish/start_timer.py
@@ -0,0 +1,15 @@
+import pyblish.api
+
+from openpype.api import get_system_settings
+from openpype.lib import change_timer_to_current_context
+
+
+class StartTimer(pyblish.api.ContextPlugin):
+ label = "Start Timer"
+ order = pyblish.api.IntegratorOrder + 1
+ hosts = ["*"]
+
+ def process(self, context):
+ modules_settings = get_system_settings()["modules"]
+ if modules_settings["timers_manager"]["disregard_publishing"]:
+ change_timer_to_current_context()
diff --git a/openpype/plugins/publish/stop_timer.py b/openpype/plugins/publish/stop_timer.py
new file mode 100644
index 0000000000..81afd16378
--- /dev/null
+++ b/openpype/plugins/publish/stop_timer.py
@@ -0,0 +1,19 @@
+import os
+import requests
+
+import pyblish.api
+
+from openpype.api import get_system_settings
+
+
+class StopTimer(pyblish.api.ContextPlugin):
+ label = "Stop Timer"
+ order = pyblish.api.ExtractorOrder - 0.5
+ hosts = ["*"]
+
+ def process(self, context):
+ modules_settings = get_system_settings()["modules"]
+ if modules_settings["timers_manager"]["disregard_publishing"]:
+ webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL")
+ rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url)
+ requests.post(rest_api_url)
diff --git a/openpype/pype_commands.py b/openpype/pype_commands.py
index 7c47d8c613..c18fe36667 100644
--- a/openpype/pype_commands.py
+++ b/openpype/pype_commands.py
@@ -3,7 +3,7 @@
import os
import sys
import json
-from pathlib import Path
+from datetime import datetime
from openpype.lib import PypeLogger
from openpype.api import get_app_environments_for_context
@@ -35,11 +35,17 @@ class PypeCommands:
@staticmethod
def launch_eventservercli(*args):
- from openpype.modules.ftrack.ftrack_server.event_server_cli import (
+ from openpype_modules.ftrack.ftrack_server.event_server_cli import (
run_event_server
)
return run_event_server(*args)
+ @staticmethod
+ def launch_webpublisher_webservercli(*args, **kwargs):
+ from openpype.hosts.webpublisher.webserver_service.webserver_cli \
+ import (run_webserver)
+ return run_webserver(*args, **kwargs)
+
@staticmethod
def launch_standalone_publisher():
from openpype.tools import standalonepublish
@@ -104,6 +110,123 @@ class PypeCommands:
log.info("Publish finished.")
uninstall()
+ @staticmethod
+ def remotepublish(project, batch_path, host, user, targets=None):
+ """Start headless publishing.
+
+ Publish use json from passed paths argument.
+
+ Args:
+ project (str): project to publish (only single context is expected
+ per call of remotepublish
+ batch_path (str): Path batch folder. Contains subfolders with
+ resources (workfile, another subfolder 'renders' etc.)
+ targets (string): What module should be targeted
+ (to choose validator for example)
+ host (string)
+ user (string): email address for webpublisher
+
+ Raises:
+ RuntimeError: When there is no path to process.
+ """
+ if not batch_path:
+ raise RuntimeError("No publish paths specified")
+
+ from openpype import install, uninstall
+ from openpype.api import Logger
+ from openpype.lib import OpenPypeMongoConnection
+
+ # Register target and host
+ import pyblish.api
+ import pyblish.util
+
+ log = Logger.get_logger()
+
+ log.info("remotepublish command")
+
+ install()
+
+ if host:
+ pyblish.api.register_host(host)
+
+ if targets:
+ if isinstance(targets, str):
+ targets = [targets]
+ for target in targets:
+ pyblish.api.register_target(target)
+
+ os.environ["OPENPYPE_PUBLISH_DATA"] = batch_path
+ os.environ["AVALON_PROJECT"] = project
+ os.environ["AVALON_APP"] = host
+
+ import avalon.api
+ from openpype.hosts.webpublisher import api as webpublisher
+
+ avalon.api.install(webpublisher)
+
+ log.info("Running publish ...")
+
+ # Error exit as soon as any error occurs.
+ error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}"
+
+ mongo_client = OpenPypeMongoConnection.get_mongo_client()
+ database_name = os.environ["OPENPYPE_DATABASE_NAME"]
+ dbcon = mongo_client[database_name]["webpublishes"]
+
+ _, batch_id = os.path.split(batch_path)
+ _id = dbcon.insert_one({
+ "batch_id": batch_id,
+ "start_date": datetime.now(),
+ "user": user,
+ "status": "in_progress"
+ }).inserted_id
+
+ log_lines = []
+ for result in pyblish.util.publish_iter():
+ for record in result["records"]:
+ log_lines.append("{}: {}".format(
+ result["plugin"].label, record.msg))
+
+ if result["error"]:
+ log.error(error_format.format(**result))
+ uninstall()
+ log_lines.append(error_format.format(**result))
+ dbcon.update_one(
+ {"_id": _id},
+ {"$set":
+ {
+ "finish_date": datetime.now(),
+ "status": "error",
+ "log": os.linesep.join(log_lines)
+
+ }}
+ )
+ sys.exit(1)
+ else:
+ dbcon.update_one(
+ {"_id": _id},
+ {"$set":
+ {
+ "progress": max(result["progress"], 0.95),
+ "log": os.linesep.join(log_lines)
+ }}
+ )
+
+ dbcon.update_one(
+ {"_id": _id},
+ {"$set":
+ {
+ "finish_date": datetime.now(),
+ "status": "finished_ok",
+ "progress": 1,
+ "log": os.linesep.join(log_lines)
+ }}
+ )
+
+ log.info("Publish finished.")
+ uninstall()
+
+ @staticmethod
def extractenvironments(output_json_path, project, asset, task, app):
env = os.environ.copy()
if all((project, asset, task, app)):
diff --git a/openpype/settings/defaults/project_settings/deadline.json b/openpype/settings/defaults/project_settings/deadline.json
index efeafbb1ac..9fb964b494 100644
--- a/openpype/settings/defaults/project_settings/deadline.json
+++ b/openpype/settings/defaults/project_settings/deadline.json
@@ -1,4 +1,5 @@
{
+ "deadline_servers": [],
"publish": {
"ValidateExpectedFiles": {
"enabled": true,
diff --git a/openpype/settings/defaults/project_settings/ftrack.json b/openpype/settings/defaults/project_settings/ftrack.json
index 9fa78ac588..692176a585 100644
--- a/openpype/settings/defaults/project_settings/ftrack.json
+++ b/openpype/settings/defaults/project_settings/ftrack.json
@@ -304,7 +304,8 @@
"aftereffects"
],
"families": [
- "render"
+ "render",
+ "workfile"
],
"tasks": [],
"add_ftrack_family": true,
diff --git a/openpype/settings/defaults/project_settings/global.json b/openpype/settings/defaults/project_settings/global.json
index aab8c2196c..a53ae14914 100644
--- a/openpype/settings/defaults/project_settings/global.json
+++ b/openpype/settings/defaults/project_settings/global.json
@@ -151,6 +151,7 @@
"template_name_profiles": [
{
"families": [],
+ "hosts": [],
"tasks": [],
"template_name": "publish"
},
@@ -160,6 +161,7 @@
"render",
"prerender"
],
+ "hosts": [],
"tasks": [],
"template_name": "render"
}
@@ -249,6 +251,13 @@
]
},
"Workfiles": {
+ "workfile_template_profiles": [
+ {
+ "task_types": [],
+ "hosts": [],
+ "workfile_template": "work"
+ }
+ ],
"last_workfile_on_startup": [
{
"hosts": [],
diff --git a/openpype/settings/defaults/project_settings/houdini.json b/openpype/settings/defaults/project_settings/houdini.json
index 811a446e59..809c732d6f 100644
--- a/openpype/settings/defaults/project_settings/houdini.json
+++ b/openpype/settings/defaults/project_settings/houdini.json
@@ -1,4 +1,46 @@
{
+ "create": {
+ "CreateAlembicCamera": {
+ "enabled": true,
+ "defaults": []
+ },
+ "CreateCompositeSequence": {
+ "enabled": true,
+ "defaults": []
+ },
+ "CreatePointCache": {
+ "enabled": true,
+ "defaults": []
+ },
+ "CreateRedshiftROP": {
+ "enabled": true,
+ "defaults": []
+ },
+ "CreateRemotePublish": {
+ "enabled": true,
+ "defaults": []
+ },
+ "CreateVDBCache": {
+ "enabled": true,
+ "defaults": []
+ },
+ "CreateUSD": {
+ "enabled": false,
+ "defaults": []
+ },
+ "CreateUSDModel": {
+ "enabled": false,
+ "defaults": []
+ },
+ "USDCreateShadingWorkspace": {
+ "enabled": false,
+ "defaults": []
+ },
+ "CreateUSDRender": {
+ "enabled": false,
+ "defaults": []
+ }
+ },
"publish": {
"ValidateContainers": {
"enabled": true,
diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json
index 592b424fd8..f9911897d7 100644
--- a/openpype/settings/defaults/project_settings/maya.json
+++ b/openpype/settings/defaults/project_settings/maya.json
@@ -44,6 +44,12 @@
"Main"
]
},
+ "CreateRender": {
+ "enabled": true,
+ "defaults": [
+ "Main"
+ ]
+ },
"CreateAnimation": {
"enabled": true,
"defaults": [
@@ -94,12 +100,6 @@
"Main"
]
},
- "CreateRender": {
- "enabled": true,
- "defaults": [
- "Main"
- ]
- },
"CreateRenderSetup": {
"enabled": true,
"defaults": [
diff --git a/openpype/settings/defaults/project_settings/webpublisher.json b/openpype/settings/defaults/project_settings/webpublisher.json
new file mode 100644
index 0000000000..f57b79a609
--- /dev/null
+++ b/openpype/settings/defaults/project_settings/webpublisher.json
@@ -0,0 +1,120 @@
+{
+ "publish": {
+ "CollectPublishedFiles": {
+ "task_type_to_family": {
+ "Animation": {
+ "workfile": {
+ "is_sequence": false,
+ "extensions": [
+ "tvp"
+ ],
+ "families": [],
+ "tags": [],
+ "subset_template_name": ""
+ },
+ "render": {
+ "is_sequence": true,
+ "extensions": [
+ "png",
+ "exr",
+ "tiff",
+ "tif"
+ ],
+ "families": [
+ "review"
+ ],
+ "tags": [
+ "review"
+ ],
+ "subset_template_name": ""
+ }
+ },
+ "Compositing": {
+ "workfile": {
+ "is_sequence": false,
+ "extensions": [
+ "aep"
+ ],
+ "families": [],
+ "tags": [],
+ "subset_template_name": ""
+ },
+ "render": {
+ "is_sequence": true,
+ "extensions": [
+ "png",
+ "exr",
+ "tiff",
+ "tif"
+ ],
+ "families": [
+ "review"
+ ],
+ "tags": [
+ "review"
+ ],
+ "subset_template_name": ""
+ }
+ },
+ "Layout": {
+ "workfile": {
+ "is_sequence": false,
+ "extensions": [
+ "psd"
+ ],
+ "families": [],
+ "tags": [],
+ "subset_template_name": ""
+ },
+ "image": {
+ "is_sequence": false,
+ "extensions": [
+ "png",
+ "jpg",
+ "jpeg",
+ "tiff",
+ "tif"
+ ],
+ "families": [
+ "review"
+ ],
+ "tags": [
+ "review"
+ ],
+ "subset_template_name": ""
+ }
+ },
+ "default_task_type": {
+ "workfile": {
+ "is_sequence": false,
+ "extensions": [
+ "tvp"
+ ],
+ "families": [],
+ "tags": [],
+ "subset_template_name": "{family}{Variant}"
+ },
+ "render": {
+ "is_sequence": true,
+ "extensions": [
+ "png",
+ "exr",
+ "tiff",
+ "tif"
+ ],
+ "families": [
+ "review"
+ ],
+ "tags": [
+ "review"
+ ],
+ "subset_template_name": "{family}{Variant}"
+ }
+ },
+ "__dynamic_keys_labels__": {
+ "default_task_type": "Default task type"
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/openpype/settings/defaults/system_settings/modules.json b/openpype/settings/defaults/system_settings/modules.json
index 1b74b4695c..a0ba607edc 100644
--- a/openpype/settings/defaults/system_settings/modules.json
+++ b/openpype/settings/defaults/system_settings/modules.json
@@ -128,7 +128,8 @@
"enabled": true,
"auto_stop": true,
"full_time": 15.0,
- "message_time": 0.5
+ "message_time": 0.5,
+ "disregard_publishing": false
},
"clockify": {
"enabled": false,
@@ -140,7 +141,9 @@
},
"deadline": {
"enabled": true,
- "DEADLINE_REST_URL": "http://localhost:8082"
+ "deadline_urls": {
+ "default": "http://127.0.0.1:8082"
+ }
},
"muster": {
"enabled": false,
@@ -171,4 +174,4 @@
"slack": {
"enabled": false
}
-}
\ No newline at end of file
+}
diff --git a/openpype/settings/entities/__init__.py b/openpype/settings/entities/__init__.py
index c0eef15e69..8c30d5044c 100644
--- a/openpype/settings/entities/__init__.py
+++ b/openpype/settings/entities/__init__.py
@@ -105,7 +105,9 @@ from .enum_entity import (
AppsEnumEntity,
ToolsEnumEntity,
TaskTypeEnumEntity,
- ProvidersEnum
+ ProvidersEnum,
+ DeadlineUrlEnumEntity,
+ AnatomyTemplatesEnumEntity
)
from .list_entity import ListEntity
@@ -160,6 +162,8 @@ __all__ = (
"ToolsEnumEntity",
"TaskTypeEnumEntity",
"ProvidersEnum",
+ "DeadlineUrlEnumEntity",
+ "AnatomyTemplatesEnumEntity",
"ListEntity",
diff --git a/openpype/settings/entities/base_entity.py b/openpype/settings/entities/base_entity.py
index b4ebe885f5..851684520b 100644
--- a/openpype/settings/entities/base_entity.py
+++ b/openpype/settings/entities/base_entity.py
@@ -174,6 +174,14 @@ class BaseItemEntity(BaseEntity):
roles = [roles]
self.roles = roles
+ @abstractmethod
+ def collect_static_entities_by_path(self):
+ """Collect all paths of all static path entities.
+
+ Static path is entity which is not dynamic or under dynamic entity.
+ """
+ pass
+
@property
def require_restart_on_change(self):
return self._require_restart_on_change
diff --git a/openpype/settings/entities/dict_conditional.py b/openpype/settings/entities/dict_conditional.py
index b7c64f173f..988464d059 100644
--- a/openpype/settings/entities/dict_conditional.py
+++ b/openpype/settings/entities/dict_conditional.py
@@ -327,6 +327,11 @@ class DictConditionalEntity(ItemEntity):
self.non_gui_children[item_key][child_obj.key] = child_obj
+ def collect_static_entities_by_path(self):
+ if self.is_dynamic_item or self.is_in_dynamic_item:
+ return {}
+ return {self.path: self}
+
def get_child_path(self, child_obj):
"""Get hierarchical path of child entity.
diff --git a/openpype/settings/entities/dict_immutable_keys_entity.py b/openpype/settings/entities/dict_immutable_keys_entity.py
index bde5304787..73b08f101a 100644
--- a/openpype/settings/entities/dict_immutable_keys_entity.py
+++ b/openpype/settings/entities/dict_immutable_keys_entity.py
@@ -203,6 +203,18 @@ class DictImmutableKeysEntity(ItemEntity):
)
self.show_borders = self.schema_data.get("show_borders", True)
+ def collect_static_entities_by_path(self):
+ output = {}
+ if self.is_dynamic_item or self.is_in_dynamic_item:
+ return output
+
+ output[self.path] = self
+ for children in self.non_gui_children.values():
+ result = children.collect_static_entities_by_path()
+ if result:
+ output.update(result)
+ return output
+
def get_child_path(self, child_obj):
"""Get hierarchical path of child entity.
diff --git a/openpype/settings/entities/enum_entity.py b/openpype/settings/entities/enum_entity.py
index 361ad38dc5..cb532c5ae0 100644
--- a/openpype/settings/entities/enum_entity.py
+++ b/openpype/settings/entities/enum_entity.py
@@ -419,7 +419,7 @@ class ProvidersEnum(BaseEnumEntity):
self.placeholder = None
def _get_enum_values(self):
- from openpype.modules.sync_server.providers import lib as lib_providers
+ from openpype_modules.sync_server.providers import lib as lib_providers
providers = lib_providers.factory.providers
@@ -443,3 +443,120 @@ class ProvidersEnum(BaseEnumEntity):
self._current_value = value_on_not_set
self.value_on_not_set = value_on_not_set
+
+
+class DeadlineUrlEnumEntity(BaseEnumEntity):
+ schema_types = ["deadline_url-enum"]
+
+ def _item_initalization(self):
+ self.multiselection = self.schema_data.get("multiselection", True)
+
+ self.enum_items = []
+ self.valid_keys = set()
+
+ if self.multiselection:
+ self.valid_value_types = (list,)
+ self.value_on_not_set = []
+ else:
+ self.valid_value_types = (STRING_TYPE,)
+ self.value_on_not_set = ""
+
+ # GUI attribute
+ self.placeholder = self.schema_data.get("placeholder")
+
+ def _get_enum_values(self):
+ deadline_urls_entity = self.get_entity_from_path(
+ "system_settings/modules/deadline/deadline_urls"
+ )
+
+ valid_keys = set()
+ enum_items_list = []
+ for server_name, url_entity in deadline_urls_entity.items():
+ enum_items_list.append(
+ {server_name: "{}: {}".format(server_name, url_entity.value)})
+ valid_keys.add(server_name)
+ return enum_items_list, valid_keys
+
+ def set_override_state(self, *args, **kwargs):
+ super(DeadlineUrlEnumEntity, self).set_override_state(*args, **kwargs)
+
+ self.enum_items, self.valid_keys = self._get_enum_values()
+ if self.multiselection:
+ new_value = []
+ for key in self._current_value:
+ if key in self.valid_keys:
+ new_value.append(key)
+ self._current_value = new_value
+
+ else:
+ if not self.valid_keys:
+ self._current_value = ""
+
+ elif self._current_value not in self.valid_keys:
+ self._current_value = tuple(self.valid_keys)[0]
+
+
+class AnatomyTemplatesEnumEntity(BaseEnumEntity):
+ schema_types = ["anatomy-templates-enum"]
+
+ def _item_initalization(self):
+ self.multiselection = False
+
+ self.enum_items = []
+ self.valid_keys = set()
+
+ enum_default = self.schema_data.get("default") or "work"
+
+ self.value_on_not_set = enum_default
+ self.valid_value_types = (STRING_TYPE,)
+
+ # GUI attribute
+ self.placeholder = self.schema_data.get("placeholder")
+
+ def _get_enum_values(self):
+ templates_entity = self.get_entity_from_path(
+ "project_anatomy/templates"
+ )
+
+ valid_keys = set()
+ enum_items_list = []
+
+ others_entity = None
+ for key, entity in templates_entity.items():
+ # Skip defaults key
+ if key == "defaults":
+ continue
+
+ if key == "others":
+ others_entity = entity
+ continue
+
+ label = key
+ if hasattr(entity, "label"):
+ label = entity.label or label
+
+ enum_items_list.append({key: label})
+ valid_keys.add(key)
+
+ if others_entity is not None:
+ get_child_label_func = getattr(
+ others_entity, "get_child_label", None
+ )
+ for key, child_entity in others_entity.items():
+ label = key
+ if callable(get_child_label_func):
+ label = get_child_label_func(child_entity) or label
+
+ enum_items_list.append({key: label})
+ valid_keys.add(key)
+
+ return enum_items_list, valid_keys
+
+ def set_override_state(self, *args, **kwargs):
+ super(AnatomyTemplatesEnumEntity, self).set_override_state(
+ *args, **kwargs
+ )
+
+ self.enum_items, self.valid_keys = self._get_enum_values()
+ if self._current_value not in self.valid_keys:
+ self._current_value = self.value_on_not_set
diff --git a/openpype/settings/entities/input_entities.py b/openpype/settings/entities/input_entities.py
index 6952529963..336d1f5c1e 100644
--- a/openpype/settings/entities/input_entities.py
+++ b/openpype/settings/entities/input_entities.py
@@ -53,6 +53,11 @@ class EndpointEntity(ItemEntity):
def _settings_value(self):
pass
+ def collect_static_entities_by_path(self):
+ if self.is_dynamic_item or self.is_in_dynamic_item:
+ return {}
+ return {self.path: self}
+
def settings_value(self):
if self._override_state is OverrideState.NOT_DEFINED:
return NOT_SET
diff --git a/openpype/settings/entities/item_entities.py b/openpype/settings/entities/item_entities.py
index 7e84f8c801..ac6b3e76dd 100644
--- a/openpype/settings/entities/item_entities.py
+++ b/openpype/settings/entities/item_entities.py
@@ -106,6 +106,9 @@ class PathEntity(ItemEntity):
self.valid_value_types = valid_value_types
self.child_obj = self.create_schema_object(item_schema, self)
+ def collect_static_entities_by_path(self):
+ return self.child_obj.collect_static_entities_by_path()
+
def get_child_path(self, _child_obj):
return self.path
@@ -192,6 +195,24 @@ class PathEntity(ItemEntity):
class ListStrictEntity(ItemEntity):
schema_types = ["list-strict"]
+ def __getitem__(self, idx):
+ if not isinstance(idx, int):
+ idx = int(idx)
+ return self.children[idx]
+
+ def __setitem__(self, idx, value):
+ if not isinstance(idx, int):
+ idx = int(idx)
+ self.children[idx].set(value)
+
+ def get(self, idx, default=None):
+ if not isinstance(idx, int):
+ idx = int(idx)
+
+ if idx < len(self.children):
+ return self.children[idx]
+ return default
+
def _item_initalization(self):
self.valid_value_types = (list, )
self.require_key = True
@@ -222,6 +243,18 @@ class ListStrictEntity(ItemEntity):
super(ListStrictEntity, self).schema_validations()
+ def collect_static_entities_by_path(self):
+ output = {}
+ if self.is_dynamic_item or self.is_in_dynamic_item:
+ return output
+
+ output[self.path] = self
+ for child_obj in self.children:
+ result = child_obj.collect_static_entities_by_path()
+ if result:
+ output.update(result)
+ return output
+
def get_child_path(self, child_obj):
result_idx = None
for idx, _child_obj in enumerate(self.children):
diff --git a/openpype/settings/entities/list_entity.py b/openpype/settings/entities/list_entity.py
index b07441251a..b06f4d7a2e 100644
--- a/openpype/settings/entities/list_entity.py
+++ b/openpype/settings/entities/list_entity.py
@@ -45,6 +45,24 @@ class ListEntity(EndpointEntity):
return True
return False
+ def __getitem__(self, idx):
+ if not isinstance(idx, int):
+ idx = int(idx)
+ return self.children[idx]
+
+ def __setitem__(self, idx, value):
+ if not isinstance(idx, int):
+ idx = int(idx)
+ self.children[idx].set(value)
+
+ def get(self, idx, default=None):
+ if not isinstance(idx, int):
+ idx = int(idx)
+
+ if idx < len(self.children):
+ return self.children[idx]
+ return default
+
def index(self, item):
if isinstance(item, BaseEntity):
for idx, child_entity in enumerate(self.children):
diff --git a/openpype/settings/entities/root_entities.py b/openpype/settings/entities/root_entities.py
index 00677480e8..4a06d2d591 100644
--- a/openpype/settings/entities/root_entities.py
+++ b/openpype/settings/entities/root_entities.py
@@ -242,6 +242,14 @@ class RootEntity(BaseItemEntity):
"""Whan any children has changed."""
self.on_change()
+ def collect_static_entities_by_path(self):
+ output = {}
+ for child_obj in self.non_gui_children.values():
+ result = child_obj.collect_static_entities_by_path()
+ if result:
+ output.update(result)
+ return output
+
def get_child_path(self, child_entity):
"""Return path of children entity"""
for key, _child_entity in self.non_gui_children.items():
diff --git a/openpype/settings/entities/schemas/README.md b/openpype/settings/entities/schemas/README.md
index 2034d4e463..05605f8ce1 100644
--- a/openpype/settings/entities/schemas/README.md
+++ b/openpype/settings/entities/schemas/README.md
@@ -380,6 +380,20 @@ How output of the schema could look like on save:
}
```
+### anatomy-templates-enum
+- enumeration of all available anatomy template keys
+- has a single selection mode
+- it is possible to define default value `default`
+ - `"work"` is used if default value is not specified
+```
+{
+ "key": "host",
+ "label": "Host name",
+ "type": "anatomy-templates-enum",
+ "default": "publish"
+}
+```
+
### hosts-enum
- enumeration of available hosts
- multiselection can be allowed with setting key `"multiselection"` to `True` (Default: `False`)
diff --git a/openpype/settings/entities/schemas/projects_schema/schema_main.json b/openpype/settings/entities/schemas/projects_schema/schema_main.json
index 4a8a9d496e..575cfc9e72 100644
--- a/openpype/settings/entities/schemas/projects_schema/schema_main.json
+++ b/openpype/settings/entities/schemas/projects_schema/schema_main.json
@@ -118,6 +118,10 @@
"type": "schema",
"name": "schema_project_standalonepublisher"
},
+ {
+ "type": "schema",
+ "name": "schema_project_webpublisher"
+ },
{
"type": "schema",
"name": "schema_project_unreal"
diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json
index 53c6bf48c0..eb9eeb5448 100644
--- a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json
+++ b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json
@@ -5,6 +5,12 @@
"collapsible": true,
"is_file": true,
"children": [
+ {
+ "type": "deadline_url-enum",
+ "key": "deadline_servers",
+ "label": "Deadline Webservice URLs",
+        "multiselection": true
+ },
{
"type": "dict",
"collapsible": true,
diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_houdini.json b/openpype/settings/entities/schemas/projects_schema/schema_project_houdini.json
index c6de257a61..cad99dde22 100644
--- a/openpype/settings/entities/schemas/projects_schema/schema_project_houdini.json
+++ b/openpype/settings/entities/schemas/projects_schema/schema_project_houdini.json
@@ -5,6 +5,10 @@
"label": "Houdini",
"is_file": true,
"children": [
+ {
+ "type": "schema",
+ "name": "schema_houdini_create"
+ },
{
"type": "dict",
"collapsible": true,
diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json b/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json
new file mode 100644
index 0000000000..91337da2b2
--- /dev/null
+++ b/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json
@@ -0,0 +1,69 @@
+{
+ "type": "dict",
+ "collapsible": true,
+ "key": "webpublisher",
+ "label": "Web Publisher",
+ "is_file": true,
+ "children": [
+ {
+ "type": "dict",
+ "collapsible": true,
+ "key": "publish",
+ "label": "Publish plugins",
+ "children": [
+ {
+ "type": "dict",
+ "collapsible": true,
+ "key": "CollectPublishedFiles",
+ "label": "Collect Published Files",
+ "children": [
+ {
+ "type": "dict-modifiable",
+ "collapsible": true,
+ "key": "task_type_to_family",
+ "label": "Task type to family mapping",
+ "collapsible_key": true,
+ "object_type": {
+ "type": "dict-modifiable",
+ "collapsible": false,
+ "key": "task_type",
+ "collapsible_key": false,
+ "object_type": {
+ "type": "dict",
+ "children": [
+ {
+ "type": "boolean",
+ "key": "is_sequence",
+ "label": "Is Sequence"
+ },
+ {
+ "type": "list",
+ "key": "extensions",
+ "label": "Extensions",
+ "object_type": "text"
+ },
+ {
+ "type": "list",
+ "key": "families",
+ "label": "Families",
+ "object_type": "text"
+ },
+ {
+ "type": "schema",
+ "name": "schema_representation_tags"
+ },
+ {
+ "type": "text",
+ "key": "subset_template_name",
+ "label": "Subset template name"
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json
index d265988534..4b91072eb6 100644
--- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json
+++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json
@@ -496,6 +496,12 @@
"type": "list",
"object_type": "text"
},
+ {
+ "type": "hosts-enum",
+ "key": "hosts",
+ "label": "Hosts",
+ "multiselection": true
+ },
{
"key": "tasks",
"label": "Task names",
diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json
index 9e39eeb39e..245560f115 100644
--- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json
+++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json
@@ -65,6 +65,37 @@
"key": "Workfiles",
"label": "Workfiles",
"children": [
+ {
+ "type": "list",
+ "key": "workfile_template_profiles",
+ "label": "Workfile template profiles",
+ "use_label_wrap": true,
+ "object_type": {
+ "type": "dict",
+ "children": [
+ {
+ "key": "task_types",
+ "label": "Task types",
+ "type": "task-types-enum"
+ },
+ {
+ "type": "hosts-enum",
+ "key": "hosts",
+ "label": "Hosts",
+ "multiselection": true
+ },
+ {
+ "type": "splitter"
+ },
+ {
+ "key": "workfile_template",
+ "label": "Workfile template",
+ "type": "anatomy-templates-enum",
+ "multiselection": false
+ }
+ ]
+ }
+ },
{
"type": "list",
"key": "last_workfile_on_startup",
diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_houdini_create.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_houdini_create.json
new file mode 100644
index 0000000000..72b8032d4b
--- /dev/null
+++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_houdini_create.json
@@ -0,0 +1,54 @@
+{
+ "type": "dict",
+ "collapsible": true,
+ "key": "create",
+ "label": "Creator plugins",
+ "children": [
+ {
+ "type": "schema_template",
+ "name": "template_create_plugin",
+ "template_data": [
+ {
+ "key": "CreateAlembicCamera",
+ "label": "Create Alembic Camera"
+ },
+ {
+ "key": "CreateCompositeSequence",
+ "label": "Create Composite (Image Sequence)"
+ },
+ {
+ "key": "CreatePointCache",
+ "label": "Create Point Cache"
+ },
+ {
+ "key": "CreateRedshiftROP",
+ "label": "Create Redshift ROP"
+ },
+ {
+ "key": "CreateRemotePublish",
+ "label": "Create Remote Publish"
+ },
+ {
+ "key": "CreateVDBCache",
+ "label": "Create VDB Cache"
+ },
+ {
+ "key": "CreateUSD",
+ "label": "Create USD"
+ },
+ {
+ "key": "CreateUSDModel",
+ "label": "Create USD Model"
+ },
+ {
+ "key": "USDCreateShadingWorkspace",
+ "label": "Create USD Shading Workspace"
+ },
+ {
+ "key": "CreateUSDRender",
+ "label": "Create USD Render"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json
index d728f1def3..44a35af7c1 100644
--- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json
+++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json
@@ -29,6 +29,26 @@
}
]
},
+ {
+ "type": "dict",
+ "collapsible": true,
+ "key": "CreateRender",
+ "label": "Create Render",
+ "checkbox_key": "enabled",
+ "children": [
+ {
+ "type": "boolean",
+ "key": "enabled",
+ "label": "Enabled"
+ },
+ {
+ "type": "list",
+ "key": "defaults",
+ "label": "Default Subsets",
+ "object_type": "text"
+ }
+ ]
+ },
{
"type": "schema_template",
"name": "template_create_plugin",
@@ -65,10 +85,6 @@
"key": "CreatePointCache",
"label": "Create Cache"
},
- {
- "key": "CreateRender",
- "label": "Create Render"
- },
{
"key": "CreateRenderSetup",
"label": "Create Render Setup"
diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json
index b65de747e5..7607e1a8c1 100644
--- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json
+++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json
@@ -8,7 +8,10 @@
"burnin": "Add burnins"
},
{
- "ftrackreview": "Add to Ftrack"
+ "review": "Create review"
+ },
+ {
+ "ftrackreview": "Add review to Ftrack"
},
{
"delete": "Delete output"
diff --git a/openpype/settings/entities/schemas/system_schema/schema_modules.json b/openpype/settings/entities/schemas/system_schema/schema_modules.json
index 7d734ff4fd..dd85f9351a 100644
--- a/openpype/settings/entities/schemas/system_schema/schema_modules.json
+++ b/openpype/settings/entities/schemas/system_schema/schema_modules.json
@@ -60,6 +60,11 @@
"decimal": 2,
"key": "message_time",
"label": "When dialog will show"
+ },
+ {
+ "type": "boolean",
+ "key": "disregard_publishing",
+ "label": "Disregard Publishing"
}
]
},
@@ -130,9 +135,11 @@
"label": "Enabled"
},
{
- "type": "text",
- "key": "DEADLINE_REST_URL",
- "label": "Deadline Resl URL"
+ "type": "dict-modifiable",
+ "object_type": "text",
+ "key": "deadline_urls",
+ "required_keys": ["default"],
+ "label": "Deadline Webservice URLs"
}
]
},
diff --git a/openpype/settings/lib.py b/openpype/settings/lib.py
index 5c2c0dcd94..4a363910b8 100644
--- a/openpype/settings/lib.py
+++ b/openpype/settings/lib.py
@@ -114,7 +114,8 @@ def save_studio_settings(data):
SaveWarningExc: If any module raises the exception.
"""
# Notify Pype modules
- from openpype.modules import ModulesManager, ISettingsChangeListener
+ from openpype.modules import ModulesManager
+ from openpype_interfaces import ISettingsChangeListener
old_data = get_system_settings()
default_values = get_default_settings()[SYSTEM_SETTINGS_KEY]
@@ -161,7 +162,8 @@ def save_project_settings(project_name, overrides):
SaveWarningExc: If any module raises the exception.
"""
# Notify Pype modules
- from openpype.modules import ModulesManager, ISettingsChangeListener
+ from openpype.modules import ModulesManager
+ from openpype_interfaces import ISettingsChangeListener
default_values = get_default_settings()[PROJECT_SETTINGS_KEY]
if project_name:
@@ -222,7 +224,8 @@ def save_project_anatomy(project_name, anatomy_data):
SaveWarningExc: If any module raises the exception.
"""
# Notify Pype modules
- from openpype.modules import ModulesManager, ISettingsChangeListener
+ from openpype.modules import ModulesManager
+ from openpype_interfaces import ISettingsChangeListener
default_values = get_default_settings()[PROJECT_ANATOMY_KEY]
if project_name:
diff --git a/openpype/style/__init__.py b/openpype/style/__init__.py
index 89a210bee9..87547b1a90 100644
--- a/openpype/style/__init__.py
+++ b/openpype/style/__init__.py
@@ -65,6 +65,7 @@ def _load_font():
font_dirs = []
font_dirs.append(os.path.join(fonts_dirpath, "Montserrat"))
font_dirs.append(os.path.join(fonts_dirpath, "Spartan"))
+ font_dirs.append(os.path.join(fonts_dirpath, "RobotoMono", "static"))
loaded_fonts = []
for font_dir in font_dirs:
diff --git a/openpype/style/fonts/RobotoMono/LICENSE.txt b/openpype/style/fonts/RobotoMono/LICENSE.txt
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/openpype/style/fonts/RobotoMono/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/openpype/style/fonts/RobotoMono/README.txt b/openpype/style/fonts/RobotoMono/README.txt
new file mode 100644
index 0000000000..1bc1b1cfa2
--- /dev/null
+++ b/openpype/style/fonts/RobotoMono/README.txt
@@ -0,0 +1,77 @@
+Roboto Mono Variable Font
+=========================
+
+This download contains Roboto Mono as both variable fonts and static fonts.
+
+Roboto Mono is a variable font with this axis:
+ wght
+
+This means all the styles are contained in these files:
+ RobotoMono-VariableFont_wght.ttf
+ RobotoMono-Italic-VariableFont_wght.ttf
+
+If your app fully supports variable fonts, you can now pick intermediate styles
+that aren't available as static fonts. Not all apps support variable fonts, and
+in those cases you can use the static font files for Roboto Mono:
+ static/RobotoMono-Thin.ttf
+ static/RobotoMono-ExtraLight.ttf
+ static/RobotoMono-Light.ttf
+ static/RobotoMono-Regular.ttf
+ static/RobotoMono-Medium.ttf
+ static/RobotoMono-SemiBold.ttf
+ static/RobotoMono-Bold.ttf
+ static/RobotoMono-ThinItalic.ttf
+ static/RobotoMono-ExtraLightItalic.ttf
+ static/RobotoMono-LightItalic.ttf
+ static/RobotoMono-Italic.ttf
+ static/RobotoMono-MediumItalic.ttf
+ static/RobotoMono-SemiBoldItalic.ttf
+ static/RobotoMono-BoldItalic.ttf
+
+Get started
+-----------
+
+1. Install the font files you want to use
+
+2. Use your app's font picker to view the font family and all the
+available styles
+
+Learn more about variable fonts
+-------------------------------
+
+ https://developers.google.com/web/fundamentals/design-and-ux/typography/variable-fonts
+ https://variablefonts.typenetwork.com
+ https://medium.com/variable-fonts
+
+In desktop apps
+
+ https://theblog.adobe.com/can-variable-fonts-illustrator-cc
+ https://helpx.adobe.com/nz/photoshop/using/fonts.html#variable_fonts
+
+Online
+
+ https://developers.google.com/fonts/docs/getting_started
+ https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Fonts/Variable_Fonts_Guide
+ https://developer.microsoft.com/en-us/microsoft-edge/testdrive/demos/variable-fonts
+
+Installing fonts
+
+ MacOS: https://support.apple.com/en-us/HT201749
+ Linux: https://www.google.com/search?q=how+to+install+a+font+on+gnu%2Blinux
+ Windows: https://support.microsoft.com/en-us/help/314960/how-to-install-or-remove-a-font-in-windows
+
+Android Apps
+
+ https://developers.google.com/fonts/docs/android
+ https://developer.android.com/guide/topics/ui/look-and-feel/downloadable-fonts
+
+License
+-------
+Please read the full license text (LICENSE.txt) to understand the permissions,
+restrictions and requirements for usage, redistribution, and modification.
+
+You can use them freely in your products & projects - print or digital,
+commercial or otherwise.
+
+This isn't legal advice, please consider consulting a lawyer and see the full
+license for all details.
diff --git a/openpype/style/fonts/RobotoMono/RobotoMono-Italic-VariableFont_wght.ttf b/openpype/style/fonts/RobotoMono/RobotoMono-Italic-VariableFont_wght.ttf
new file mode 100644
index 0000000000..d30055a9e8
Binary files /dev/null and b/openpype/style/fonts/RobotoMono/RobotoMono-Italic-VariableFont_wght.ttf differ
diff --git a/openpype/style/fonts/RobotoMono/RobotoMono-VariableFont_wght.ttf b/openpype/style/fonts/RobotoMono/RobotoMono-VariableFont_wght.ttf
new file mode 100644
index 0000000000..d2b4746196
Binary files /dev/null and b/openpype/style/fonts/RobotoMono/RobotoMono-VariableFont_wght.ttf differ
diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-Bold.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-Bold.ttf
new file mode 100644
index 0000000000..900fce6848
Binary files /dev/null and b/openpype/style/fonts/RobotoMono/static/RobotoMono-Bold.ttf differ
diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-BoldItalic.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-BoldItalic.ttf
new file mode 100644
index 0000000000..4bfe29ae89
Binary files /dev/null and b/openpype/style/fonts/RobotoMono/static/RobotoMono-BoldItalic.ttf differ
diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-ExtraLight.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-ExtraLight.ttf
new file mode 100644
index 0000000000..d535884553
Binary files /dev/null and b/openpype/style/fonts/RobotoMono/static/RobotoMono-ExtraLight.ttf differ
diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-ExtraLightItalic.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-ExtraLightItalic.ttf
new file mode 100644
index 0000000000..b28960a0ee
Binary files /dev/null and b/openpype/style/fonts/RobotoMono/static/RobotoMono-ExtraLightItalic.ttf differ
diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-Italic.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-Italic.ttf
new file mode 100644
index 0000000000..4ee4dc49b4
Binary files /dev/null and b/openpype/style/fonts/RobotoMono/static/RobotoMono-Italic.ttf differ
diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-Light.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-Light.ttf
new file mode 100644
index 0000000000..276af4c55a
Binary files /dev/null and b/openpype/style/fonts/RobotoMono/static/RobotoMono-Light.ttf differ
diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-LightItalic.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-LightItalic.ttf
new file mode 100644
index 0000000000..a2801c2168
Binary files /dev/null and b/openpype/style/fonts/RobotoMono/static/RobotoMono-LightItalic.ttf differ
diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-Medium.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-Medium.ttf
new file mode 100644
index 0000000000..8461be77a3
Binary files /dev/null and b/openpype/style/fonts/RobotoMono/static/RobotoMono-Medium.ttf differ
diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-MediumItalic.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-MediumItalic.ttf
new file mode 100644
index 0000000000..a3bfaa115a
Binary files /dev/null and b/openpype/style/fonts/RobotoMono/static/RobotoMono-MediumItalic.ttf differ
diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-Regular.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-Regular.ttf
new file mode 100644
index 0000000000..7c4ce36a44
Binary files /dev/null and b/openpype/style/fonts/RobotoMono/static/RobotoMono-Regular.ttf differ
diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-SemiBold.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-SemiBold.ttf
new file mode 100644
index 0000000000..15ee6c6e40
Binary files /dev/null and b/openpype/style/fonts/RobotoMono/static/RobotoMono-SemiBold.ttf differ
diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-SemiBoldItalic.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-SemiBoldItalic.ttf
new file mode 100644
index 0000000000..8e21497793
Binary files /dev/null and b/openpype/style/fonts/RobotoMono/static/RobotoMono-SemiBoldItalic.ttf differ
diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-Thin.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-Thin.ttf
new file mode 100644
index 0000000000..ee8a3fd41a
Binary files /dev/null and b/openpype/style/fonts/RobotoMono/static/RobotoMono-Thin.ttf differ
diff --git a/openpype/style/fonts/RobotoMono/static/RobotoMono-ThinItalic.ttf b/openpype/style/fonts/RobotoMono/static/RobotoMono-ThinItalic.ttf
new file mode 100644
index 0000000000..40b01e40de
Binary files /dev/null and b/openpype/style/fonts/RobotoMono/static/RobotoMono-ThinItalic.ttf differ
diff --git a/openpype/style/style.css b/openpype/style/style.css
index b955bdc2a6..830ed85f9b 100644
--- a/openpype/style/style.css
+++ b/openpype/style/style.css
@@ -271,37 +271,38 @@ QTabWidget::tab-bar {
}
QTabBar::tab {
- border-top-left-radius: 4px;
- border-top-right-radius: 4px;
padding: 5px;
-
+ border-left: 3px solid transparent;
+ border-top: 1px solid {color:border};
+ border-right: 1px solid {color:border};
+ background: qlineargradient(
+ x1: 0, y1: 1, x2: 0, y2: 0,
+ stop: 0.5 {color:bg}, stop: 1.0 {color:bg-inputs}
+ );
}
QTabBar::tab:selected {
background: {color:grey-lighter};
- /* background: qradialgradient(
- cx:0.5, cy:0.5, radius: 2,
- fx:0.5, fy:1,
- stop:0.3 {color:bg}, stop:1 white
- ) */
- /* background: qlineargradient(
- x1: 0, y1: 0, x2: 0, y2: 1,
- stop: 0 {color:bg-inputs}, stop: 1.0 {color:bg}
- ); */
+ border-left: 3px solid {color:border-focus};
+ background: qlineargradient(
+ x1: 0, y1: 1, x2: 0, y2: 0,
+ stop: 0.5 {color:bg}, stop: 1.0 {color:border}
+ );
}
QTabBar::tab:!selected {
- /* Make it smaller*/
- margin-top: 3px;
background: {color:grey-light};
}
QTabBar::tab:!selected:hover {
background: {color:grey-lighter};
}
-
+QTabBar::tab:first {
+ border-left: 1px solid {color:border};
+}
QTabBar::tab:first:selected {
margin-left: 0;
+ border-left: 3px solid {color:border-focus};
}
QTabBar::tab:last:selected {
@@ -623,3 +624,8 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {
border: 1px solid {color:border};
border-radius: 0.1em;
}
+
+/* Python console interpreter */
+#PythonInterpreterOutput, #PythonCodeEditor {
+ font-family: "Roboto Mono";
+}
diff --git a/openpype/tools/launcher/constants.py b/openpype/tools/launcher/constants.py
index e6dbbb6e19..7f394cb5ac 100644
--- a/openpype/tools/launcher/constants.py
+++ b/openpype/tools/launcher/constants.py
@@ -8,5 +8,5 @@ ACTION_ID_ROLE = QtCore.Qt.UserRole + 3
ANIMATION_START_ROLE = QtCore.Qt.UserRole + 4
ANIMATION_STATE_ROLE = QtCore.Qt.UserRole + 5
-
-ANIMATION_LEN = 10
+# Animation length in seconds
+ANIMATION_LEN = 7
diff --git a/openpype/tools/launcher/lib.py b/openpype/tools/launcher/lib.py
index 65d40cd0df..d6374f49d2 100644
--- a/openpype/tools/launcher/lib.py
+++ b/openpype/tools/launcher/lib.py
@@ -44,9 +44,12 @@ class ProjectHandler(QtCore.QObject):
# Signal emmited when project has changed
project_changed = QtCore.Signal(str)
+ projects_refreshed = QtCore.Signal()
+ timer_timeout = QtCore.Signal()
def __init__(self, dbcon, model):
super(ProjectHandler, self).__init__()
+ self._active = False
# Store project model for usage
self.model = model
# Store dbcon
@@ -54,6 +57,28 @@ class ProjectHandler(QtCore.QObject):
self.current_project = dbcon.Session.get("AVALON_PROJECT")
+ refresh_timer = QtCore.QTimer()
+ refresh_timer.setInterval(self.refresh_interval)
+ refresh_timer.timeout.connect(self._on_timeout)
+
+ self.refresh_timer = refresh_timer
+
+ def _on_timeout(self):
+ if self._active:
+ self.timer_timeout.emit()
+ self.refresh_model()
+
+ def set_active(self, active):
+ self._active = active
+
+ def start_timer(self, trigger=False):
+ self.refresh_timer.start()
+ if trigger:
+ self._on_timeout()
+
+ def stop_timer(self):
+ self.refresh_timer.stop()
+
def set_project(self, project_name):
# Change current project of this handler
self.current_project = project_name
@@ -66,6 +91,7 @@ class ProjectHandler(QtCore.QObject):
def refresh_model(self):
self.model.refresh()
+ self.projects_refreshed.emit()
def get_action_icon(action):
diff --git a/openpype/tools/launcher/models.py b/openpype/tools/launcher/models.py
index 846a07e081..4988829c11 100644
--- a/openpype/tools/launcher/models.py
+++ b/openpype/tools/launcher/models.py
@@ -122,7 +122,6 @@ class ActionModel(QtGui.QStandardItemModel):
self.application_manager = ApplicationManager()
- self._groups = {}
self.default_icon = qtawesome.icon("fa.cube", color="white")
# Cache of available actions
self._registered_actions = list()
@@ -138,14 +137,18 @@ class ActionModel(QtGui.QStandardItemModel):
actions.extend(app_actions)
self._registered_actions = actions
- self.items_by_id.clear()
+
+ self.filter_actions()
def get_application_actions(self):
actions = []
if not self.dbcon.Session.get("AVALON_PROJECT"):
return actions
- project_doc = self.dbcon.find_one({"type": "project"})
+ project_doc = self.dbcon.find_one(
+ {"type": "project"},
+ {"config.apps": True}
+ )
if not project_doc:
return actions
@@ -182,16 +185,12 @@ class ActionModel(QtGui.QStandardItemModel):
return icon
def filter_actions(self):
+ self.items_by_id.clear()
# Validate actions based on compatibility
self.clear()
- self.items_by_id.clear()
- self._groups.clear()
-
actions = self.filter_compatible_actions(self._registered_actions)
- self.beginResetModel()
-
single_actions = []
varianted_actions = collections.defaultdict(list)
grouped_actions = collections.defaultdict(list)
@@ -274,12 +273,17 @@ class ActionModel(QtGui.QStandardItemModel):
items_by_order[order].append(item)
+ self.beginResetModel()
+
+ items = []
for order in sorted(items_by_order.keys()):
for item in items_by_order[order]:
item_id = str(uuid.uuid4())
item.setData(item_id, ACTION_ID_ROLE)
self.items_by_id[item_id] = item
- self.appendRow(item)
+ items.append(item)
+
+ self.invisibleRootItem().appendRows(items)
self.endResetModel()
diff --git a/openpype/tools/launcher/widgets.py b/openpype/tools/launcher/widgets.py
index 048210115c..35c7d98be1 100644
--- a/openpype/tools/launcher/widgets.py
+++ b/openpype/tools/launcher/widgets.py
@@ -40,16 +40,11 @@ class ProjectBar(QtWidgets.QWidget):
QtWidgets.QSizePolicy.Maximum
)
- refresh_timer = QtCore.QTimer()
- refresh_timer.setInterval(project_handler.refresh_interval)
-
self.project_handler = project_handler
self.project_delegate = project_delegate
self.project_combobox = project_combobox
- self.refresh_timer = refresh_timer
# Signals
- refresh_timer.timeout.connect(self._on_refresh_timeout)
self.project_combobox.currentIndexChanged.connect(self.on_index_change)
project_handler.project_changed.connect(self._on_project_change)
@@ -58,20 +53,6 @@ class ProjectBar(QtWidgets.QWidget):
if project_name:
self.set_project(project_name)
- def showEvent(self, event):
- if not self.refresh_timer.isActive():
- self.refresh_timer.start()
- super(ProjectBar, self).showEvent(event)
-
- def _on_refresh_timeout(self):
- if not self.isVisible():
- # Stop timer if widget is not visible
- self.refresh_timer.stop()
-
- elif self.isActiveWindow():
- # Refresh projects if window is active
- self.project_handler.refresh_model()
-
def _on_project_change(self, project_name):
if self.get_current_project() == project_name:
return
@@ -103,9 +84,10 @@ class ActionBar(QtWidgets.QWidget):
action_clicked = QtCore.Signal(object)
- def __init__(self, dbcon, parent=None):
+ def __init__(self, project_handler, dbcon, parent=None):
super(ActionBar, self).__init__(parent)
+ self.project_handler = project_handler
self.dbcon = dbcon
layout = QtWidgets.QHBoxLayout(self)
@@ -152,17 +134,25 @@ class ActionBar(QtWidgets.QWidget):
self.set_row_height(1)
+ project_handler.projects_refreshed.connect(self._on_projects_refresh)
view.clicked.connect(self.on_clicked)
def discover_actions(self):
+ if self._animation_timer.isActive():
+ self._animation_timer.stop()
self.model.discover()
def filter_actions(self):
+ if self._animation_timer.isActive():
+ self._animation_timer.stop()
self.model.filter_actions()
def set_row_height(self, rows):
self.setMinimumHeight(rows * 75)
+ def _on_projects_refresh(self):
+ self.discover_actions()
+
def _on_animation(self):
time_now = time.time()
for action_id in tuple(self._animated_items):
@@ -182,6 +172,8 @@ class ActionBar(QtWidgets.QWidget):
self.update()
def _start_animation(self, index):
+ # Offset refresh timeout
+ self.project_handler.start_timer()
action_id = index.data(ACTION_ID_ROLE)
item = self.model.items_by_id.get(action_id)
if item:
@@ -202,6 +194,9 @@ class ActionBar(QtWidgets.QWidget):
self.action_clicked.emit(action)
return
+ # Offset refresh timeout
+ self.project_handler.start_timer()
+
actions = index.data(ACTION_ROLE)
menu = QtWidgets.QMenu(self)
diff --git a/openpype/tools/launcher/window.py b/openpype/tools/launcher/window.py
index 979aab42cf..bd37a9b89c 100644
--- a/openpype/tools/launcher/window.py
+++ b/openpype/tools/launcher/window.py
@@ -103,14 +103,9 @@ class ProjectsPanel(QtWidgets.QWidget):
layout.addWidget(view)
- refresh_timer = QtCore.QTimer()
- refresh_timer.setInterval(project_handler.refresh_interval)
-
- refresh_timer.timeout.connect(self._on_refresh_timeout)
view.clicked.connect(self.on_clicked)
self.view = view
- self.refresh_timer = refresh_timer
self.project_handler = project_handler
def on_clicked(self, index):
@@ -118,21 +113,6 @@ class ProjectsPanel(QtWidgets.QWidget):
project_name = index.data(QtCore.Qt.DisplayRole)
self.project_handler.set_project(project_name)
- def showEvent(self, event):
- self.project_handler.refresh_model()
- if not self.refresh_timer.isActive():
- self.refresh_timer.start()
- super(ProjectsPanel, self).showEvent(event)
-
- def _on_refresh_timeout(self):
- if not self.isVisible():
- # Stop timer if widget is not visible
- self.refresh_timer.stop()
-
- elif self.isActiveWindow():
- # Refresh projects if window is active
- self.project_handler.refresh_model()
-
class AssetsPanel(QtWidgets.QWidget):
"""Assets page"""
@@ -268,8 +248,6 @@ class AssetsPanel(QtWidgets.QWidget):
class LauncherWindow(QtWidgets.QDialog):
"""Launcher interface"""
- # Refresh actions each 10000msecs
- actions_refresh_timeout = 10000
def __init__(self, parent=None):
super(LauncherWindow, self).__init__(parent)
@@ -304,7 +282,7 @@ class LauncherWindow(QtWidgets.QDialog):
page_slider.addWidget(asset_panel)
# actions
- actions_bar = ActionBar(self.dbcon, self)
+ actions_bar = ActionBar(project_handler, self.dbcon, self)
# statusbar
statusbar = QtWidgets.QWidget()
@@ -342,10 +320,6 @@ class LauncherWindow(QtWidgets.QDialog):
layout.setSpacing(0)
layout.setContentsMargins(0, 0, 0, 0)
- actions_refresh_timer = QtCore.QTimer()
- actions_refresh_timer.setInterval(self.actions_refresh_timeout)
-
- self.actions_refresh_timer = actions_refresh_timer
self.project_handler = project_handler
self.message_label = message_label
@@ -357,22 +331,31 @@ class LauncherWindow(QtWidgets.QDialog):
self._page = 0
# signals
- actions_refresh_timer.timeout.connect(self._on_action_timer)
actions_bar.action_clicked.connect(self.on_action_clicked)
action_history.trigger_history.connect(self.on_history_action)
project_handler.project_changed.connect(self.on_project_change)
+ project_handler.timer_timeout.connect(self._on_refresh_timeout)
asset_panel.back_clicked.connect(self.on_back_clicked)
asset_panel.session_changed.connect(self.on_session_changed)
self.resize(520, 740)
def showEvent(self, event):
- if not self.actions_refresh_timer.isActive():
- self.actions_refresh_timer.start()
- self.discover_actions()
+ self.project_handler.set_active(True)
+ self.project_handler.start_timer(True)
super(LauncherWindow, self).showEvent(event)
+ def _on_refresh_timeout(self):
+ # Stop timer if widget is not visible
+ if not self.isVisible():
+ self.project_handler.stop_timer()
+
+ def changeEvent(self, event):
+ if event.type() == QtCore.QEvent.ActivationChange:
+ self.project_handler.set_active(self.isActiveWindow())
+ super(LauncherWindow, self).changeEvent(event)
+
def set_page(self, page):
current = self.page_slider.currentIndex()
if current == page and self._page == page:
@@ -392,20 +375,10 @@ class LauncherWindow(QtWidgets.QDialog):
def discover_actions(self):
self.actions_bar.discover_actions()
- self.filter_actions()
def filter_actions(self):
self.actions_bar.filter_actions()
- def _on_action_timer(self):
- if not self.isVisible():
- # Stop timer if widget is not visible
- self.actions_refresh_timer.stop()
-
- elif self.isActiveWindow():
- # Refresh projects if window is active
- self.discover_actions()
-
def on_project_change(self, project_name):
# Update the Action plug-ins available for the current project
self.set_page(1)
diff --git a/openpype/tools/mayalookassigner/commands.py b/openpype/tools/mayalookassigner/commands.py
index 2add5d3499..a53251cdef 100644
--- a/openpype/tools/mayalookassigner/commands.py
+++ b/openpype/tools/mayalookassigner/commands.py
@@ -103,12 +103,19 @@ def create_asset_id_hash(nodes):
"""
node_id_hash = defaultdict(list)
for node in nodes:
- value = lib.get_id(node)
- if value is None:
- continue
+ # iterate over content of reference node
+ if cmds.nodeType(node) == "reference":
+ ref_hashes = create_asset_id_hash(
+ cmds.referenceQuery(node, nodes=True))
+ for asset_id, ref_nodes in ref_hashes.items():
+ node_id_hash[asset_id] += ref_nodes
+ else:
+ value = lib.get_id(node)
+ if value is None:
+ continue
- asset_id = value.split(":")[0]
- node_id_hash[asset_id].append(node)
+ asset_id = value.split(":")[0]
+ node_id_hash[asset_id].append(node)
return dict(node_id_hash)
@@ -135,18 +142,19 @@ def create_items_from_nodes(nodes):
id_hashes = create_asset_id_hash(nodes)
# get ids from alembic
- vray_proxy_nodes = cmds.ls(nodes, type="VRayProxy")
- for vp in vray_proxy_nodes:
- path = cmds.getAttr("{}.fileName".format(vp))
- ids = vray_proxies.get_alembic_ids_cache(path)
- parent_id = {}
- for k, _ in ids.items():
- pid = k.split(":")[0]
- if not parent_id.get(pid):
- parent_id.update({pid: [vp]})
+ if cmds.pluginInfo('vrayformaya', query=True, loaded=True):
+ vray_proxy_nodes = cmds.ls(nodes, type="VRayProxy")
+ for vp in vray_proxy_nodes:
+ path = cmds.getAttr("{}.fileName".format(vp))
+ ids = vray_proxies.get_alembic_ids_cache(path)
+ parent_id = {}
+ for k, _ in ids.items():
+ pid = k.split(":")[0]
+ if not parent_id.get(pid):
+ parent_id.update({pid: [vp]})
- print("Adding ids from alembic {}".format(path))
- id_hashes.update(parent_id)
+ print("Adding ids from alembic {}".format(path))
+ id_hashes.update(parent_id)
if not id_hashes:
return asset_view_items
diff --git a/openpype/tools/settings/settings/base.py b/openpype/tools/settings/settings/base.py
index eb5f82ab9a..8235cf8642 100644
--- a/openpype/tools/settings/settings/base.py
+++ b/openpype/tools/settings/settings/base.py
@@ -25,6 +25,38 @@ class BaseWidget(QtWidgets.QWidget):
self.label_widget = None
self.create_ui()
+ def scroll_to(self, widget):
+ self.category_widget.scroll_to(widget)
+
+ def set_path(self, path):
+ self.category_widget.set_path(path)
+
+ def set_focus(self, scroll_to=False):
+ """Set focus of a widget.
+
+ Args:
+ scroll_to(bool): Also scroll to widget in category widget.
+ """
+ if scroll_to:
+ self.scroll_to(self)
+ self.setFocus()
+
+ def make_sure_is_visible(self, path, scroll_to):
+ """Make a widget of entity visible by its path.
+
+ Args:
+ path(str): Path to entity.
+ scroll_to(bool): Should be scrolled to entity.
+
+ Returns:
+ bool: Entity with path was found.
+ """
+ raise NotImplementedError(
+ "{} not implemented `make_sure_is_visible`".format(
+ self.__class__.__name__
+ )
+ )
+
def trigger_hierarchical_style_update(self):
self.category_widget.hierarchical_style_update()
@@ -277,11 +309,23 @@ class BaseWidget(QtWidgets.QWidget):
if to_run:
to_run()
+ def focused_in(self):
+ if self.entity is not None:
+ self.set_path(self.entity.path)
+
def mouseReleaseEvent(self, event):
if self.allow_actions and event.button() == QtCore.Qt.RightButton:
return self.show_actions_menu()
- return super(BaseWidget, self).mouseReleaseEvent(event)
+ focused_in = False
+ if event.button() == QtCore.Qt.LeftButton:
+ focused_in = True
+ self.focused_in()
+
+ result = super(BaseWidget, self).mouseReleaseEvent(event)
+ if focused_in and not event.isAccepted():
+ event.accept()
+ return result
class InputWidget(BaseWidget):
@@ -337,6 +381,14 @@ class InputWidget(BaseWidget):
)
)
+ def make_sure_is_visible(self, path, scroll_to):
+ if path:
+ entity_path = self.entity.path
+ if entity_path == path:
+ self.set_focus(scroll_to)
+ return True
+ return False
+
def update_style(self):
has_unsaved_changes = self.entity.has_unsaved_changes
if not has_unsaved_changes and self.entity.group_item:
@@ -422,11 +474,20 @@ class GUIWidget(BaseWidget):
layout.addWidget(splitter_item)
def set_entity_value(self):
- return
+ pass
def hierarchical_style_update(self):
pass
+ def make_sure_is_visible(self, *args, **kwargs):
+ return False
+
+ def focused_in(self):
+ pass
+
+ def set_path(self, *args, **kwargs):
+ pass
+
def get_invalid(self):
return []
diff --git a/openpype/tools/settings/settings/breadcrumbs_widget.py b/openpype/tools/settings/settings/breadcrumbs_widget.py
new file mode 100644
index 0000000000..d25cbdc8cb
--- /dev/null
+++ b/openpype/tools/settings/settings/breadcrumbs_widget.py
@@ -0,0 +1,494 @@
+from Qt import QtWidgets, QtGui, QtCore
+
+PREFIX_ROLE = QtCore.Qt.UserRole + 1
+LAST_SEGMENT_ROLE = QtCore.Qt.UserRole + 2
+
+
+class BreadcrumbItem(QtGui.QStandardItem):
+ def __init__(self, *args, **kwargs):
+ self._display_value = None
+ self._edit_value = None
+ super(BreadcrumbItem, self).__init__(*args, **kwargs)
+
+ def data(self, role=None):
+ if role == QtCore.Qt.DisplayRole:
+ return self._display_value
+
+ if role == QtCore.Qt.EditRole:
+ return self._edit_value
+
+ if role is None:
+ args = tuple()
+ else:
+ args = (role, )
+ return super(BreadcrumbItem, self).data(*args)
+
+ def setData(self, value, role):
+ if role == QtCore.Qt.DisplayRole:
+ self._display_value = value
+ return True
+
+ if role == QtCore.Qt.EditRole:
+ self._edit_value = value
+ return True
+
+ if role is None:
+ args = (value, )
+ else:
+ args = (value, role)
+ return super(BreadcrumbItem, self).setData(*args)
+
+
+class BreadcrumbsModel(QtGui.QStandardItemModel):
+ def __init__(self):
+ super(BreadcrumbsModel, self).__init__()
+ self.current_path = ""
+
+ self.reset()
+
+ def reset(self):
+ return
+
+
+class SettingsBreadcrumbs(BreadcrumbsModel):
+ def __init__(self):
+ self.entity = None
+
+ self.entities_by_path = {}
+ self.dynamic_paths = set()
+
+ super(SettingsBreadcrumbs, self).__init__()
+
+ def set_entity(self, entity):
+ self.entities_by_path = {}
+ self.dynamic_paths = set()
+ self.entity = entity
+ self.reset()
+
+ def has_children(self, path):
+ for key in self.entities_by_path.keys():
+ if key.startswith(path):
+ return True
+ return False
+
+ def is_valid_path(self, path):
+ if not path:
+ return True
+
+ path_items = path.split("/")
+ try:
+ entity = self.entity
+ for item in path_items:
+ entity = entity[item]
+ except Exception:
+ return False
+ return True
+
+
+class SystemSettingsBreadcrumbs(SettingsBreadcrumbs):
+ def reset(self):
+ root_item = self.invisibleRootItem()
+ rows = root_item.rowCount()
+ if rows > 0:
+ root_item.removeRows(0, rows)
+
+ if self.entity is None:
+ return
+
+ entities_by_path = self.entity.collect_static_entities_by_path()
+ self.entities_by_path = entities_by_path
+ items = []
+ for path in entities_by_path.keys():
+ if not path:
+ continue
+ path_items = path.split("/")
+ value = path
+ label = path_items.pop(-1)
+ prefix = "/".join(path_items)
+ if prefix:
+ prefix += "/"
+
+ item = QtGui.QStandardItem(value)
+ item.setData(label, LAST_SEGMENT_ROLE)
+ item.setData(prefix, PREFIX_ROLE)
+
+ items.append(item)
+
+ root_item.appendRows(items)
+
+
+class ProjectSettingsBreadcrumbs(SettingsBreadcrumbs):
+ def reset(self):
+ root_item = self.invisibleRootItem()
+ rows = root_item.rowCount()
+ if rows > 0:
+ root_item.removeRows(0, rows)
+
+ if self.entity is None:
+ return
+
+ entities_by_path = self.entity.collect_static_entities_by_path()
+ self.entities_by_path = entities_by_path
+ items = []
+ for path in entities_by_path.keys():
+ if not path:
+ continue
+ path_items = path.split("/")
+ value = path
+ label = path_items.pop(-1)
+ prefix = "/".join(path_items)
+ if prefix:
+ prefix += "/"
+
+ item = QtGui.QStandardItem(value)
+ item.setData(label, LAST_SEGMENT_ROLE)
+ item.setData(prefix, PREFIX_ROLE)
+
+ items.append(item)
+
+ root_item.appendRows(items)
+
+
+class BreadcrumbsProxy(QtCore.QSortFilterProxyModel):
+ def __init__(self, *args, **kwargs):
+ super(BreadcrumbsProxy, self).__init__(*args, **kwargs)
+
+ self._current_path = ""
+
+ def set_path_prefix(self, prefix):
+ path = prefix
+ if not prefix.endswith("/"):
+ path_items = path.split("/")
+ if len(path_items) == 1:
+ path = ""
+ else:
+ path_items.pop(-1)
+ path = "/".join(path_items) + "/"
+
+ if path == self._current_path:
+ return
+
+ self._current_path = prefix
+
+ self.invalidateFilter()
+
+ def filterAcceptsRow(self, row, parent):
+ index = self.sourceModel().index(row, 0, parent)
+ prefix_path = index.data(PREFIX_ROLE)
+ return prefix_path == self._current_path
+
+
+class BreadcrumbsHintMenu(QtWidgets.QMenu):
+ def __init__(self, model, path_prefix, parent):
+ super(BreadcrumbsHintMenu, self).__init__(parent)
+
+ self._path_prefix = path_prefix
+ self._model = model
+
+ def showEvent(self, event):
+ self.clear()
+
+ self._model.set_path_prefix(self._path_prefix)
+
+ row_count = self._model.rowCount()
+ if row_count == 0:
+ action = self.addAction("* Nothing")
+ action.setData(".")
+ else:
+ for row in range(self._model.rowCount()):
+ index = self._model.index(row, 0)
+ label = index.data(LAST_SEGMENT_ROLE)
+ value = index.data(QtCore.Qt.EditRole)
+ action = self.addAction(label)
+ action.setData(value)
+
+ super(BreadcrumbsHintMenu, self).showEvent(event)
+
+
+class ClickableWidget(QtWidgets.QWidget):
+ clicked = QtCore.Signal()
+
+ def mouseReleaseEvent(self, event):
+ if event.button() == QtCore.Qt.LeftButton:
+ self.clicked.emit()
+ super(ClickableWidget, self).mouseReleaseEvent(event)
+
+
+class BreadcrumbsPathInput(QtWidgets.QLineEdit):
+ cancelled = QtCore.Signal()
+ confirmed = QtCore.Signal()
+
+ def __init__(self, model, proxy_model, parent):
+ super(BreadcrumbsPathInput, self).__init__(parent)
+
+ self.setObjectName("BreadcrumbsPathInput")
+
+ self.setFrame(False)
+
+ completer = QtWidgets.QCompleter(self)
+ completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
+ completer.setModel(proxy_model)
+
+ popup = completer.popup()
+ popup.setUniformItemSizes(True)
+ popup.setLayoutMode(QtWidgets.QListView.Batched)
+
+ self.setCompleter(completer)
+
+ completer.activated.connect(self._on_completer_activated)
+ self.textEdited.connect(self._on_text_change)
+
+ self._completer = completer
+ self._model = model
+ self._proxy_model = proxy_model
+
+ self._context_menu_visible = False
+
+ def set_model(self, model):
+ self._model = model
+
+ def event(self, event):
+ if (
+ event.type() == QtCore.QEvent.KeyPress
+ and event.key() == QtCore.Qt.Key_Tab
+ ):
+ if self._model:
+ find_value = self.text() + "/"
+ if self._model.has_children(find_value):
+ self.insert("/")
+ else:
+ self._completer.popup().hide()
+ event.accept()
+ return True
+
+ return super(BreadcrumbsPathInput, self).event(event)
+
+ def keyPressEvent(self, event):
+ if event.key() == QtCore.Qt.Key_Escape:
+ self.cancelled.emit()
+ return
+
+ if event.key() in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter):
+ self.confirmed.emit()
+ return
+
+ super(BreadcrumbsPathInput, self).keyPressEvent(event)
+
+ def focusOutEvent(self, event):
+ if not self._context_menu_visible:
+ self.cancelled.emit()
+
+ self._context_menu_visible = False
+ super(BreadcrumbsPathInput, self).focusOutEvent(event)
+
+ def contextMenuEvent(self, event):
+ self._context_menu_visible = True
+ super(BreadcrumbsPathInput, self).contextMenuEvent(event)
+
+ def _on_completer_activated(self, path):
+ self.confirmed.emit()
+
+ def _on_text_change(self, path):
+ self._proxy_model.set_path_prefix(path)
+
+
+class BreadcrumbsButton(QtWidgets.QToolButton):
+ path_selected = QtCore.Signal(str)
+
+ def __init__(self, path, model, parent):
+ super(BreadcrumbsButton, self).__init__(parent)
+
+ self.setObjectName("BreadcrumbsButton")
+
+ path_prefix = path
+ if path:
+ path_prefix += "/"
+
+ self.setAutoRaise(True)
+ self.setPopupMode(QtWidgets.QToolButton.MenuButtonPopup)
+
+ self.setMouseTracking(True)
+
+ if path:
+ self.setText(path.split("/")[-1])
+ else:
+ self.setProperty("empty", "1")
+
+ menu = BreadcrumbsHintMenu(model, path_prefix, self)
+
+ self.setMenu(menu)
+
+ # fixed size breadcrumbs
+ self.setMinimumSize(self.minimumSizeHint())
+ size_policy = self.sizePolicy()
+ size_policy.setVerticalPolicy(size_policy.Minimum)
+ self.setSizePolicy(size_policy)
+
+ menu.triggered.connect(self._on_menu_click)
+ # Don't allow to go to root with mouse click
+ if path:
+ self.clicked.connect(self._on_click)
+
+ self._path = path
+ self._path_prefix = path_prefix
+ self._model = model
+ self._menu = menu
+
+ def _on_click(self):
+ self.path_selected.emit(self._path)
+
+ def _on_menu_click(self, action):
+ item = action.data()
+ self.path_selected.emit(item)
+
+
+class BreadcrumbsAddressBar(QtWidgets.QFrame):
+ "Windows Explorer-like address bar"
+ path_changed = QtCore.Signal(str)
+ path_edited = QtCore.Signal(str)
+
+ def __init__(self, parent=None):
+ super(BreadcrumbsAddressBar, self).__init__(parent)
+
+ self.setAutoFillBackground(True)
+ self.setFrameShape(self.StyledPanel)
+
+ # Edit presented path textually
+ proxy_model = BreadcrumbsProxy()
+ path_input = BreadcrumbsPathInput(None, proxy_model, self)
+ path_input.setVisible(False)
+
+ path_input.cancelled.connect(self._on_input_cancel)
+ path_input.confirmed.connect(self._on_input_confirm)
+
+ # Container for `crumbs_panel`
+ crumbs_container = QtWidgets.QWidget(self)
+
+ # Container for breadcrumbs
+ crumbs_panel = QtWidgets.QWidget(crumbs_container)
+ crumbs_panel.setObjectName("BreadcrumbsPanel")
+
+ crumbs_layout = QtWidgets.QHBoxLayout()
+ crumbs_layout.setContentsMargins(0, 0, 0, 0)
+ crumbs_layout.setSpacing(0)
+
+ crumbs_cont_layout = QtWidgets.QHBoxLayout(crumbs_container)
+ crumbs_cont_layout.setContentsMargins(0, 0, 0, 0)
+ crumbs_cont_layout.setSpacing(0)
+ crumbs_cont_layout.addWidget(crumbs_panel)
+
+ # Clicking on empty space to the right puts the bar into edit mode
+ switch_space = ClickableWidget(self)
+
+ crumb_panel_layout = QtWidgets.QHBoxLayout(crumbs_panel)
+ crumb_panel_layout.setContentsMargins(0, 0, 0, 0)
+ crumb_panel_layout.setSpacing(0)
+ crumb_panel_layout.addLayout(crumbs_layout, 0)
+ crumb_panel_layout.addWidget(switch_space, 1)
+
+ switch_space.clicked.connect(self.switch_space_mouse_up)
+
+ layout = QtWidgets.QHBoxLayout(self)
+ layout.setContentsMargins(0, 0, 0, 0)
+ layout.setSpacing(0)
+ layout.addWidget(path_input)
+ layout.addWidget(crumbs_container)
+
+ self.setMaximumHeight(path_input.height())
+
+ self.crumbs_layout = crumbs_layout
+ self.crumbs_panel = crumbs_panel
+ self.switch_space = switch_space
+ self.path_input = path_input
+ self.crumbs_container = crumbs_container
+
+ self._model = None
+ self._proxy_model = proxy_model
+
+ self._current_path = None
+
+ def set_model(self, model):
+ self._model = model
+ self.path_input.set_model(model)
+ self._proxy_model.setSourceModel(model)
+
+ def _on_input_confirm(self):
+ self.change_path(self.path_input.text())
+
+ def _on_input_cancel(self):
+ self._cancel_edit()
+
+ def _clear_crumbs(self):
+ while self.crumbs_layout.count():
+ widget = self.crumbs_layout.takeAt(0).widget()
+ if widget:
+ widget.deleteLater()
+
+ def _insert_crumb(self, path):
+ btn = BreadcrumbsButton(path, self._proxy_model, self.crumbs_panel)
+
+ self.crumbs_layout.insertWidget(0, btn)
+
+ btn.path_selected.connect(self._on_crumb_clicked)
+
+ def _on_crumb_clicked(self, path):
+ "Breadcrumb was clicked"
+ self.change_path(path)
+
+ def change_path(self, path):
+ if self._model and not self._model.is_valid_path(path):
+ self._show_address_field()
+ else:
+ self.set_path(path)
+ self.path_edited.emit(path)
+
+ def set_path(self, path):
+ if path is None or path == ".":
+ path = self._current_path
+
+ # exit edit mode
+ self._cancel_edit()
+
+ self._clear_crumbs()
+ self._current_path = path
+ self.path_input.setText(path)
+ path_items = [
+ item
+ for item in path.split("/")
+ if item
+ ]
+ while path_items:
+ item = "/".join(path_items)
+ self._insert_crumb(item)
+ path_items.pop(-1)
+ self._insert_crumb("")
+
+ self.path_changed.emit(self._current_path)
+
+ def _cancel_edit(self):
+ "Set edit line text back to current path and switch to view mode"
+ # revert path
+ self.path_input.setText(self.path())
+ # switch back to breadcrumbs view
+ self._show_address_field(False)
+
+ def path(self):
+ "Get path displayed in this BreadcrumbsAddressBar"
+ return self._current_path
+
+ def switch_space_mouse_up(self):
+ "EVENT: switch_space mouse clicked"
+ self._show_address_field(True)
+
+ def _show_address_field(self, show=True):
+ "Show text address field"
+ self.crumbs_container.setVisible(not show)
+ self.path_input.setVisible(show)
+ if show:
+ self.path_input.setFocus()
+ self.path_input.selectAll()
+
+ def minimumSizeHint(self):
+ result = super(BreadcrumbsAddressBar, self).minimumSizeHint()
+ result.setHeight(self.path_input.minimumSizeHint().height())
+ return result
diff --git a/openpype/tools/settings/settings/categories.py b/openpype/tools/settings/settings/categories.py
index 8be3eddfa8..c420a8cdc5 100644
--- a/openpype/tools/settings/settings/categories.py
+++ b/openpype/tools/settings/settings/categories.py
@@ -31,6 +31,11 @@ from openpype.settings.entities import (
from openpype.settings import SaveWarningExc
from .widgets import ProjectListWidget
+from .breadcrumbs_widget import (
+ BreadcrumbsAddressBar,
+ SystemSettingsBreadcrumbs,
+ ProjectSettingsBreadcrumbs
+)
from .base import GUIWidget
from .list_item_widget import ListWidget
@@ -175,6 +180,16 @@ class SettingsCategoryWidget(QtWidgets.QWidget):
scroll_widget = QtWidgets.QScrollArea(self)
scroll_widget.setObjectName("GroupWidget")
content_widget = QtWidgets.QWidget(scroll_widget)
+
+ breadcrumbs_label = QtWidgets.QLabel("Path:", content_widget)
+ breadcrumbs_widget = BreadcrumbsAddressBar(content_widget)
+
+ breadcrumbs_layout = QtWidgets.QHBoxLayout()
+ breadcrumbs_layout.setContentsMargins(5, 5, 5, 5)
+ breadcrumbs_layout.setSpacing(5)
+ breadcrumbs_layout.addWidget(breadcrumbs_label)
+ breadcrumbs_layout.addWidget(breadcrumbs_widget)
+
content_layout = QtWidgets.QVBoxLayout(content_widget)
content_layout.setContentsMargins(3, 3, 3, 3)
content_layout.setSpacing(5)
@@ -183,40 +198,44 @@ class SettingsCategoryWidget(QtWidgets.QWidget):
scroll_widget.setWidgetResizable(True)
scroll_widget.setWidget(content_widget)
- configurations_widget = QtWidgets.QWidget(self)
-
- footer_widget = QtWidgets.QWidget(configurations_widget)
- footer_layout = QtWidgets.QHBoxLayout(footer_widget)
-
refresh_icon = qtawesome.icon("fa.refresh", color="white")
- refresh_btn = QtWidgets.QPushButton(footer_widget)
+ refresh_btn = QtWidgets.QPushButton(self)
refresh_btn.setIcon(refresh_icon)
- footer_layout.addWidget(refresh_btn, 0)
-
+ footer_layout = QtWidgets.QHBoxLayout()
+ footer_layout.setContentsMargins(5, 5, 5, 5)
if self.user_role == "developer":
self._add_developer_ui(footer_layout)
- save_btn = QtWidgets.QPushButton("Save", footer_widget)
- require_restart_label = QtWidgets.QLabel(footer_widget)
+ save_btn = QtWidgets.QPushButton("Save", self)
+ require_restart_label = QtWidgets.QLabel(self)
require_restart_label.setAlignment(QtCore.Qt.AlignCenter)
+
+ footer_layout.addWidget(refresh_btn, 0)
footer_layout.addWidget(require_restart_label, 1)
footer_layout.addWidget(save_btn, 0)
- configurations_layout = QtWidgets.QVBoxLayout(configurations_widget)
+ configurations_layout = QtWidgets.QVBoxLayout()
configurations_layout.setContentsMargins(0, 0, 0, 0)
configurations_layout.setSpacing(0)
configurations_layout.addWidget(scroll_widget, 1)
- configurations_layout.addWidget(footer_widget, 0)
+ configurations_layout.addLayout(footer_layout, 0)
- main_layout = QtWidgets.QHBoxLayout(self)
+ conf_wrapper_layout = QtWidgets.QHBoxLayout()
+ conf_wrapper_layout.setContentsMargins(0, 0, 0, 0)
+ conf_wrapper_layout.setSpacing(0)
+ conf_wrapper_layout.addLayout(configurations_layout, 1)
+
+ main_layout = QtWidgets.QVBoxLayout(self)
main_layout.setContentsMargins(0, 0, 0, 0)
main_layout.setSpacing(0)
- main_layout.addWidget(configurations_widget, 1)
+ main_layout.addLayout(breadcrumbs_layout, 0)
+ main_layout.addLayout(conf_wrapper_layout, 1)
save_btn.clicked.connect(self._save)
refresh_btn.clicked.connect(self._on_refresh)
+ breadcrumbs_widget.path_edited.connect(self._on_path_edit)
self.save_btn = save_btn
self.refresh_btn = refresh_btn
@@ -224,7 +243,9 @@ class SettingsCategoryWidget(QtWidgets.QWidget):
self.scroll_widget = scroll_widget
self.content_layout = content_layout
self.content_widget = content_widget
- self.configurations_widget = configurations_widget
+ self.breadcrumbs_widget = breadcrumbs_widget
+ self.breadcrumbs_model = None
+ self.conf_wrapper_layout = conf_wrapper_layout
self.main_layout = main_layout
self.ui_tweaks()
@@ -232,6 +253,23 @@ class SettingsCategoryWidget(QtWidgets.QWidget):
def ui_tweaks(self):
return
+ def _on_path_edit(self, path):
+ for input_field in self.input_fields:
+ if input_field.make_sure_is_visible(path, True):
+ break
+
+ def scroll_to(self, widget):
+ if widget:
+ # Process events which happened before ensurence
+ # - that is because some widgets could be not visible before
+ # this method was called and have incorrect size
+ QtWidgets.QApplication.processEvents()
+ # Scroll to widget
+ self.scroll_widget.ensureWidgetVisible(widget)
+
+ def set_path(self, path):
+ self.breadcrumbs_widget.set_path(path)
+
def _add_developer_ui(self, footer_layout):
modify_defaults_widget = QtWidgets.QWidget()
modify_defaults_checkbox = QtWidgets.QCheckBox(modify_defaults_widget)
@@ -427,10 +465,19 @@ class SettingsCategoryWidget(QtWidgets.QWidget):
def _on_reset_crash(self):
self.save_btn.setEnabled(False)
+ if self.breadcrumbs_model is not None:
+ self.breadcrumbs_model.set_entity(None)
+
def _on_reset_success(self):
if not self.save_btn.isEnabled():
self.save_btn.setEnabled(True)
+ if self.breadcrumbs_model is not None:
+ path = self.breadcrumbs_widget.path()
+ self.breadcrumbs_widget.set_path("")
+ self.breadcrumbs_model.set_entity(self.entity)
+ self.breadcrumbs_widget.change_path(path)
+
def add_children_gui(self):
for child_obj in self.entity.children:
item = self.create_ui_for_entity(self, child_obj, self)
@@ -521,6 +568,10 @@ class SystemWidget(SettingsCategoryWidget):
self.modify_defaults_checkbox.setChecked(True)
self.modify_defaults_checkbox.setEnabled(False)
+ def ui_tweaks(self):
+ self.breadcrumbs_model = SystemSettingsBreadcrumbs()
+ self.breadcrumbs_widget.set_model(self.breadcrumbs_model)
+
def _on_modify_defaults(self):
if self.modify_defaults_checkbox.isChecked():
if not self.entity.is_in_defaults_state():
@@ -535,9 +586,12 @@ class ProjectWidget(SettingsCategoryWidget):
self.project_name = None
def ui_tweaks(self):
+ self.breadcrumbs_model = ProjectSettingsBreadcrumbs()
+ self.breadcrumbs_widget.set_model(self.breadcrumbs_model)
+
project_list_widget = ProjectListWidget(self)
- self.main_layout.insertWidget(0, project_list_widget, 0)
+ self.conf_wrapper_layout.insertWidget(0, project_list_widget, 0)
project_list_widget.project_changed.connect(self._on_project_change)
diff --git a/openpype/tools/settings/settings/dict_conditional.py b/openpype/tools/settings/settings/dict_conditional.py
index 31a4fa9fab..3e3270cac9 100644
--- a/openpype/tools/settings/settings/dict_conditional.py
+++ b/openpype/tools/settings/settings/dict_conditional.py
@@ -213,6 +213,26 @@ class DictConditionalWidget(BaseWidget):
else:
body_widget.hide_toolbox(hide_content=False)
+ def make_sure_is_visible(self, path, scroll_to):
+ if not path:
+ return False
+
+ entity_path = self.entity.path
+ if entity_path == path:
+ self.set_focus(scroll_to)
+ return True
+
+ if not path.startswith(entity_path):
+ return False
+
+ if self.body_widget and not self.body_widget.is_expanded():
+ self.body_widget.toggle_content(True)
+
+ for input_field in self.input_fields:
+ if input_field.make_sure_is_visible(path, scroll_to):
+ return True
+ return False
+
def add_widget_to_layout(self, widget, label=None):
if not widget.entity:
map_id = widget.id
diff --git a/openpype/tools/settings/settings/dict_mutable_widget.py b/openpype/tools/settings/settings/dict_mutable_widget.py
index 3526dc60b5..ba86fe82dd 100644
--- a/openpype/tools/settings/settings/dict_mutable_widget.py
+++ b/openpype/tools/settings/settings/dict_mutable_widget.py
@@ -421,6 +421,9 @@ class ModifiableDictItem(QtWidgets.QWidget):
self.category_widget, self.entity, self
)
+ def make_sure_is_visible(self, *args, **kwargs):
+ return self.input_field.make_sure_is_visible(*args, **kwargs)
+
def get_style_state(self):
if self.is_invalid:
return "invalid"
@@ -961,6 +964,26 @@ class DictMutableKeysWidget(BaseWidget):
if changed:
self.on_shuffle()
+ def make_sure_is_visible(self, path, scroll_to):
+ if not path:
+ return False
+
+ entity_path = self.entity.path
+ if entity_path == path:
+ self.set_focus(scroll_to)
+ return True
+
+ if not path.startswith(entity_path):
+ return False
+
+ if self.body_widget and not self.body_widget.is_expanded():
+ self.body_widget.toggle_content(True)
+
+ for input_field in self.input_fields:
+ if input_field.make_sure_is_visible(path, scroll_to):
+ return True
+ return False
+
def set_entity_value(self):
while self.input_fields:
self.remove_row(self.input_fields[0])
diff --git a/openpype/tools/settings/settings/item_widgets.py b/openpype/tools/settings/settings/item_widgets.py
index 82afbb0a13..b2b129da86 100644
--- a/openpype/tools/settings/settings/item_widgets.py
+++ b/openpype/tools/settings/settings/item_widgets.py
@@ -6,8 +6,10 @@ from .widgets import (
ExpandingWidget,
NumberSpinBox,
GridLabelWidget,
- ComboBox,
- NiceCheckbox
+ SettingsComboBox,
+ NiceCheckbox,
+ SettingsPlainTextEdit,
+ SettingsLineEdit
)
from .multiselection_combobox import MultiSelectionComboBox
from .wrapper_widgets import (
@@ -46,6 +48,11 @@ class DictImmutableKeysWidget(BaseWidget):
self._ui_item_base()
label = self.entity.label
+ # Set stretch of second column to 1
+ if isinstance(self.content_layout, QtWidgets.QGridLayout):
+ self.content_layout.setColumnStretch(1, 1)
+
+ self._direct_children_widgets = []
self._parent_widget_by_entity_id = {}
self._added_wrapper_ids = set()
self._prepare_entity_layouts(
@@ -86,6 +93,25 @@ class DictImmutableKeysWidget(BaseWidget):
self._prepare_entity_layouts(child["children"], wrapper)
+ def set_focus(self, scroll_to=False):
+ """Set focus of a widget.
+
+ Args:
+ scroll_to(bool): Also scroll to widget in category widget.
+ """
+ if self.body_widget:
+ if scroll_to:
+ self.scroll_to(self.body_widget.top_part)
+ self.body_widget.top_part.setFocus()
+
+ else:
+ if scroll_to:
+ if not self.input_fields:
+ self.scroll_to(self)
+ else:
+ self.scroll_to(self.input_fields[0])
+ self.setFocus()
+
def _ui_item_base(self):
self.setObjectName("DictInvisible")
@@ -154,9 +180,41 @@ class DictImmutableKeysWidget(BaseWidget):
else:
body_widget.hide_toolbox(hide_content=False)
+ def make_sure_is_visible(self, path, scroll_to):
+ if not path:
+ return False
+
+ entity_path = self.entity.path
+ if entity_path == path:
+ self.set_focus(scroll_to)
+ return True
+
+ if not path.startswith(entity_path):
+ return False
+
+ is_checkbox_child = False
+ changed = False
+ for direct_child in self._direct_children_widgets:
+ if direct_child.make_sure_is_visible(path, scroll_to):
+ changed = True
+ if direct_child.entity is self.checkbox_child:
+ is_checkbox_child = True
+ break
+
+ # Change scroll to this widget
+ if is_checkbox_child:
+ self.scroll_to(self)
+
+ elif self.body_widget and not self.body_widget.is_expanded():
+ # Expand widget if is callapsible
+ self.body_widget.toggle_content(True)
+
+ return changed
+
def add_widget_to_layout(self, widget, label=None):
if self.checkbox_child and widget.entity is self.checkbox_child:
self.body_widget.add_widget_before_label(widget)
+ self._direct_children_widgets.append(widget)
return
if not widget.entity:
@@ -172,6 +230,8 @@ class DictImmutableKeysWidget(BaseWidget):
self._added_wrapper_ids.add(wrapper.id)
return
+ self._direct_children_widgets.append(widget)
+
row = self.content_layout.rowCount()
if not label or isinstance(widget, WrapperWidget):
self.content_layout.addWidget(widget, row, 0, 1, 2)
@@ -270,16 +330,17 @@ class BoolWidget(InputWidget):
height=checkbox_height, parent=self.content_widget
)
- spacer = QtWidgets.QWidget(self.content_widget)
- spacer.setAttribute(QtCore.Qt.WA_TranslucentBackground)
-
self.content_layout.addWidget(self.input_field, 0)
- self.content_layout.addWidget(spacer, 1)
+ self.content_layout.addStretch(1)
self.setFocusProxy(self.input_field)
+ self.input_field.focused_in.connect(self._on_input_focus)
self.input_field.stateChanged.connect(self._on_value_change)
+ def _on_input_focus(self):
+ self.focused_in()
+
def _on_entity_change(self):
if self.entity.value != self.input_field.isChecked():
self.set_entity_value()
@@ -297,9 +358,9 @@ class TextWidget(InputWidget):
def _add_inputs_to_layout(self):
multiline = self.entity.multiline
if multiline:
- self.input_field = QtWidgets.QPlainTextEdit(self.content_widget)
+ self.input_field = SettingsPlainTextEdit(self.content_widget)
else:
- self.input_field = QtWidgets.QLineEdit(self.content_widget)
+ self.input_field = SettingsLineEdit(self.content_widget)
placeholder_text = self.entity.placeholder_text
if placeholder_text:
@@ -313,8 +374,12 @@ class TextWidget(InputWidget):
self.content_layout.addWidget(self.input_field, 1, **layout_kwargs)
+ self.input_field.focused_in.connect(self._on_input_focus)
self.input_field.textChanged.connect(self._on_value_change)
+ def _on_input_focus(self):
+ self.focused_in()
+
def _on_entity_change(self):
if self.entity.value != self.input_value():
self.set_entity_value()
@@ -352,6 +417,10 @@ class NumberWidget(InputWidget):
self.content_layout.addWidget(self.input_field, 1)
self.input_field.valueChanged.connect(self._on_value_change)
+ self.input_field.focused_in.connect(self._on_input_focus)
+
+ def _on_input_focus(self):
+ self.focused_in()
def _on_entity_change(self):
if self.entity.value != self.input_field.value():
@@ -366,7 +435,7 @@ class NumberWidget(InputWidget):
self.entity.set(self.input_field.value())
-class RawJsonInput(QtWidgets.QPlainTextEdit):
+class RawJsonInput(SettingsPlainTextEdit):
tab_length = 4
def __init__(self, valid_type, *args, **kwargs):
@@ -428,15 +497,18 @@ class RawJsonWidget(InputWidget):
QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.MinimumExpanding
)
-
self.setFocusProxy(self.input_field)
self.content_layout.addWidget(
self.input_field, 1, alignment=QtCore.Qt.AlignTop
)
+ self.input_field.focused_in.connect(self._on_input_focus)
self.input_field.textChanged.connect(self._on_value_change)
+ def _on_input_focus(self):
+ self.focused_in()
+
def set_entity_value(self):
self.input_field.set_value(self.entity.value)
self._is_invalid = self.input_field.has_invalid_value()
@@ -470,7 +542,7 @@ class EnumeratorWidget(InputWidget):
)
else:
- self.input_field = ComboBox(self.content_widget)
+ self.input_field = SettingsComboBox(self.content_widget)
for enum_item in self.entity.enum_items:
for value, label in enum_item.items():
@@ -480,8 +552,12 @@ class EnumeratorWidget(InputWidget):
self.setFocusProxy(self.input_field)
+ self.input_field.focused_in.connect(self._on_input_focus)
self.input_field.value_changed.connect(self._on_value_change)
+ def _on_input_focus(self):
+ self.focused_in()
+
def _on_entity_change(self):
if self.entity.value != self.input_field.value():
self.set_entity_value()
@@ -562,6 +638,9 @@ class PathWidget(BaseWidget):
def set_entity_value(self):
self.input_field.set_entity_value()
+ def make_sure_is_visible(self, *args, **kwargs):
+ return self.input_field.make_sure_is_visible(*args, **kwargs)
+
def hierarchical_style_update(self):
self.update_style()
self.input_field.hierarchical_style_update()
@@ -632,14 +711,19 @@ class PathWidget(BaseWidget):
class PathInputWidget(InputWidget):
def _add_inputs_to_layout(self):
- self.input_field = QtWidgets.QLineEdit(self.content_widget)
+ self.input_field = SettingsLineEdit(self.content_widget)
placeholder = self.entity.placeholder_text
if placeholder:
self.input_field.setPlaceholderText(placeholder)
self.setFocusProxy(self.input_field)
self.content_layout.addWidget(self.input_field)
+
self.input_field.textChanged.connect(self._on_value_change)
+ self.input_field.focused_in.connect(self._on_input_focus)
+
+ def _on_input_focus(self):
+ self.focused_in()
def _on_entity_change(self):
if self.entity.value != self.input_value():
diff --git a/openpype/tools/settings/settings/list_item_widget.py b/openpype/tools/settings/settings/list_item_widget.py
index c9df5caf01..17412a30b9 100644
--- a/openpype/tools/settings/settings/list_item_widget.py
+++ b/openpype/tools/settings/settings/list_item_widget.py
@@ -18,8 +18,6 @@ class EmptyListItem(QtWidgets.QWidget):
add_btn = QtWidgets.QPushButton("+", self)
remove_btn = QtWidgets.QPushButton("-", self)
- spacer_widget = QtWidgets.QWidget(self)
- spacer_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
add_btn.setFocusPolicy(QtCore.Qt.ClickFocus)
remove_btn.setEnabled(False)
@@ -35,13 +33,12 @@ class EmptyListItem(QtWidgets.QWidget):
layout.setSpacing(3)
layout.addWidget(add_btn, 0)
layout.addWidget(remove_btn, 0)
- layout.addWidget(spacer_widget, 1)
+ layout.addStretch(1)
add_btn.clicked.connect(self._on_add_clicked)
self.add_btn = add_btn
self.remove_btn = remove_btn
- self.spacer_widget = spacer_widget
def _on_add_clicked(self):
self.entity_widget.add_new_item()
@@ -101,12 +98,6 @@ class ListItem(QtWidgets.QWidget):
self.category_widget, self.entity, self
)
- spacer_widget = QtWidgets.QWidget(self)
- spacer_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
- spacer_widget.setVisible(False)
-
- layout.addWidget(spacer_widget, 1)
-
layout.addWidget(up_btn, 0)
layout.addWidget(down_btn, 0)
@@ -115,8 +106,6 @@ class ListItem(QtWidgets.QWidget):
self.up_btn = up_btn
self.down_btn = down_btn
- self.spacer_widget = spacer_widget
-
self._row = -1
self._is_last = False
@@ -129,6 +118,9 @@ class ListItem(QtWidgets.QWidget):
*args, **kwargs
)
+ def make_sure_is_visible(self, *args, **kwargs):
+ return self.input_field.make_sure_is_visible(*args, **kwargs)
+
@property
def is_invalid(self):
return self.input_field.is_invalid
@@ -275,6 +267,26 @@ class ListWidget(InputWidget):
invalid.extend(input_field.get_invalid())
return invalid
+ def make_sure_is_visible(self, path, scroll_to):
+ if not path:
+ return False
+
+ entity_path = self.entity.path
+ if entity_path == path:
+ self.set_focus(scroll_to)
+ return True
+
+ if not path.startswith(entity_path):
+ return False
+
+ if self.body_widget and not self.body_widget.is_expanded():
+ self.body_widget.toggle_content(True)
+
+ for input_field in self.input_fields:
+ if input_field.make_sure_is_visible(path, scroll_to):
+ return True
+ return False
+
def _on_entity_change(self):
# TODO do less inefficient
childen_order = []
diff --git a/openpype/tools/settings/settings/list_strict_widget.py b/openpype/tools/settings/settings/list_strict_widget.py
index 340db2e8c6..046b6992f6 100644
--- a/openpype/tools/settings/settings/list_strict_widget.py
+++ b/openpype/tools/settings/settings/list_strict_widget.py
@@ -65,6 +65,21 @@ class ListStrictWidget(BaseWidget):
invalid.extend(input_field.get_invalid())
return invalid
+ def make_sure_is_visible(self, path, scroll_to):
+ if not path:
+ return False
+
+ entity_path = self.entity.path
+ if entity_path == path:
+ self.set_focus(scroll_to)
+ return True
+
+ if path.startswith(entity_path):
+ for input_field in self.input_fields:
+ if input_field.make_sure_is_visible(path, scroll_to):
+ return True
+ return False
+
def add_widget_to_layout(self, widget, label=None):
# Horizontally added children
if self.entity.is_horizontal:
diff --git a/openpype/tools/settings/settings/multiselection_combobox.py b/openpype/tools/settings/settings/multiselection_combobox.py
index 30ecb7b84b..176f4cab8c 100644
--- a/openpype/tools/settings/settings/multiselection_combobox.py
+++ b/openpype/tools/settings/settings/multiselection_combobox.py
@@ -21,6 +21,8 @@ class ComboItemDelegate(QtWidgets.QStyledItemDelegate):
class MultiSelectionComboBox(QtWidgets.QComboBox):
value_changed = QtCore.Signal()
+ focused_in = QtCore.Signal()
+
ignored_keys = {
QtCore.Qt.Key_Up,
QtCore.Qt.Key_Down,
@@ -56,6 +58,10 @@ class MultiSelectionComboBox(QtWidgets.QComboBox):
self.lines = {}
self.item_height = None
+ def focusInEvent(self, event):
+ self.focused_in.emit()
+ return super(MultiSelectionComboBox, self).focusInEvent(event)
+
def mousePressEvent(self, event):
"""Reimplemented."""
self._popup_is_shown = False
diff --git a/openpype/tools/settings/settings/style/style.css b/openpype/tools/settings/settings/style/style.css
index 3ce9837a8b..250c15063f 100644
--- a/openpype/tools/settings/settings/style/style.css
+++ b/openpype/tools/settings/settings/style/style.css
@@ -388,4 +388,32 @@ QTableView::item:pressed, QListView::item:pressed, QTreeView::item:pressed {
QTableView::item:selected:active, QTreeView::item:selected:active, QListView::item:selected:active {
background: #3d8ec9;
-}
\ No newline at end of file
+}
+
+#BreadcrumbsPathInput {
+ padding: 2px;
+ font-size: 9pt;
+}
+
+#BreadcrumbsButton {
+ padding-right: 12px;
+ font-size: 9pt;
+}
+
+#BreadcrumbsButton[empty="1"] {
+ padding-right: 0px;
+}
+
+#BreadcrumbsButton::menu-button {
+ width: 12px;
+ background: rgba(127, 127, 127, 60);
+}
+#BreadcrumbsButton::menu-button:hover {
+ background: rgba(127, 127, 127, 90);
+}
+
+#BreadcrumbsPanel {
+ border: 1px solid #4e5254;
+ border-radius: 5px;
+ background: #21252B;;
+}
diff --git a/openpype/tools/settings/settings/widgets.py b/openpype/tools/settings/settings/widgets.py
index b20ce5ed66..b821c3bb2c 100644
--- a/openpype/tools/settings/settings/widgets.py
+++ b/openpype/tools/settings/settings/widgets.py
@@ -9,6 +9,22 @@ from avalon.mongodb import (
from openpype.settings.lib import get_system_settings
+class SettingsLineEdit(QtWidgets.QLineEdit):
+ focused_in = QtCore.Signal()
+
+ def focusInEvent(self, event):
+ super(SettingsLineEdit, self).focusInEvent(event)
+ self.focused_in.emit()
+
+
+class SettingsPlainTextEdit(QtWidgets.QPlainTextEdit):
+ focused_in = QtCore.Signal()
+
+ def focusInEvent(self, event):
+ super(SettingsPlainTextEdit, self).focusInEvent(event)
+ self.focused_in.emit()
+
+
class ShadowWidget(QtWidgets.QWidget):
def __init__(self, message, parent):
super(ShadowWidget, self).__init__(parent)
@@ -70,6 +86,8 @@ class IconButton(QtWidgets.QPushButton):
class NumberSpinBox(QtWidgets.QDoubleSpinBox):
+ focused_in = QtCore.Signal()
+
def __init__(self, *args, **kwargs):
min_value = kwargs.pop("minimum", -99999)
max_value = kwargs.pop("maximum", 99999)
@@ -80,6 +98,10 @@ class NumberSpinBox(QtWidgets.QDoubleSpinBox):
self.setMinimum(min_value)
self.setMaximum(max_value)
+ def focusInEvent(self, event):
+ super(NumberSpinBox, self).focusInEvent(event)
+ self.focused_in.emit()
+
def wheelEvent(self, event):
if self.hasFocus():
super(NumberSpinBox, self).wheelEvent(event)
@@ -93,18 +115,23 @@ class NumberSpinBox(QtWidgets.QDoubleSpinBox):
return output
-class ComboBox(QtWidgets.QComboBox):
+class SettingsComboBox(QtWidgets.QComboBox):
value_changed = QtCore.Signal()
+ focused_in = QtCore.Signal()
def __init__(self, *args, **kwargs):
- super(ComboBox, self).__init__(*args, **kwargs)
+ super(SettingsComboBox, self).__init__(*args, **kwargs)
self.currentIndexChanged.connect(self._on_change)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
def wheelEvent(self, event):
if self.hasFocus():
- return super(ComboBox, self).wheelEvent(event)
+ return super(SettingsComboBox, self).wheelEvent(event)
+
+ def focusInEvent(self, event):
+ self.focused_in.emit()
+ return super(SettingsComboBox, self).focusInEvent(event)
def _on_change(self, *args, **kwargs):
self.value_changed.emit()
@@ -160,15 +187,13 @@ class ExpandingWidget(QtWidgets.QWidget):
after_label_layout = QtWidgets.QHBoxLayout(after_label_widget)
after_label_layout.setContentsMargins(0, 0, 0, 0)
- spacer_widget = QtWidgets.QWidget(side_line_widget)
-
side_line_layout = QtWidgets.QHBoxLayout(side_line_widget)
side_line_layout.setContentsMargins(5, 10, 0, 10)
side_line_layout.addWidget(button_toggle)
side_line_layout.addWidget(before_label_widget)
side_line_layout.addWidget(label_widget)
side_line_layout.addWidget(after_label_widget)
- side_line_layout.addWidget(spacer_widget, 1)
+ side_line_layout.addStretch(1)
top_part_layout = QtWidgets.QHBoxLayout(top_part)
top_part_layout.setContentsMargins(0, 0, 0, 0)
@@ -176,7 +201,6 @@ class ExpandingWidget(QtWidgets.QWidget):
before_label_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
after_label_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
- spacer_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
label_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
@@ -197,6 +221,8 @@ class ExpandingWidget(QtWidgets.QWidget):
self.main_layout.setSpacing(0)
self.main_layout.addWidget(top_part)
+ self.top_part = top_part
+
def hide_toolbox(self, hide_content=False):
self.button_toggle.setArrowType(QtCore.Qt.NoArrow)
self.toolbox_hidden = True
@@ -215,6 +241,9 @@ class ExpandingWidget(QtWidgets.QWidget):
self.main_layout.addWidget(content_widget)
self.content_widget = content_widget
+ def is_expanded(self):
+ return self.button_toggle.isChecked()
+
def _btn_clicked(self):
self.toggle_content(self.button_toggle.isChecked())
@@ -341,31 +370,21 @@ class GridLabelWidget(QtWidgets.QWidget):
self.properties = {}
+ label_widget = QtWidgets.QLabel(label, self)
+
+ label_proxy_layout = QtWidgets.QHBoxLayout()
+ label_proxy_layout.setContentsMargins(0, 0, 0, 0)
+ label_proxy_layout.setSpacing(0)
+
+ label_proxy_layout.addWidget(label_widget, 0, QtCore.Qt.AlignRight)
+
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(0, 2, 0, 0)
layout.setSpacing(0)
- label_proxy = QtWidgets.QWidget(self)
+ layout.addLayout(label_proxy_layout, 0)
+ layout.addStretch(1)
- label_proxy_layout = QtWidgets.QHBoxLayout(label_proxy)
- label_proxy_layout.setContentsMargins(0, 0, 0, 0)
- label_proxy_layout.setSpacing(0)
-
- label_widget = QtWidgets.QLabel(label, label_proxy)
- spacer_widget_h = SpacerWidget(label_proxy)
- label_proxy_layout.addWidget(
- spacer_widget_h, 0, alignment=QtCore.Qt.AlignRight
- )
- label_proxy_layout.addWidget(
- label_widget, 0, alignment=QtCore.Qt.AlignRight
- )
-
- spacer_widget_v = SpacerWidget(self)
-
- layout.addWidget(label_proxy, 0)
- layout.addWidget(spacer_widget_v, 1)
-
- label_proxy.setAttribute(QtCore.Qt.WA_TranslucentBackground)
label_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.label_widget = label_widget
@@ -380,6 +399,8 @@ class GridLabelWidget(QtWidgets.QWidget):
def mouseReleaseEvent(self, event):
if self.input_field:
+ if event and event.button() == QtCore.Qt.LeftButton:
+ self.input_field.focused_in()
return self.input_field.show_actions_menu(event)
return super(GridLabelWidget, self).mouseReleaseEvent(event)
@@ -440,6 +461,7 @@ class NiceCheckbox(QtWidgets.QFrame):
stateChanged = QtCore.Signal(int)
checked_bg_color = QtGui.QColor(69, 128, 86)
unchecked_bg_color = QtGui.QColor(170, 80, 80)
+ focused_in = QtCore.Signal()
def set_bg_color(self, color):
self._bg_color = color
@@ -564,6 +586,10 @@ class NiceCheckbox(QtWidgets.QFrame):
self._on_checkstate_change()
+ def mousePressEvent(self, event):
+ self.focused_in.emit()
+ super(NiceCheckbox, self).mousePressEvent(event)
+
def mouseReleaseEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
self.setCheckState()
diff --git a/openpype/tools/settings/settings/wrapper_widgets.py b/openpype/tools/settings/settings/wrapper_widgets.py
index 915a2cf875..b14a226912 100644
--- a/openpype/tools/settings/settings/wrapper_widgets.py
+++ b/openpype/tools/settings/settings/wrapper_widgets.py
@@ -19,6 +19,14 @@ class WrapperWidget(QtWidgets.QWidget):
self.create_ui()
+ def make_sure_is_visible(self, *args, **kwargs):
+ changed = False
+ for input_field in self.input_fields:
+ if input_field.make_sure_is_visible(*args, **kwargs):
+ changed = True
+ break
+ return changed
+
def create_ui(self):
raise NotImplementedError(
"{} does not have implemented `create_ui`.".format(
@@ -89,6 +97,14 @@ class CollapsibleWrapper(WrapperWidget):
else:
body_widget.hide_toolbox(hide_content=False)
+ def make_sure_is_visible(self, *args, **kwargs):
+ result = super(CollapsibleWrapper, self).make_sure_is_visible(
+ *args, **kwargs
+ )
+ if result:
+ self.body_widget.toggle_content(True)
+ return result
+
def add_widget_to_layout(self, widget, label=None):
self.input_fields.append(widget)
diff --git a/openpype/tools/tray/pype_tray.py b/openpype/tools/tray/pype_tray.py
index 794312f389..ed66f1a80f 100644
--- a/openpype/tools/tray/pype_tray.py
+++ b/openpype/tools/tray/pype_tray.py
@@ -15,11 +15,7 @@ from openpype.api import (
get_system_settings
)
from openpype.lib import get_pype_execute_args
-from openpype.modules import (
- TrayModulesManager,
- ITrayAction,
- ITrayService
-)
+from openpype.modules import TrayModulesManager
from openpype import style
from .pype_info_widget import PypeInfoWidget
@@ -80,6 +76,10 @@ class TrayManager:
def initialize_modules(self):
"""Add modules to tray."""
+ from openpype_interfaces import (
+ ITrayAction,
+ ITrayService
+ )
self.modules_manager.initialize(self, self.tray_widget.menu)
diff --git a/openpype/tools/tray_app/app.py b/openpype/tools/tray_app/app.py
index 339e6343f8..03f8321464 100644
--- a/openpype/tools/tray_app/app.py
+++ b/openpype/tools/tray_app/app.py
@@ -9,7 +9,7 @@ import itertools
from datetime import datetime
from avalon import style
-from openpype.modules.webserver import host_console_listener
+from openpype_modules.webserver import host_console_listener
from Qt import QtWidgets, QtCore
diff --git a/openpype/tools/workfiles/app.py b/openpype/tools/workfiles/app.py
index 42f0e422ae..b542e6e718 100644
--- a/openpype/tools/workfiles/app.py
+++ b/openpype/tools/workfiles/app.py
@@ -12,10 +12,15 @@ from avalon import style, io, api, pipeline
from avalon.tools import lib as tools_lib
from avalon.tools.widgets import AssetWidget
-from avalon.tools.models import TasksModel
from avalon.tools.delegates import PrettyTimeDelegate
-from .model import FilesModel
+from .model import (
+ TASK_NAME_ROLE,
+ TASK_TYPE_ROLE,
+ FilesModel,
+ TasksModel,
+ TasksProxyModel
+)
from .view import FilesView
from openpype.lib import (
@@ -23,7 +28,8 @@ from openpype.lib import (
get_workdir,
get_workfile_doc,
create_workfile_doc,
- save_workfile_data_to_doc
+ save_workfile_data_to_doc,
+ get_workfile_template_key
)
log = logging.getLogger(__name__)
@@ -55,9 +61,13 @@ class NameWindow(QtWidgets.QDialog):
# Set work file data for template formatting
asset_name = session["AVALON_ASSET"]
- project_doc = io.find_one({
- "type": "project"
- })
+ project_doc = io.find_one(
+ {"type": "project"},
+ {
+ "name": True,
+ "data.code": True
+ }
+ )
self.data = {
"project": {
"name": project_doc["name"],
@@ -126,10 +136,14 @@ class NameWindow(QtWidgets.QDialog):
# for "{version".
if "{version" in self.template:
inputs_layout.addRow("Version:", version_widget)
+ else:
+ version_widget.setVisible(False)
# Add subversion only if template containt `{comment}`
if "{comment}" in self.template:
inputs_layout.addRow("Subversion:", subversion_input)
+ else:
+ subversion_input.setVisible(False)
inputs_layout.addRow("Extension:", ext_combo)
inputs_layout.addRow("Preview:", preview_label)
@@ -305,48 +319,46 @@ class TasksWidget(QtWidgets.QWidget):
task_changed = QtCore.Signal()
- def __init__(self, parent=None):
+ def __init__(self, dbcon=None, parent=None):
super(TasksWidget, self).__init__(parent)
- self.setContentsMargins(0, 0, 0, 0)
- view = QtWidgets.QTreeView()
- view.setIndentation(0)
- model = TasksModel(io)
- view.setModel(model)
+ tasks_view = QtWidgets.QTreeView(self)
+ tasks_view.setIndentation(0)
+ tasks_view.setSortingEnabled(True)
+ if dbcon is None:
+ dbcon = io
+
+ tasks_model = TasksModel(dbcon)
+ tasks_proxy = TasksProxyModel()
+ tasks_proxy.setSourceModel(tasks_model)
+ tasks_view.setModel(tasks_proxy)
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
- layout.addWidget(view)
+ layout.addWidget(tasks_view)
- # Hide the default tasks "count" as we don't need that data here.
- view.setColumnHidden(1, True)
+ selection_model = tasks_view.selectionModel()
+ selection_model.currentChanged.connect(self.task_changed)
- selection = view.selectionModel()
- selection.currentChanged.connect(self.task_changed)
-
- self.models = {
- "tasks": model
- }
-
- self.widgets = {
- "view": view,
- }
+ self._tasks_model = tasks_model
+ self._tasks_proxy = tasks_proxy
+ self._tasks_view = tasks_view
self._last_selected_task = None
- def set_asset(self, asset):
- if asset is None:
- # Asset deselected
+ def set_asset(self, asset_doc):
+ # Asset deselected
+ if asset_doc is None:
return
# Try and preserve the last selected task and reselect it
# after switching assets. If there's no currently selected
# asset keep whatever the "last selected" was prior to it.
- current = self.get_current_task()
+ current = self.get_current_task_name()
if current:
self._last_selected_task = current
- self.models["tasks"].set_assets(asset_docs=[asset])
+ self._tasks_model.set_asset(asset_doc)
if self._last_selected_task:
self.select_task(self._last_selected_task)
@@ -354,7 +366,7 @@ class TasksWidget(QtWidgets.QWidget):
# Force a task changed emit.
self.task_changed.emit()
- def select_task(self, task):
+ def select_task(self, task_name):
"""Select a task by name.
If the task does not exist in the current model then selection is only
@@ -366,39 +378,40 @@ class TasksWidget(QtWidgets.QWidget):
"""
# Clear selection
- view = self.widgets["view"]
- model = view.model()
- selection_model = view.selectionModel()
+ selection_model = self._tasks_view.selectionModel()
selection_model.clearSelection()
# Select the task
mode = selection_model.Select | selection_model.Rows
- for row in range(model.rowCount(QtCore.QModelIndex())):
- index = model.index(row, 0, QtCore.QModelIndex())
- name = index.data(QtCore.Qt.DisplayRole)
- if name == task:
+ for row in range(self._tasks_model.rowCount()):
+ index = self._tasks_model.index(row, 0)
+ name = index.data(TASK_NAME_ROLE)
+ if name == task_name:
selection_model.select(index, mode)
# Set the currently active index
- view.setCurrentIndex(index)
+ self._tasks_view.setCurrentIndex(index)
+ break
- def get_current_task(self):
+ def get_current_task_name(self):
"""Return name of task at current index (selected)
Returns:
str: Name of the current task.
"""
- view = self.widgets["view"]
- index = view.currentIndex()
- index = index.sibling(index.row(), 0) # ensure column zero for name
+ index = self._tasks_view.currentIndex()
+ selection_model = self._tasks_view.selectionModel()
+ if index.isValid() and selection_model.isSelected(index):
+ return index.data(TASK_NAME_ROLE)
+ return None
- selection = view.selectionModel()
- if selection.isSelected(index):
- # Ignore when the current task is not selected as the "No task"
- # placeholder might be the current index even though it's
- # disallowed to be selected. So we only return if it is selected.
- return index.data(QtCore.Qt.DisplayRole)
+ def get_current_task_type(self):
+ index = self._tasks_view.currentIndex()
+ selection_model = self._tasks_view.selectionModel()
+ if index.isValid() and selection_model.isSelected(index):
+ return index.data(TASK_TYPE_ROLE)
+ return None
class FilesWidget(QtWidgets.QWidget):
@@ -411,7 +424,8 @@ class FilesWidget(QtWidgets.QWidget):
# Setup
self._asset = None
- self._task = None
+ self._task_name = None
+ self._task_type = None
# Pype's anatomy object for current project
self.anatomy = Anatomy(io.Session["AVALON_PROJECT"])
@@ -506,14 +520,15 @@ class FilesWidget(QtWidgets.QWidget):
self.btn_browse = btn_browse
self.btn_save = btn_save
- def set_asset_task(self, asset, task):
+ def set_asset_task(self, asset, task_name, task_type):
self._asset = asset
- self._task = task
+ self._task_name = task_name
+ self._task_type = task_type
# Define a custom session so we can query the work root
# for a "Work area" that is not our current Session.
# This way we can browse it even before we enter it.
- if self._asset and self._task:
+ if self._asset and self._task_name and self._task_type:
session = self._get_session()
self.root = self.host.work_root(session)
self.files_model.set_root(self.root)
@@ -533,10 +548,16 @@ class FilesWidget(QtWidgets.QWidget):
"""Return a modified session for the current asset and task"""
session = api.Session.copy()
+ self.template_key = get_workfile_template_key(
+ self._task_type,
+ session["AVALON_APP"],
+ project_name=session["AVALON_PROJECT"]
+ )
changes = pipeline.compute_session_changes(
session,
asset=self._asset,
- task=self._task
+ task=self._task_name,
+ template_key=self.template_key
)
session.update(changes)
@@ -549,14 +570,19 @@ class FilesWidget(QtWidgets.QWidget):
changes = pipeline.compute_session_changes(
session,
asset=self._asset,
- task=self._task
+ task=self._task_name,
+ template_key=self.template_key
)
if not changes:
# Return early if we're already in the right Session context
# to avoid any unwanted Task Changed callbacks to be triggered.
return
- api.update_current_task(asset=self._asset, task=self._task)
+ api.update_current_task(
+ asset=self._asset,
+ task=self._task_name,
+ template_key=self.template_key
+ )
def open_file(self, filepath):
host = self.host
@@ -606,7 +632,7 @@ class FilesWidget(QtWidgets.QWidget):
result = messagebox.exec_()
if result == messagebox.Yes:
return True
- elif result == messagebox.No:
+ if result == messagebox.No:
return False
return None
@@ -700,7 +726,7 @@ class FilesWidget(QtWidgets.QWidget):
self._enter_session() # Make sure we are in the right session
self.host.save_file(file_path)
- self.set_asset_task(self._asset, self._task)
+ self.set_asset_task(self._asset, self._task_name, self._task_type)
pipeline.emit("after.workfile.save", [file_path])
@@ -727,7 +753,8 @@ class FilesWidget(QtWidgets.QWidget):
changes = pipeline.compute_session_changes(
session,
asset=self._asset,
- task=self._task
+ task=self._task_name,
+ template_key=self.template_key
)
session.update(changes)
@@ -750,7 +777,7 @@ class FilesWidget(QtWidgets.QWidget):
# Force a full to the asset as opposed to just self.refresh() so
# that it will actually check again whether the Work directory exists
- self.set_asset_task(self._asset, self._task)
+ self.set_asset_task(self._asset, self._task_name, self._task_type)
def refresh(self):
"""Refresh listed files for current selection in the interface"""
@@ -927,7 +954,7 @@ class Window(QtWidgets.QMainWindow):
assets_widget = AssetWidget(io, parent=home_body_widget)
assets_widget.set_current_asset_btn_visibility(True)
- tasks_widget = TasksWidget(home_body_widget)
+ tasks_widget = TasksWidget(io, home_body_widget)
files_widget = FilesWidget(home_body_widget)
side_panel = SidePanelWidget(home_body_widget)
@@ -999,7 +1026,7 @@ class Window(QtWidgets.QMainWindow):
if asset_docs:
asset_doc = asset_docs[0]
- task_name = self.tasks_widget.get_current_task()
+ task_name = self.tasks_widget.get_current_task_name()
workfile_doc = None
if asset_doc and task_name and filepath:
@@ -1026,7 +1053,7 @@ class Window(QtWidgets.QMainWindow):
def _get_current_workfile_doc(self, filepath=None):
if filepath is None:
filepath = self.files_widget._get_selected_filepath()
- task_name = self.tasks_widget.get_current_task()
+ task_name = self.tasks_widget.get_current_task_name()
asset_docs = self.assets_widget.get_selected_assets()
if not task_name or not asset_docs or not filepath:
return
@@ -1046,7 +1073,7 @@ class Window(QtWidgets.QMainWindow):
workdir, filename = os.path.split(filepath)
asset_docs = self.assets_widget.get_selected_assets()
asset_doc = asset_docs[0]
- task_name = self.tasks_widget.get_current_task()
+ task_name = self.tasks_widget.get_current_task_name()
create_workfile_doc(asset_doc, task_name, filename, workdir, io)
def set_context(self, context):
@@ -1065,7 +1092,6 @@ class Window(QtWidgets.QMainWindow):
# Select the asset
self.assets_widget.select_assets([asset], expand=True)
- # Force a refresh on Tasks?
self.tasks_widget.set_asset(asset_document)
if "task" in context:
@@ -1095,12 +1121,13 @@ class Window(QtWidgets.QMainWindow):
asset = self.assets_widget.get_selected_assets() or None
if asset is not None:
asset = asset[0]
- task = self.tasks_widget.get_current_task()
+ task_name = self.tasks_widget.get_current_task_name()
+ task_type = self.tasks_widget.get_current_task_type()
self.tasks_widget.setEnabled(bool(asset))
- self.files_widget.setEnabled(all([bool(task), bool(asset)]))
- self.files_widget.set_asset_task(asset, task)
+ self.files_widget.setEnabled(all([bool(task_name), bool(asset)]))
+ self.files_widget.set_asset_task(asset, task_name, task_type)
self.files_widget.refresh()
diff --git a/openpype/tools/workfiles/model.py b/openpype/tools/workfiles/model.py
index 368988fd4e..92fbf76b95 100644
--- a/openpype/tools/workfiles/model.py
+++ b/openpype/tools/workfiles/model.py
@@ -1,7 +1,7 @@
import os
import logging
-from Qt import QtCore
+from Qt import QtCore, QtGui
from avalon import style
from avalon.vendor import qtawesome
@@ -9,6 +9,10 @@ from avalon.tools.models import TreeModel, Item
log = logging.getLogger(__name__)
+TASK_NAME_ROLE = QtCore.Qt.UserRole + 1
+TASK_TYPE_ROLE = QtCore.Qt.UserRole + 2
+TASK_ORDER_ROLE = QtCore.Qt.UserRole + 3
+
class FilesModel(TreeModel):
"""Model listing files with specified extensions in a root folder"""
@@ -151,3 +155,142 @@ class FilesModel(TreeModel):
return "Date modified"
return super(FilesModel, self).headerData(section, orientation, role)
+
+
+class TasksProxyModel(QtCore.QSortFilterProxyModel):
+ def lessThan(self, x_index, y_index):
+ x_order = x_index.data(TASK_ORDER_ROLE)
+ y_order = y_index.data(TASK_ORDER_ROLE)
+ if x_order is not None and y_order is not None:
+ if x_order < y_order:
+ return True
+ if x_order > y_order:
+ return False
+
+ elif x_order is None and y_order is not None:
+ return True
+
+ elif y_order is None and x_order is not None:
+ return False
+
+ x_name = x_index.data(QtCore.Qt.DisplayRole)
+ y_name = y_index.data(QtCore.Qt.DisplayRole)
+ if x_name == y_name:
+ return True
+
+ if x_name == tuple(sorted((x_name, y_name)))[0]:
+ return False
+ return True
+
+
+class TasksModel(QtGui.QStandardItemModel):
+ """A model listing the tasks combined for a list of assets"""
+ def __init__(self, dbcon, parent=None):
+ super(TasksModel, self).__init__(parent=parent)
+ self.dbcon = dbcon
+ self._default_icon = qtawesome.icon(
+ "fa.male",
+ color=style.colors.default
+ )
+ self._no_tasks_icon = qtawesome.icon(
+ "fa.exclamation-circle",
+ color=style.colors.mid
+ )
+ self._cached_icons = {}
+ self._project_task_types = {}
+
+ self._refresh_task_types()
+
+ def _refresh_task_types(self):
+ # Get the project configured icons from database
+ project = self.dbcon.find_one(
+ {"type": "project"},
+ {"config.tasks"}
+ )
+ tasks = project["config"].get("tasks") or {}
+ self._project_task_types = tasks
+
+ def _try_get_awesome_icon(self, icon_name):
+ icon = None
+ if icon_name:
+ try:
+ icon = qtawesome.icon(
+ "fa.{}".format(icon_name),
+ color=style.colors.default
+ )
+
+ except Exception:
+ pass
+ return icon
+
+ def headerData(self, section, orientation, role):
+ # Show nice labels in the header
+ if (
+ role == QtCore.Qt.DisplayRole
+ and orientation == QtCore.Qt.Horizontal
+ ):
+ if section == 0:
+ return "Tasks"
+
+ return super(TasksModel, self).headerData(section, orientation, role)
+
+ def _get_icon(self, task_icon, task_type_icon):
+ if task_icon in self._cached_icons:
+ return self._cached_icons[task_icon]
+
+ icon = self._try_get_awesome_icon(task_icon)
+ if icon is not None:
+ self._cached_icons[task_icon] = icon
+ return icon
+
+ if task_type_icon in self._cached_icons:
+ icon = self._cached_icons[task_type_icon]
+ self._cached_icons[task_icon] = icon
+ return icon
+
+ icon = self._try_get_awesome_icon(task_type_icon)
+ if icon is None:
+ icon = self._default_icon
+
+ self._cached_icons[task_icon] = icon
+ self._cached_icons[task_type_icon] = icon
+
+ return icon
+
+ def set_asset(self, asset_doc):
+ """Set assets to track by their database id
+
+ Arguments:
+ asset_doc (dict): Asset document from MongoDB.
+ """
+ self.clear()
+
+ if not asset_doc:
+ return
+
+ asset_tasks = asset_doc.get("data", {}).get("tasks") or {}
+ items = []
+ for task_name, task_info in asset_tasks.items():
+ task_icon = task_info.get("icon")
+ task_type = task_info.get("type")
+ task_order = task_info.get("order")
+ task_type_info = self._project_task_types.get(task_type) or {}
+ task_type_icon = task_type_info.get("icon")
+ icon = self._get_icon(task_icon, task_type_icon)
+
+ label = "{} ({})".format(task_name, task_type or "type N/A")
+ item = QtGui.QStandardItem(label)
+ item.setData(task_name, TASK_NAME_ROLE)
+ item.setData(task_type, TASK_TYPE_ROLE)
+ item.setData(task_order, TASK_ORDER_ROLE)
+ item.setData(icon, QtCore.Qt.DecorationRole)
+ item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
+ items.append(item)
+
+ if not items:
+ item = QtGui.QStandardItem("No task")
+ item.setData(self._no_tasks_icon, QtCore.Qt.DecorationRole)
+ item.setFlags(QtCore.Qt.NoItemFlags)
+ items.append(item)
+
+ self.invisibleRootItem().appendRows(items)
diff --git a/openpype/version.py b/openpype/version.py
index c4bd5a14cb..17bd0ff892 100644
--- a/openpype/version.py
+++ b/openpype/version.py
@@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
-__version__ = "3.3.0-nightly.9"
+__version__ = "3.4.0-nightly.4"
diff --git a/repos/avalon-core b/repos/avalon-core
index e5c8a15fde..f48fce09c0 160000
--- a/repos/avalon-core
+++ b/repos/avalon-core
@@ -1 +1 @@
-Subproject commit e5c8a15fde77708c924eab3018bda255f17b5390
+Subproject commit f48fce09c0986c1fd7f6731de33907be46b436c5
diff --git a/tools/build_dependencies.py b/tools/build_dependencies.py
index 3898450471..e5a430e220 100644
--- a/tools/build_dependencies.py
+++ b/tools/build_dependencies.py
@@ -135,6 +135,16 @@ progress_bar.close()
# iterate over frozen libs and create list to delete
libs_dir = build_dir / "lib"
+# On Windows "python3.dll" is needed for PyQt5 from the build.
+if platform.system().lower() == "windows":
+ src = Path(libs_dir / "PyQt5" / "python3.dll")
+ dst = Path(deps_dir / "PyQt5" / "python3.dll")
+ if src.exists():
+ shutil.copyfile(src, dst)
+ else:
+ _print("Could not find {}".format(src), 1)
+ sys.exit(1)
+
to_delete = []
# _print("Finding duplicates ...")
deps_items = list(deps_dir.iterdir())
diff --git a/tools/ci_tools.py b/tools/ci_tools.py
index 436551c243..3c1aaae991 100644
--- a/tools/ci_tools.py
+++ b/tools/ci_tools.py
@@ -36,7 +36,7 @@ def get_log_since_tag(version):
def release_type(log):
regex_minor = ["feature/", "(feat)"]
- regex_patch = ["bugfix/", "fix/", "(fix)"]
+ regex_patch = ["bugfix/", "fix/", "(fix)", "enhancement/"]
for reg in regex_minor:
if re.search(reg, log):
return "minor"
diff --git a/vendor/deadline/custom/plugins/GlobalJobPreLoad.py b/vendor/deadline/custom/plugins/GlobalJobPreLoad.py
index 41df9d4dc9..8631b035cf 100644
--- a/vendor/deadline/custom/plugins/GlobalJobPreLoad.py
+++ b/vendor/deadline/custom/plugins/GlobalJobPreLoad.py
@@ -55,9 +55,9 @@ def inject_openpype_environment(deadlinePlugin):
"AVALON_TASK, AVALON_APP_NAME"
raise RuntimeError(msg)
- print("args::{}".format(args))
+ print("args:::{}".format(args))
- exit_code = subprocess.call(args, shell=True)
+ exit_code = subprocess.call(args, cwd=os.path.dirname(openpype_app))
if exit_code != 0:
raise RuntimeError("Publishing failed, check worker's log")
diff --git a/website/docs/admin_webserver_for_webpublisher.md b/website/docs/admin_webserver_for_webpublisher.md
new file mode 100644
index 0000000000..6e72ccaf32
--- /dev/null
+++ b/website/docs/admin_webserver_for_webpublisher.md
@@ -0,0 +1,83 @@
+---
+id: admin_webserver_for_webpublisher
+title: Webserver for webpublisher
+sidebar_label: Webserver for webpublisher
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+Running Openpype webserver is needed as a backend part for Web publishing.
+Any OS supported by Openpype could be used as a host server.
+
+Webpublishing consists of two sides, the front end (FE) and the Openpype backend. This documentation only covers the OP side.
+
+It is expected that FE and OP will live on two separate servers, FE publicly available, OP safely in customer network.
+
+# Requirements for servers
+- OP server allows access to its `8079` port for FE. (It is recommended to whitelist only FE IP.)
+- have shared folder for published resources (images, workfiles etc) on both servers
+
+# Prepare Ftrack
+The current webpublish process expects authentication via Slack. It is expected that the customer has users created on Ftrack
+with the same email addresses as on Slack. As some customers might have usernames different from their emails, a conversion from email to username is needed.
+
+For this "pype.club" user needs to be present on Ftrack, creation of this user should be standard part of Ftrack preparation for Openpype.
+Next create API key on Ftrack, store this information temporarily as you won't have access to this key after creation.
+
+
+# Prepare Openpype
+
+Deploy OP build distribution (Openpype Igniter) on an OS of your choice.
+
+## Run webserver as a Linux service:
+
+(This expects that OP Igniter is deployed to `/opt/openpype` and the log should be stored in `/tmp/openpype.log`)
+
+- create file `sudo vi /opt/openpype/webpublisher_webserver.sh`
+
+- paste content
+```sh
+#!/usr/bin/env bash
+export OPENPYPE_DEBUG=3
+export FTRACK_BOT_API_USER=YOUR_API_USER
+export FTRACK_BOT_API_KEY=YOUR_API_KEY
+export PYTHONDONTWRITEBYTECODE=1
+export OPENPYPE_MONGO=YOUR_MONGODB_CONNECTION
+
+pushd /opt/openpype
+./openpype_console webpublisherwebserver --upload_dir YOUR_SHARED_FOLDER_ON_HOST --executable /opt/openpype/openpype_console --host YOUR_HOST_IP --port YOUR_HOST_PORT > /tmp/openpype.log 2>&1
+```
+
+1. create service file `sudo vi /etc/systemd/system/openpype-webserver.service`
+
+2. paste content
+```sh
+[Unit]
+Description=Run OpenPype Ftrack Webserver Service
+After=network.target
+
+[Service]
+Type=idle
+ExecStart=/opt/openpype/webpublisher_webserver.sh
+Restart=on-failure
+RestartSec=10s
+StandardOutput=append:/tmp/openpype.log
+StandardError=append:/tmp/openpype.log
+
+[Install]
+WantedBy=multi-user.target
+```
+
+3. change file permission:
+ `sudo chmod 0755 /etc/systemd/system/openpype-webserver.service`
+
+4. enable service:
+ `sudo systemctl enable openpype-webserver`
+
+5. start service:
+ `sudo systemctl start openpype-webserver`
+
+6. Check `/tmp/openpype.log` to see if OP started
+
+(Note: service could be restarted by `service openpype-webserver restart` - this will result in purge of current log file!)
\ No newline at end of file
diff --git a/website/docs/artist_hosts_houdini.md b/website/docs/artist_hosts_houdini.md
new file mode 100644
index 0000000000..d2aadf05cb
--- /dev/null
+++ b/website/docs/artist_hosts_houdini.md
@@ -0,0 +1,78 @@
+---
+id: artist_hosts_houdini
+title: Houdini
+sidebar_label: Houdini
+---
+
+## OpenPype global tools
+
+- [Work Files](artist_tools.md#workfiles)
+- [Create](artist_tools.md#creator)
+- [Load](artist_tools.md#loader)
+- [Manage (Inventory)](artist_tools.md#inventory)
+- [Publish](artist_tools.md#publisher)
+- [Library Loader](artist_tools.md#library-loader)
+
+## Publishing Alembic Cameras
+You can publish baked camera in Alembic format. Select your camera and go **OpenPype -> Create** and select **Camera (abc)**.
+This will create Alembic ROP in **out** with path and frame range already set. This node will have a name you've
+assigned in the **Creator** menu. For example if you name the subset `Default`, output Alembic Driver will be named
+`cameraDefault`. After that, you can **OpenPype -> Publish** and after some validations your camera will be published
+to `abc` file.
+
+## Publishing Composites - Image Sequences
+You can publish image sequence directly from Houdini. You can use any `cop` network you have and publish image
+sequence generated from it. For example I've created simple **cop** graph to generate some noise:
+
+
+If I want to publish it, I'll select node I like - in this case `radialblur1` and go **OpenPype -> Create** and
+select **Composite (Image Sequence)**. This will create `/out/imagesequenceNoise` Composite ROP (I've named my subset
+*Noise*) with frame range set. When you hit **Publish** it will render image sequence from selected node.
+
+## Publishing Point Caches (alembic)
+Publishing point caches in alembic format is pretty straightforward, but by default it enforces better compatibility
+with other DCCs, so the exported data needs to be prepared in a certain way. You need to add a `path` attribute so objects
+in the alembic are better structured. When using an alembic round trip in Houdini (loading alembics, modifying them and
+then publishing the modifications), `path` is automatically resolved by the alembic nodes.
+
+In this example, I've created this node graph on **sop** level, and I want to publish it as point cache.
+
+
+
+*Note: `connectivity` will add index for each primitive and `primitivewrangle1` will add `path` attribute, so it will
+be for each primitive (`sphere1` and `sphere2`) as Maya is expecting - `strange_GRP/strange0_GEO/strange0_GEOShape`. How
+you handle `path` attribute is up to you, this is just an example.*
+
+Now select the `output0` node and go **OpenPype -> Create** and select **Point Cache**. It will create
+Alembic ROP `/out/pointcacheStrange`
+
+
+## Redshift
+:::note Work in progress
+This part of documentation is still work in progress.
+:::
+
+## USD (experimental support)
+### Publishing USD
+You can publish your Solaris Stage as USD file.
+
+
+This is very simple test stage. I've selected `output` **lop** node and went to **OpenPype -> Create** where I've
+selected **USD**. This created `/out/usdDefault` USD ROP node.
+
+### Publishing USD render
+
+USD Render works in a similar manner as USD file, except it will create a **USD Render** ROP node in out and will publish
+the images produced by it. If you have a node selected in the Solaris Stage it will be added as the **lop path** to the ROP.
+
+## Publishing VDB
+
+Publishing VDB files works as with other data types. In this example I've created simple PyroFX explosion from
+sphere. In `pyro_import` I've converted the volume to VDB:
+
+
+
+I've selected `vdb1` and went **OpenPype -> Create** and selected **VDB Cache**. This will create
+geometry ROP in `/out` and sets its paths to output vdb files. During the publishing process
+whole dops are cooked.
+
diff --git a/website/docs/artist_hosts_maya.md b/website/docs/artist_hosts_maya.md
index 6fbd59ae1e..6387da4adc 100644
--- a/website/docs/artist_hosts_maya.md
+++ b/website/docs/artist_hosts_maya.md
@@ -701,6 +701,32 @@ under `input_SET`). This mechanism uses *cbId* attribute on those shapes.
If match is found shapes are connected using their `outMesh` and `outMesh`. Thus you can easily connect existing animation to loaded rig.
:::
+## Working with Xgen in OpenPype
+
+OpenPype supports publishing and loading of Xgen interactive grooms. You can publish
+them as mayaAscii files with scalps that can be loaded into another maya scene, or as
+alembic caches.
+
+### Publishing Xgen Grooms
+
+To prepare xgen for publishing, just select all the descriptions that should be published together and then create an Xgen Subset in the scene using **OpenPype menu** → **Create**... and select **Xgen Interactive**. Leave Use selection checked.
+
+For actual publishing of your groom, go to **OpenPype → Publish** and then press ▶ to publish. This will export a `.ma` file containing your grooms with any geometries they are attached to and also a baked cache in `.abc` format
+
+
+:::tip adding more descriptions
+You can add multiple xgen descriptions into the subset you are about to publish, simply by
+adding them to the maya set that was created for you. Please make sure that only xgen description nodes are present inside of the set and not the scalp geometry.
+:::
+
+### Loading Xgen
+
+You can use published xgens by loading them using the OpenPype Loader. You can choose to reference or import the xgen. We don't have any automatic mesh linking at the moment and it is expected that the groom is published with a scalp, which can then be manually attached to your animated mesh, for example.
+
+The alembic representation can be loaded too and it contains the groom converted to curves. Keep in mind that the density of the alembic directly depends on your viewport xgen density at the point of export.
+
+
+
## Using Redshift Proxies
OpenPype supports working with Redshift Proxy files. You can create Redshift Proxy from almost
diff --git a/website/docs/assets/houdini_imagesequence_cop.png b/website/docs/assets/houdini_imagesequence_cop.png
new file mode 100644
index 0000000000..54ed5977b9
Binary files /dev/null and b/website/docs/assets/houdini_imagesequence_cop.png differ
diff --git a/website/docs/assets/houdini_pointcache_path.png b/website/docs/assets/houdini_pointcache_path.png
new file mode 100644
index 0000000000..3687a9c0dd
Binary files /dev/null and b/website/docs/assets/houdini_pointcache_path.png differ
diff --git a/website/docs/assets/houdini_usd_stage.png b/website/docs/assets/houdini_usd_stage.png
new file mode 100644
index 0000000000..cba9428604
Binary files /dev/null and b/website/docs/assets/houdini_usd_stage.png differ
diff --git a/website/docs/assets/houdini_vdb_setup.png b/website/docs/assets/houdini_vdb_setup.png
new file mode 100644
index 0000000000..e27e0b6c36
Binary files /dev/null and b/website/docs/assets/houdini_vdb_setup.png differ
diff --git a/website/sidebars.js b/website/sidebars.js
index 488814a385..3a4b933b9a 100644
--- a/website/sidebars.js
+++ b/website/sidebars.js
@@ -22,6 +22,7 @@ module.exports = {
"artist_hosts_maya",
"artist_hosts_blender",
"artist_hosts_harmony",
+ "artist_hosts_houdini",
"artist_hosts_aftereffects",
"artist_hosts_resolve",
"artist_hosts_photoshop",
diff --git a/website/src/pages/index.js b/website/src/pages/index.js
index 6a233ddb66..00cf002aec 100644
--- a/website/src/pages/index.js
+++ b/website/src/pages/index.js
@@ -120,7 +120,12 @@ const studios = [
title: "Bad Clay",
image: "/img/badClay_logo.png",
infoLink: "https://www.bad-clay.com/",
- }
+ },
+ {
+ title: "Moonrock Animation Studio",
+ image: "/img/moonrock_logo.png",
+ infoLink: "https://www.moonrock.eu/",
+ }
];
function Service({imageUrl, title, description}) {
diff --git a/website/static/img/moonrock_logo.png b/website/static/img/moonrock_logo.png
new file mode 100644
index 0000000000..249db7c247
Binary files /dev/null and b/website/static/img/moonrock_logo.png differ
diff --git a/website/yarn.lock b/website/yarn.lock
index a63bf37731..b4c12edeb6 100644
--- a/website/yarn.lock
+++ b/website/yarn.lock
@@ -6168,9 +6168,9 @@ path-key@^3.0.0, path-key@^3.1.0:
integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==
path-parse@^1.0.6:
- version "1.0.6"
- resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.6.tgz#d62dbb5679405d72c4737ec58600e9ddcf06d24c"
- integrity sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==
+ version "1.0.7"
+ resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735"
+ integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==
path-to-regexp@0.1.7:
version "0.1.7"
@@ -8341,9 +8341,9 @@ url-parse-lax@^3.0.0:
prepend-http "^2.0.0"
url-parse@^1.4.3, url-parse@^1.4.7:
- version "1.5.1"
- resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.5.1.tgz#d5fa9890af8a5e1f274a2c98376510f6425f6e3b"
- integrity sha512-HOfCOUJt7iSYzEx/UqgtwKRMC6EU91NFhsCHMv9oM03VJcVo2Qrp8T8kI9D7amFf1cu+/3CEhgb3rF9zL7k85Q==
+ version "1.5.3"
+ resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.5.3.tgz#71c1303d38fb6639ade183c2992c8cc0686df862"
+ integrity sha512-IIORyIQD9rvj0A4CLWsHkBBJuNqWpFQe224b6j9t/ABmquIS0qDU2pY6kl6AuOrL5OkCXHMCFNe1jBcuAggjvQ==
dependencies:
querystringify "^2.1.1"
requires-port "^1.0.0"