Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-26 22:02:15 +01:00)
Merge branch 'develop' into feature/maya_python_3

commit 437fe5a1bd
310 changed files with 15069 additions and 3248 deletions
@@ -87,7 +87,7 @@ ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.

@@ -142,5 +142,6 @@ cython_debug/
.poetry/
.github/
vendor/bin/
vendor/python/
docs/
website/

11 .github/workflows/prerelease.yml vendored
@@ -20,12 +20,12 @@ jobs:
python-version: 3.7

- name: Install Python requirements
run: pip install gitpython semver
run: pip install gitpython semver PyGithub

- name: 🔎 Determine next version type
id: version_type
run: |
TYPE=$(python ./tools/ci_tools.py --bump)
TYPE=$(python ./tools/ci_tools.py --bump --github_token ${{ secrets.GITHUB_TOKEN }})

echo ::set-output name=type::$TYPE

@@ -43,11 +43,7 @@ jobs:
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
breakingLabel: '**💥 Breaking**'
enhancementLabel: '**🚀 Enhancements**'
bugsLabel: '**🐛 Bug fixes**'
deprecatedLabel: '**⚠️ Deprecations**'
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"### 🆕 New features","labels":["feature"]},}'
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}}'
issues: false
issuesWoLabels: false
sinceTag: "3.0.0"

@@ -80,6 +76,7 @@ jobs:
git add .
git commit -m "[Automated] Bump version"
tag_name="CI/${{ steps.version.outputs.next_tag }}"
echo $tag_name
git tag -a $tag_name -m "nightly build"

- name: Push to protected main branch
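The bump step now installs PyGithub and passes `secrets.GITHUB_TOKEN` to `tools/ci_tools.py --bump`, so the next version type is apparently decided from GitHub data (such as PR labels) rather than from local git history alone. The script itself is not part of this diff; the Python sketch below only illustrates how a label-driven bump could work, assuming the `pypeclub/OpenPype` repository, a `develop` base branch, and the label names visible in the `addSections` configuration above.

```python
# Hypothetical sketch only; the real tools/ci_tools.py is not shown in this diff.
import os

from github import Github  # PyGithub, installed by the workflow step above


def next_version_type(token, repo_name="pypeclub/OpenPype"):
    """Pick "major", "minor" or "patch" from labels of recently merged PRs."""
    repo = Github(token).get_repo(repo_name)
    labels = set()
    # Inspect the most recent closed PRs against the assumed base branch.
    for pr in repo.get_pulls(state="closed", base="develop")[:30]:
        if pr.merged:
            labels.update(label.name for label in pr.labels)
    if "breaking" in labels:
        return "major"
    if "type: feature" in labels or "type: enhancement" in labels:
        return "minor"
    return "patch"


if __name__ == "__main__":
    # The workflow captures stdout: TYPE=$(python ./tools/ci_tools.py --bump ...)
    print(next_version_type(os.environ["GITHUB_TOKEN"]))
```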

14 .github/workflows/release.yml vendored
@@ -21,7 +21,7 @@ jobs:
with:
python-version: 3.7
- name: Install Python requirements
run: pip install gitpython semver
run: pip install gitpython semver PyGithub

- name: 💉 Inject new version into files
id: version

@@ -39,11 +39,7 @@ jobs:
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
breakingLabel: '**💥 Breaking**'
enhancementLabel: '**🚀 Enhancements**'
bugsLabel: '**🐛 Bug fixes**'
deprecatedLabel: '**⚠️ Deprecations**'
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]}}'
addSections: '{"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]},"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]}}'
issues: false
issuesWoLabels: false
sinceTag: "3.0.0"

@@ -85,11 +81,7 @@ jobs:
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
breakingLabel: '**💥 Breaking**'
enhancementLabel: '**🚀 Enhancements**'
bugsLabel: '**🐛 Bug fixes**'
deprecatedLabel: '**⚠️ Deprecations**'
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]}}'
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}}'
issues: false
issuesWoLabels: false
sinceTag: ${{ steps.version.outputs.last_release }}
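One detail worth noting about `addSections`: the action receives it as a JSON string, and the superseded prerelease variant above ended in `"labels":["feature"]},}`, a trailing comma that strict JSON parsers reject. A quick local check with Python's `json` module catches that class of typo before the workflow runs; the snippet below uses a shortened value for illustration.

```python
import json

# Paste the addSections value between the quotes to verify it parses;
# a trailing comma such as '...},}' raises json.JSONDecodeError.
add_sections = (
    '{"documentation":{"prefix":"### 📖 Documentation",'
    '"labels":["type: documentation"]},'
    '"tests":{"prefix":"### ✅ Testing","labels":["tests"]}}'
)
json.loads(add_sections)
print("addSections is valid JSON")
```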

1 .gitignore vendored
@@ -39,6 +39,7 @@ Temporary Items
/dist/

/vendor/bin/*
/vendor/python/*
/.venv
/venv/

200 CHANGELOG.md
@@ -1,92 +1,144 @@
# Changelog

## [3.4.0-nightly.4](https://github.com/pypeclub/OpenPype/tree/HEAD)
## [3.5.0](https://github.com/pypeclub/OpenPype/tree/3.5.0) (2021-10-17)

[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.3.1...HEAD)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.4.1...3.5.0)

**Deprecated:**

- Maya: Change mayaAscii family to mayaScene [\#2106](https://github.com/pypeclub/OpenPype/pull/2106)

**🆕 New features**

- Added project and task into context change message in Maya [\#2131](https://github.com/pypeclub/OpenPype/pull/2131)
- Add ExtractBurnin to photoshop review [\#2124](https://github.com/pypeclub/OpenPype/pull/2124)
- PYPE-1218 - changed namespace to contain subset name in Maya [\#2114](https://github.com/pypeclub/OpenPype/pull/2114)
- Added running configurable disk mapping command before start of OP [\#2091](https://github.com/pypeclub/OpenPype/pull/2091)
- SFTP provider [\#2073](https://github.com/pypeclub/OpenPype/pull/2073)
- Maya: Validate setdress top group [\#2068](https://github.com/pypeclub/OpenPype/pull/2068)

**🚀 Enhancements**

- Maya: make rig validators configurable in settings [\#2137](https://github.com/pypeclub/OpenPype/pull/2137)
- Settings: Updated readme for entity types in settings [\#2132](https://github.com/pypeclub/OpenPype/pull/2132)
- Nuke: unified clip loader [\#2128](https://github.com/pypeclub/OpenPype/pull/2128)
- Settings UI: Project model refreshing and sorting [\#2104](https://github.com/pypeclub/OpenPype/pull/2104)
- Create Read From Rendered - Disable Relative paths by default [\#2093](https://github.com/pypeclub/OpenPype/pull/2093)
- Added choosing different dirmap mapping if workfile synched locally [\#2088](https://github.com/pypeclub/OpenPype/pull/2088)
- General: Remove IdleManager module [\#2084](https://github.com/pypeclub/OpenPype/pull/2084)
- Tray UI: Message box about missing settings defaults [\#2080](https://github.com/pypeclub/OpenPype/pull/2080)
- Tray UI: Show menu where first click happened [\#2079](https://github.com/pypeclub/OpenPype/pull/2079)
- Global: add global validators to settings [\#2078](https://github.com/pypeclub/OpenPype/pull/2078)
- Use CRF for burnin when available [\#2070](https://github.com/pypeclub/OpenPype/pull/2070)
- Project manager: Filter first item after selection of project [\#2069](https://github.com/pypeclub/OpenPype/pull/2069)
- Nuke: Adding `still` image family workflow [\#2064](https://github.com/pypeclub/OpenPype/pull/2064)
- Maya: validate authorized loaded plugins [\#2062](https://github.com/pypeclub/OpenPype/pull/2062)
- Tools: add support for pyenv on windows [\#2051](https://github.com/pypeclub/OpenPype/pull/2051)

**🐛 Bug fixes**

- Maya: fix model publishing [\#2130](https://github.com/pypeclub/OpenPype/pull/2130)
- Fix - oiiotool wasn't recognized even if present [\#2129](https://github.com/pypeclub/OpenPype/pull/2129)
- General: Disk mapping group [\#2120](https://github.com/pypeclub/OpenPype/pull/2120)
- Hiero: publishing effect first time makes wrong resources path [\#2115](https://github.com/pypeclub/OpenPype/pull/2115)
- Add startup script for Houdini Core. [\#2110](https://github.com/pypeclub/OpenPype/pull/2110)
- TVPaint: Behavior name of loop also accept repeat [\#2109](https://github.com/pypeclub/OpenPype/pull/2109)
- Ftrack: Project settings save custom attributes skip unknown attributes [\#2103](https://github.com/pypeclub/OpenPype/pull/2103)
- Blender: Fix NoneType error when animation\_data is missing for a rig [\#2101](https://github.com/pypeclub/OpenPype/pull/2101)
- Fix broken import in sftp provider [\#2100](https://github.com/pypeclub/OpenPype/pull/2100)
- Global: Fix docstring on publish plugin extract review [\#2097](https://github.com/pypeclub/OpenPype/pull/2097)
- Delivery Action Files Sequence fix [\#2096](https://github.com/pypeclub/OpenPype/pull/2096)
- General: Cloud mongo ca certificate issue [\#2095](https://github.com/pypeclub/OpenPype/pull/2095)
- TVPaint: Creator use context from workfile [\#2087](https://github.com/pypeclub/OpenPype/pull/2087)
- Blender: fix texture missing when publishing blend files [\#2085](https://github.com/pypeclub/OpenPype/pull/2085)
- General: Startup validations oiio tool path fix on linux [\#2083](https://github.com/pypeclub/OpenPype/pull/2083)
- Deadline: Collect deadline server does not check existence of deadline key [\#2082](https://github.com/pypeclub/OpenPype/pull/2082)
- Blender: fixed Curves with modifiers in Rigs [\#2081](https://github.com/pypeclub/OpenPype/pull/2081)
- Nuke UI scaling [\#2077](https://github.com/pypeclub/OpenPype/pull/2077)
- Maya: Fix multi-camera renders [\#2065](https://github.com/pypeclub/OpenPype/pull/2065)
- Fix Sync Queue when project disabled [\#2063](https://github.com/pypeclub/OpenPype/pull/2063)

**Merged pull requests:**

- Ftrack: Fix hosts attribute in collect ftrack username [\#1972](https://github.com/pypeclub/OpenPype/pull/1972)
- Removed deprecated submodules [\#1967](https://github.com/pypeclub/OpenPype/pull/1967)
- Launcher: Fix crashes on action click [\#1964](https://github.com/pypeclub/OpenPype/pull/1964)
- Settings: Minor fixes in UI and missing default values [\#1963](https://github.com/pypeclub/OpenPype/pull/1963)
- Blender: Toggle system console works on windows [\#1962](https://github.com/pypeclub/OpenPype/pull/1962)
- Resolve path when adding to zip [\#1960](https://github.com/pypeclub/OpenPype/pull/1960)
- Bump url-parse from 1.5.1 to 1.5.3 in /website [\#1958](https://github.com/pypeclub/OpenPype/pull/1958)
- Global: Avalon Host name collector [\#1949](https://github.com/pypeclub/OpenPype/pull/1949)
- Global: Define hosts in CollectSceneVersion [\#1948](https://github.com/pypeclub/OpenPype/pull/1948)
- Maya: Add Xgen family support [\#1947](https://github.com/pypeclub/OpenPype/pull/1947)
- Add face sets to exported alembics [\#1942](https://github.com/pypeclub/OpenPype/pull/1942)
- Bump path-parse from 1.0.6 to 1.0.7 in /website [\#1933](https://github.com/pypeclub/OpenPype/pull/1933)
- \#1894 - adds host to template\_name\_profiles for filtering [\#1915](https://github.com/pypeclub/OpenPype/pull/1915)
- Environments: Tool environments in alphabetical order [\#1910](https://github.com/pypeclub/OpenPype/pull/1910)
- Disregard publishing time. [\#1888](https://github.com/pypeclub/OpenPype/pull/1888)
- Feature/webpublisher backend [\#1876](https://github.com/pypeclub/OpenPype/pull/1876)
- Dynamic modules [\#1872](https://github.com/pypeclub/OpenPype/pull/1872)
- Houdini: add Camera, Point Cache, Composite, Redshift ROP and VDB Cache support [\#1821](https://github.com/pypeclub/OpenPype/pull/1821)
- Bump pywin32 from 300 to 301 [\#2086](https://github.com/pypeclub/OpenPype/pull/2086)

## [3.4.1](https://github.com/pypeclub/OpenPype/tree/3.4.1) (2021-09-23)

[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.4.1-nightly.1...3.4.1)

**🆕 New features**

- Settings: Flag project as deactivated and hide from tools' view [\#2008](https://github.com/pypeclub/OpenPype/pull/2008)

**🚀 Enhancements**

- General: Startup validations [\#2054](https://github.com/pypeclub/OpenPype/pull/2054)
- Nuke: proxy mode validator [\#2052](https://github.com/pypeclub/OpenPype/pull/2052)
- Ftrack: Removed ftrack interface [\#2049](https://github.com/pypeclub/OpenPype/pull/2049)
- Settings UI: Deffered set value on entity [\#2044](https://github.com/pypeclub/OpenPype/pull/2044)
- Loader: Families filtering [\#2043](https://github.com/pypeclub/OpenPype/pull/2043)
- Settings UI: Project view enhancements [\#2042](https://github.com/pypeclub/OpenPype/pull/2042)
- Settings for Nuke IncrementScriptVersion [\#2039](https://github.com/pypeclub/OpenPype/pull/2039)
- Loader & Library loader: Use tools from OpenPype [\#2038](https://github.com/pypeclub/OpenPype/pull/2038)
- Adding predefined project folders creation in PM [\#2030](https://github.com/pypeclub/OpenPype/pull/2030)
- WebserverModule: Removed interface of webserver module [\#2028](https://github.com/pypeclub/OpenPype/pull/2028)
- TimersManager: Removed interface of timers manager [\#2024](https://github.com/pypeclub/OpenPype/pull/2024)
- Feature Maya import asset from scene inventory [\#2018](https://github.com/pypeclub/OpenPype/pull/2018)

**🐛 Bug fixes**

- Timers manger: Typo fix [\#2058](https://github.com/pypeclub/OpenPype/pull/2058)
- Hiero: Editorial fixes [\#2057](https://github.com/pypeclub/OpenPype/pull/2057)
- Differentiate jpg sequences from thumbnail [\#2056](https://github.com/pypeclub/OpenPype/pull/2056)
- FFmpeg: Split command to list does not work [\#2046](https://github.com/pypeclub/OpenPype/pull/2046)
- Removed shell flag in subprocess call [\#2045](https://github.com/pypeclub/OpenPype/pull/2045)

**Merged pull requests:**

- Bump prismjs from 1.24.0 to 1.25.0 in /website [\#2050](https://github.com/pypeclub/OpenPype/pull/2050)

## [3.4.0](https://github.com/pypeclub/OpenPype/tree/3.4.0) (2021-09-17)

[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.4.0-nightly.6...3.4.0)

**🆕 New features**

- Nuke: Compatibility with Nuke 13 [\#2003](https://github.com/pypeclub/OpenPype/pull/2003)

**🚀 Enhancements**

- Added possibility to configure of synchronization of workfile version… [\#2041](https://github.com/pypeclub/OpenPype/pull/2041)
- General: Task types in profiles [\#2036](https://github.com/pypeclub/OpenPype/pull/2036)
- Console interpreter: Handle invalid sizes on initialization [\#2022](https://github.com/pypeclub/OpenPype/pull/2022)
- Ftrack: Show OpenPype versions in event server status [\#2019](https://github.com/pypeclub/OpenPype/pull/2019)
- General: Staging icon [\#2017](https://github.com/pypeclub/OpenPype/pull/2017)
- Ftrack: Sync to avalon actions have jobs [\#2015](https://github.com/pypeclub/OpenPype/pull/2015)
- Modules: Connect method is not required [\#2009](https://github.com/pypeclub/OpenPype/pull/2009)
- Settings UI: Number with configurable steps [\#2001](https://github.com/pypeclub/OpenPype/pull/2001)
- Moving project folder structure creation out of ftrack module \#1989 [\#1996](https://github.com/pypeclub/OpenPype/pull/1996)

**🐛 Bug fixes**

- Workfiles tool: Task selection [\#2040](https://github.com/pypeclub/OpenPype/pull/2040)
- Ftrack: Delete old versions missing settings key [\#2037](https://github.com/pypeclub/OpenPype/pull/2037)
- Nuke: typo on a button [\#2034](https://github.com/pypeclub/OpenPype/pull/2034)
- Hiero: Fix "none" named tags [\#2033](https://github.com/pypeclub/OpenPype/pull/2033)
- FFmpeg: Subprocess arguments as list [\#2032](https://github.com/pypeclub/OpenPype/pull/2032)
- General: Fix Python 2 breaking line [\#2016](https://github.com/pypeclub/OpenPype/pull/2016)
- Bugfix/webpublisher task type [\#2006](https://github.com/pypeclub/OpenPype/pull/2006)

### 📖 Documentation

- Documentation: Ftrack launch argsuments update [\#2014](https://github.com/pypeclub/OpenPype/pull/2014)

## [3.3.1](https://github.com/pypeclub/OpenPype/tree/3.3.1) (2021-08-20)

[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.3.1-nightly.1...3.3.1)

**Merged pull requests:**

- TVPaint: Fixed rendered frame indexes [\#1946](https://github.com/pypeclub/OpenPype/pull/1946)
- Maya: Menu actions fix [\#1945](https://github.com/pypeclub/OpenPype/pull/1945)
- standalone: editorial shared object problem [\#1941](https://github.com/pypeclub/OpenPype/pull/1941)
- Bugfix nuke deadline app name [\#1928](https://github.com/pypeclub/OpenPype/pull/1928)

## [3.3.0](https://github.com/pypeclub/OpenPype/tree/3.3.0) (2021-08-17)

[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.3.0-nightly.11...3.3.0)

**Merged pull requests:**

- Python console interpreter [\#1940](https://github.com/pypeclub/OpenPype/pull/1940)
- Fix - make AE workfile publish to Ftrack configurable [\#1937](https://github.com/pypeclub/OpenPype/pull/1937)
- Fix - ftrack family was added incorrectly in some cases [\#1935](https://github.com/pypeclub/OpenPype/pull/1935)
- Settings UI: Breadcrumbs in settings [\#1932](https://github.com/pypeclub/OpenPype/pull/1932)
- Fix - Deadline publish on Linux started Tray instead of headless publishing [\#1930](https://github.com/pypeclub/OpenPype/pull/1930)
- Maya: Validate Model Name - repair accident deletion in settings defaults [\#1929](https://github.com/pypeclub/OpenPype/pull/1929)
- Global: Updated logos and Default settings [\#1927](https://github.com/pypeclub/OpenPype/pull/1927)
- Nuke: submit to farm failed due `ftrack` family remove [\#1926](https://github.com/pypeclub/OpenPype/pull/1926)
- Check for missing ✨ Python when using `pyenv` [\#1925](https://github.com/pypeclub/OpenPype/pull/1925)
- Maya: Scene patching 🩹on submission to Deadline [\#1923](https://github.com/pypeclub/OpenPype/pull/1923)
- Fix - validate takes repre\["files"\] as list all the time [\#1922](https://github.com/pypeclub/OpenPype/pull/1922)
- Settings: Default values for enum [\#1920](https://github.com/pypeclub/OpenPype/pull/1920)
- Settings UI: Modifiable dict view enhance [\#1919](https://github.com/pypeclub/OpenPype/pull/1919)
- standalone: validator asset parents [\#1917](https://github.com/pypeclub/OpenPype/pull/1917)
- Nuke: update video file crassing [\#1916](https://github.com/pypeclub/OpenPype/pull/1916)
- Fix - texture validators for workfiles triggers only for textures workfiles [\#1914](https://github.com/pypeclub/OpenPype/pull/1914)
- submodules: avalon-core update [\#1911](https://github.com/pypeclub/OpenPype/pull/1911)
- Settings UI: List order works as expected [\#1906](https://github.com/pypeclub/OpenPype/pull/1906)
- Add support for multiple Deadline ☠️➖ servers [\#1905](https://github.com/pypeclub/OpenPype/pull/1905)
- Hiero: loaded clip was not set colorspace from version data [\#1904](https://github.com/pypeclub/OpenPype/pull/1904)
- Pyblish UI: Fix collecting stage processing [\#1903](https://github.com/pypeclub/OpenPype/pull/1903)
- Burnins: Use input's bitrate in h624 [\#1902](https://github.com/pypeclub/OpenPype/pull/1902)
- Feature AE local render [\#1901](https://github.com/pypeclub/OpenPype/pull/1901)
- Ftrack: Where I run action enhancement [\#1900](https://github.com/pypeclub/OpenPype/pull/1900)
- Ftrack: Private project server actions [\#1899](https://github.com/pypeclub/OpenPype/pull/1899)
- Support nested studio plugins paths. [\#1898](https://github.com/pypeclub/OpenPype/pull/1898)
- Bug: fixed python detection [\#1893](https://github.com/pypeclub/OpenPype/pull/1893)
- Settings: global validators with options [\#1892](https://github.com/pypeclub/OpenPype/pull/1892)
- Settings: Conditional dict enum positioning [\#1891](https://github.com/pypeclub/OpenPype/pull/1891)
- global: integrate name missing default template [\#1890](https://github.com/pypeclub/OpenPype/pull/1890)
- publisher: editorial plugins fixes [\#1889](https://github.com/pypeclub/OpenPype/pull/1889)
- Expose stop timer through rest api. [\#1886](https://github.com/pypeclub/OpenPype/pull/1886)
- TVPaint: Increment workfile [\#1885](https://github.com/pypeclub/OpenPype/pull/1885)
- Allow Multiple Notes to run on tasks. [\#1882](https://github.com/pypeclub/OpenPype/pull/1882)
- Normalize path returned from Workfiles. [\#1880](https://github.com/pypeclub/OpenPype/pull/1880)
- Prepare for pyside2 [\#1869](https://github.com/pypeclub/OpenPype/pull/1869)
- Filter hosts in settings host-enum [\#1868](https://github.com/pypeclub/OpenPype/pull/1868)
- Local actions with process identifier [\#1867](https://github.com/pypeclub/OpenPype/pull/1867)
- Workfile tool start at host launch support [\#1865](https://github.com/pypeclub/OpenPype/pull/1865)
- Maya: add support for `RedshiftNormalMap` node, fix `tx` linear space 🚀 [\#1863](https://github.com/pypeclub/OpenPype/pull/1863)
- Workfiles tool event arguments fix [\#1862](https://github.com/pypeclub/OpenPype/pull/1862)
- Maya: support for configurable `dirmap` 🗺️ [\#1859](https://github.com/pypeclub/OpenPype/pull/1859)
- Maya: don't add reference members as connections to the container set 📦 [\#1855](https://github.com/pypeclub/OpenPype/pull/1855)
- Settings list can use template or schema as object type [\#1815](https://github.com/pypeclub/OpenPype/pull/1815)

## [3.2.0](https://github.com/pypeclub/OpenPype/tree/3.2.0) (2021-07-13)

[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.2.0-nightly.7...3.2.0)

83 Dockerfile
@@ -1,7 +1,9 @@
# Build Pype docker image
FROM centos:7 AS builder
ARG OPENPYPE_PYTHON_VERSION=3.7.10
FROM debian:bookworm-slim AS builder
ARG OPENPYPE_PYTHON_VERSION=3.7.12

LABEL maintainer="info@openpype.io"
LABEL description="Docker Image to build and run OpenPype"
LABEL org.opencontainers.image.name="pypeclub/openpype"
LABEL org.opencontainers.image.title="OpenPype Docker Image"
LABEL org.opencontainers.image.url="https://openpype.io/"

@@ -9,56 +11,49 @@ LABEL org.opencontainers.image.source="https://github.com/pypeclub/pype"

USER root

# update base
RUN yum -y install deltarpm \
&& yum -y update \
&& yum clean all
ARG DEBIAN_FRONTEND=noninteractive

# add tools we need
RUN yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm \
&& yum -y install centos-release-scl \
&& yum -y install \
# update base
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
ca-certificates \
bash \
which \
git \
devtoolset-7-gcc* \
make \
cmake \
make \
curl \
wget \
gcc \
zlib-devel \
bzip2 \
bzip2-devel \
readline-devel \
sqlite sqlite-devel \
openssl-devel \
tk-devel libffi-devel \
qt5-qtbase-devel \
patchelf \
&& yum clean all
build-essential \
checkinstall \
libssl-dev \
zlib1g-dev \
libbz2-dev \
libreadline-dev \
libsqlite3-dev \
llvm \
libncursesw5-dev \
xz-utils \
tk-dev \
libxml2-dev \
libxmlsec1-dev \
libffi-dev \
liblzma-dev \
patchelf

SHELL ["/bin/bash", "-c"]

RUN mkdir /opt/openpype
# RUN useradd -m pype
# RUN chown pype /opt/openpype
# USER pype

RUN curl https://pyenv.run | bash
ENV PYTHON_CONFIGURE_OPTS --enable-shared

RUN echo 'export PATH="$HOME/.pyenv/bin:$PATH"'>> $HOME/.bashrc \
RUN curl https://pyenv.run | bash \
&& echo 'export PATH="$HOME/.pyenv/bin:$PATH"'>> $HOME/.bashrc \
&& echo 'eval "$(pyenv init -)"' >> $HOME/.bashrc \
&& echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/.bashrc \
&& echo 'eval "$(pyenv init --path)"' >> $HOME/.bashrc
RUN source $HOME/.bashrc && pyenv install ${OPENPYPE_PYTHON_VERSION}
&& echo 'eval "$(pyenv init --path)"' >> $HOME/.bashrc \
&& source $HOME/.bashrc && pyenv install ${OPENPYPE_PYTHON_VERSION}

COPY . /opt/openpype/
RUN rm -rf /openpype/.poetry || echo "No Poetry installed yet."
# USER root
# RUN chown -R pype /opt/openpype
RUN chmod +x /opt/openpype/tools/create_env.sh && chmod +x /opt/openpype/tools/build.sh

# USER pype
RUN chmod +x /opt/openpype/tools/create_env.sh && chmod +x /opt/openpype/tools/build.sh

WORKDIR /opt/openpype

@@ -67,16 +62,8 @@ RUN cd /opt/openpype \
&& pyenv local ${OPENPYPE_PYTHON_VERSION}

RUN source $HOME/.bashrc \
&& ./tools/create_env.sh

RUN source $HOME/.bashrc \
&& ./tools/create_env.sh \
&& ./tools/fetch_thirdparty_libs.sh

RUN source $HOME/.bashrc \
&& bash ./tools/build.sh \
&& cp /usr/lib64/libffi* ./build/exe.linux-x86_64-3.7/lib \
&& cp /usr/lib64/libssl* ./build/exe.linux-x86_64-3.7/lib \
&& cp /usr/lib64/libcrypto* ./build/exe.linux-x86_64-3.7/lib

RUN cd /opt/openpype \
rm -rf ./vendor/bin
&& bash ./tools/build.sh

98 Dockerfile.centos7 Normal file
@@ -0,0 +1,98 @@
# Build Pype docker image
FROM centos:7 AS builder
ARG OPENPYPE_PYTHON_VERSION=3.7.10

LABEL org.opencontainers.image.name="pypeclub/openpype"
LABEL org.opencontainers.image.title="OpenPype Docker Image"
LABEL org.opencontainers.image.url="https://openpype.io/"
LABEL org.opencontainers.image.source="https://github.com/pypeclub/pype"

USER root

# update base
RUN yum -y install deltarpm \
&& yum -y update \
&& yum clean all

# add tools we need
RUN yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm \
&& yum -y install centos-release-scl \
&& yum -y install \
bash \
which \
git \
make \
devtoolset-7 \
cmake \
curl \
wget \
gcc \
zlib-devel \
bzip2 \
bzip2-devel \
readline-devel \
sqlite sqlite-devel \
openssl-devel \
openssl-libs \
tk-devel libffi-devel \
patchelf \
automake \
autoconf \
ncurses \
ncurses-devel \
qt5-qtbase-devel \
&& yum clean all

# we need to build our own patchelf
WORKDIR /temp-patchelf
RUN git clone https://github.com/NixOS/patchelf.git . \
&& source scl_source enable devtoolset-7 \
&& ./bootstrap.sh \
&& ./configure \
&& make \
&& make install

RUN mkdir /opt/openpype
# RUN useradd -m pype
# RUN chown pype /opt/openpype
# USER pype

RUN curl https://pyenv.run | bash
# ENV PYTHON_CONFIGURE_OPTS --enable-shared

RUN echo 'export PATH="$HOME/.pyenv/bin:$PATH"'>> $HOME/.bashrc \
&& echo 'eval "$(pyenv init -)"' >> $HOME/.bashrc \
&& echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/.bashrc \
&& echo 'eval "$(pyenv init --path)"' >> $HOME/.bashrc
RUN source $HOME/.bashrc && pyenv install ${OPENPYPE_PYTHON_VERSION}

COPY . /opt/openpype/
RUN rm -rf /openpype/.poetry || echo "No Poetry installed yet."
# USER root
# RUN chown -R pype /opt/openpype
RUN chmod +x /opt/openpype/tools/create_env.sh && chmod +x /opt/openpype/tools/build.sh

# USER pype

WORKDIR /opt/openpype

RUN cd /opt/openpype \
&& source $HOME/.bashrc \
&& pyenv local ${OPENPYPE_PYTHON_VERSION}

RUN source $HOME/.bashrc \
&& ./tools/create_env.sh

RUN source $HOME/.bashrc \
&& ./tools/fetch_thirdparty_libs.sh

RUN source $HOME/.bashrc \
&& bash ./tools/build.sh

RUN cp /usr/lib64/libffi* ./build/exe.linux-x86_64-3.7/lib \
&& cp /usr/lib64/libssl* ./build/exe.linux-x86_64-3.7/lib \
&& cp /usr/lib64/libcrypto* ./build/exe.linux-x86_64-3.7/lib \
&& cp /root/.pyenv/versions/${OPENPYPE_PYTHON_VERSION}/lib/libpython* ./build/exe.linux-x86_64-3.7/lib

RUN cd /opt/openpype \
rm -rf ./vendor/bin

11 README.md
@@ -133,6 +133,12 @@ Easiest way to build OpenPype on Linux is using [Docker](https://www.docker.com/)
sudo ./tools/docker_build.sh
```

This will by default use Debian as base image. If you need to make Centos 7 compatible build, please run:

```sh
sudo ./tools/docker_build.sh centos7
```

If all is successful, you'll find built OpenPype in `./build/` folder.

#### Manual build

@@ -158,6 +164,11 @@ you'll need also additional libraries for Qt5:
```sh
sudo apt install qt5-default
```
or if you are on Ubuntu > 20.04, there is no `qt5-default` packages so you need to install its content individually:

```sh
sudo apt-get install qtbase5-dev qtchooser qt5-qmake qtbase5-dev-tools
```
</details>

<details>

148 igniter/tools.py
@@ -1,18 +1,12 @@
# -*- coding: utf-8 -*-
"""Tools used in **Igniter** GUI.

Functions ``compose_url()`` and ``decompose_url()`` are the same as in
``openpype.lib`` and they are here to avoid importing OpenPype module before its
version is decided.

"""
import sys
"""Tools used in **Igniter** GUI."""
import os
from typing import Dict, Union
from typing import Union
from urllib.parse import urlparse, parse_qs
from pathlib import Path
import platform

import certifi
from pymongo import MongoClient
from pymongo.errors import (
ServerSelectionTimeoutError,

@@ -22,89 +16,32 @@ from pymongo.errors import (
)


def decompose_url(url: str) -> Dict:
"""Decompose mongodb url to its separate components.

Args:
url (str): Mongodb url.

Returns:
dict: Dictionary of components.
def should_add_certificate_path_to_mongo_url(mongo_url):
"""Check if should add ca certificate to mongo url.

Since 30.9.2021 cloud mongo requires newer certificates that are not
available on most of workstation. This adds path to certifi certificate
which is valid for it. To add the certificate path url must have scheme
'mongodb+srv' or has 'ssl=true' or 'tls=true' in url query.
"""
components = {
"scheme": None,
"host": None,
"port": None,
"username": None,
"password": None,
"auth_db": None
}
parsed = urlparse(mongo_url)
query = parse_qs(parsed.query)
lowered_query_keys = set(key.lower() for key in query.keys())
add_certificate = False
# Check if url 'ssl' or 'tls' are set to 'true'
for key in ("ssl", "tls"):
if key in query and "true" in query["ssl"]:
add_certificate = True
break

result = urlparse(url)
if result.scheme is None:
_url = "mongodb://{}".format(url)
result = urlparse(_url)
# Check if url contains 'mongodb+srv'
if not add_certificate and parsed.scheme == "mongodb+srv":
add_certificate = True

components["scheme"] = result.scheme
components["host"] = result.hostname
try:
components["port"] = result.port
except ValueError:
raise RuntimeError("invalid port specified")
components["username"] = result.username
components["password"] = result.password

try:
components["auth_db"] = parse_qs(result.query)['authSource'][0]
except KeyError:
# no auth db provided, mongo will use the one we are connecting to
pass

return components


def compose_url(scheme: str = None,
host: str = None,
username: str = None,
password: str = None,
port: int = None,
auth_db: str = None) -> str:
"""Compose mongodb url from its individual components.

Args:
scheme (str, optional):
host (str, optional):
username (str, optional):
password (str, optional):
port (str, optional):
auth_db (str, optional):

Returns:
str: mongodb url

"""

url = "{scheme}://"

if username and password:
url += "{username}:{password}@"

url += "{host}"
if port:
url += ":{port}"

if auth_db:
url += "?authSource={auth_db}"

return url.format(**{
"scheme": scheme,
"host": host,
"username": username,
"password": password,
"port": port,
"auth_db": auth_db
})
# Check if url does already contain certificate path
if add_certificate and "tlscafile" in lowered_query_keys:
add_certificate = False
return add_certificate


def validate_mongo_connection(cnx: str) -> (bool, str):

@@ -121,12 +58,18 @@ def validate_mongo_connection(cnx: str) -> (bool, str):
if parsed.scheme not in ["mongodb", "mongodb+srv"]:
return False, "Not mongodb schema"

kwargs = {
"serverSelectionTimeoutMS": 2000
}
# Add certificate path if should be required
if should_add_certificate_path_to_mongo_url(cnx):
kwargs["ssl_ca_certs"] = certifi.where()

try:
client = MongoClient(
cnx,
serverSelectionTimeoutMS=2000
)
client = MongoClient(cnx, **kwargs)
client.server_info()
with client.start_session():
pass
client.close()
except ServerSelectionTimeoutError as e:
return False, f"Cannot connect to server {cnx} - {e}"

@@ -152,10 +95,7 @@ def validate_mongo_string(mongo: str) -> (bool, str):
"""
if not mongo:
return True, "empty string"
parsed = urlparse(mongo)
if parsed.scheme in ["mongodb", "mongodb+srv"]:
return validate_mongo_connection(mongo)
return False, "not valid mongodb schema"
return validate_mongo_connection(mongo)


def validate_path_string(path: str) -> (bool, str):

@@ -195,21 +135,13 @@ def get_openpype_global_settings(url: str) -> dict:
Returns:
dict: With settings data. Empty dictionary is returned if not found.
"""
try:
components = decompose_url(url)
except RuntimeError:
return {}
mongo_kwargs = {
"host": compose_url(**components),
"serverSelectionTimeoutMS": 2000
}
port = components.get("port")
if port is not None:
mongo_kwargs["port"] = int(port)
kwargs = {}
if should_add_certificate_path_to_mongo_url(url):
kwargs["ssl_ca_certs"] = certifi.where()

try:
# Create mongo connection
client = MongoClient(**mongo_kwargs)
client = MongoClient(url, **kwargs)
# Access settings collection
col = client["openpype"]["settings"]
# Query global settings
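Taken together, these hunks replace URL decomposition and recomposition with a single predicate plus the `certifi` CA bundle. A minimal usage sketch of the new flow follows; the connection strings are made-up examples.

```python
import certifi
from pymongo import MongoClient

from igniter.tools import should_add_certificate_path_to_mongo_url

# Made-up example URLs for illustration.
urls = (
    "mongodb://localhost:27017",
    "mongodb+srv://user:pass@cluster0.example.net/?authSource=admin",
)

for url in urls:
    kwargs = {"serverSelectionTimeoutMS": 2000}
    if should_add_certificate_path_to_mongo_url(url):
        # Same certificate handling as validate_mongo_connection() above:
        # cloud "mongodb+srv" (or ssl/tls=true) URLs get the certifi CA bundle.
        kwargs["ssl_ca_certs"] = certifi.where()
    client = MongoClient(url, **kwargs)
    client.server_info()  # raises on connection failure
    client.close()
```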

@@ -69,6 +69,7 @@ def install():
"""Install Pype to Avalon."""
from pyblish.lib import MessageHandler
from openpype.modules import load_modules
from avalon import pipeline

# Make sure modules are loaded
load_modules()

@@ -117,7 +118,9 @@ def install():

# apply monkey patched discover to original one
log.info("Patching discovery")

avalon.discover = patched_discover
pipeline.discover = patched_discover

avalon.on("taskChanged", _on_task_change)
@@ -283,3 +283,18 @@ def run(script):
args_string = " ".join(args[1:])
print(f"... running: {script} {args_string}")
runpy.run_path(script, run_name="__main__", )


@main.command()
@click.argument("folder", nargs=-1)
@click.option("-m",
"--mark",
help="Run tests marked by",
default=None)
@click.option("-p",
"--pyargs",
help="Run tests from package",
default=None)
def runtests(folder, mark, pyargs):
"""Run all automatic tests after proper initialization via start.py"""
PypeCommands().run_tests(folder, mark, pyargs)
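`runtests` only forwards its options to `PypeCommands().run_tests`, whose body is outside this diff. The options map naturally onto a pytest invocation; the sketch below is an assumed mapping, not the actual implementation.

```python
import pytest


def run_tests(folder, mark=None, pyargs=None):
    """Assumed translation of the CLI options into pytest arguments."""
    args = list(folder) if folder else []
    if mark:
        args += ["-m", mark]          # pytest marker expression
    if pyargs:
        args += ["--pyargs", pyargs]  # collect tests from an installed package
    return pytest.main(args)
```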

@@ -43,6 +43,8 @@ class GlobalHostDataHook(PreLaunchHook):

"env": self.launch_context.env,

"last_workfile_path": self.data.get("last_workfile_path"),

"log": self.log
})
@@ -66,12 +66,16 @@ class BlendRigLoader(plugin.AssetLoader):
objects = []
nodes = list(container.children)

for obj in nodes:
obj.parent = asset_group
allowed_types = ['ARMATURE', 'MESH']

for obj in nodes:
objects.append(obj)
nodes.extend(list(obj.children))
if obj.type in allowed_types:
obj.parent = asset_group

for obj in nodes:
if obj.type in allowed_types:
objects.append(obj)
nodes.extend(list(obj.children))

objects.reverse()

@@ -107,7 +111,8 @@ class BlendRigLoader(plugin.AssetLoader):

if action is not None:
local_obj.animation_data.action = action
elif local_obj.animation_data.action is not None:
elif (local_obj.animation_data and
local_obj.animation_data.action is not None):
plugin.prepare_data(
local_obj.animation_data.action, group_name)

@@ -126,7 +131,30 @@ class BlendRigLoader(plugin.AssetLoader):

objects.reverse()

bpy.data.orphans_purge(do_local_ids=False)
curves = [obj for obj in data_to.objects if obj.type == 'CURVE']

for curve in curves:
local_obj = plugin.prepare_data(curve, group_name)
plugin.prepare_data(local_obj.data, group_name)

local_obj.use_fake_user = True

for mod in local_obj.modifiers:
mod_target_name = mod.object.name
mod.object = bpy.data.objects.get(
f"{group_name}:{mod_target_name}")

if not local_obj.get(AVALON_PROPERTY):
local_obj[AVALON_PROPERTY] = dict()

avalon_info = local_obj[AVALON_PROPERTY]
avalon_info.update({"container_name": group_name})

local_obj.parent = asset_group
objects.append(local_obj)

while bpy.data.orphans_purge(do_local_ids=False):
pass

bpy.ops.object.select_all(action='DESELECT')

@@ -28,6 +28,16 @@ class ExtractBlend(openpype.api.Extractor):

for obj in instance:
data_blocks.add(obj)
# Pack used images in the blend files.
if obj.type == 'MESH':
for material_slot in obj.material_slots:
mat = material_slot.material
if mat and mat.use_nodes:
tree = mat.node_tree
if tree.type == 'SHADER':
for node in tree.nodes:
if node.bl_idname == 'ShaderNodeTexImage':
node.image.pack()

bpy.data.libraries.write(filepath, data_blocks)

@@ -4,7 +4,6 @@ import copy
import argparse

from avalon import io
from avalon.tools import publish

import pyblish.api
import pyblish.util

@@ -13,6 +12,7 @@ from openpype.api import Logger
import openpype
import openpype.hosts.celaction
from openpype.hosts.celaction import api as celaction
from openpype.tools.utils import host_tools

log = Logger().get_logger("Celaction_cli_publisher")

@@ -82,7 +82,7 @@ def main():

pyblish.api.register_host(publish_host)

return publish.show()
return host_tools.show_publish()


if __name__ == "__main__":

@@ -1,8 +1,6 @@
from .pipeline import (
install,
uninstall,
publish,
launch_workfiles_app
uninstall
)

from .utils import (

@@ -22,12 +20,9 @@ __all__ = [
# pipeline
"install",
"uninstall",
"publish",
"launch_workfiles_app",

# utils
"setup",
"get_resolve_module",

# lib
"get_additional_data",

@@ -3,17 +3,7 @@ import sys

from Qt import QtWidgets, QtCore

from .pipeline import (
publish,
launch_workfiles_app
)

from avalon.tools import (
creator,
loader,
sceneinventory,
libraryloader
)
from openpype.tools.utils import host_tools

from openpype.hosts.fusion.scripts import (
set_rendermode,

@@ -34,7 +24,7 @@ def load_stylesheet():

class Spacer(QtWidgets.QWidget):
def __init__(self, height, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
super(Spacer, self).__init__(*args, **kwargs)

self.setFixedHeight(height)

@@ -51,7 +41,7 @@ class Spacer(QtWidgets.QWidget):

class OpenPypeMenu(QtWidgets.QWidget):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
super(OpenPypeMenu, self).__init__(*args, **kwargs)

self.setObjectName("OpenPypeMenu")

@@ -115,27 +105,27 @@ class OpenPypeMenu(QtWidgets.QWidget):

def on_workfile_clicked(self):
print("Clicked Workfile")
launch_workfiles_app()
host_tools.show_workfiles()

def on_create_clicked(self):
print("Clicked Create")
creator.show()
host_tools.show_creator()

def on_publish_clicked(self):
print("Clicked Publish")
publish(None)
host_tools.show_publish()

def on_load_clicked(self):
print("Clicked Load")
loader.show(use_context=True)
host_tools.show_loader(use_context=True)

def on_inventory_clicked(self):
print("Clicked Inventory")
sceneinventory.show()
host_tools.show_scene_inventory()

def on_libload_clicked(self):
print("Clicked Library")
libraryloader.show()
host_tools.show_library_loader()

def on_rendernode_clicked(self):
from avalon import style
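The same substitution runs through the rest of this commit: the per-tool avalon modules (`creator`, `loader`, `sceneinventory`, `libraryloader`, `workfiles`, `publish`) give way to the single `openpype.tools.utils.host_tools` facade. Condensed from the menu code above, the callback pattern is simply:

```python
from openpype.tools.utils import host_tools

# One facade instead of one avalon module per tool; every call below
# appears somewhere in this commit's hunks.
host_tools.show_workfiles()
host_tools.show_creator()
host_tools.show_publish()
host_tools.show_loader(use_context=True)
host_tools.show_scene_inventory()
host_tools.show_library_loader()
```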

@@ -3,7 +3,6 @@ Basic avalon integration
"""
import os

from avalon.tools import workfiles
from avalon import api as avalon
from pyblish import api as pyblish
from openpype.api import Logger

@@ -98,14 +97,3 @@ def on_pyblish_instance_toggled(instance, new_value, old_value):
current = attrs["TOOLB_PassThrough"]
if current != passthrough:
tool.SetAttrs({"TOOLB_PassThrough": passthrough})


def launch_workfiles_app(*args):
workdir = os.environ["AVALON_WORKDIR"]
workfiles.show(workdir)


def publish(parent):
"""Shorthand to publish from within host"""
from avalon.tools import publish
return publish.show(parent)

@@ -3,17 +3,14 @@
import os
from pathlib import Path
import logging
import re

from openpype import lib
from openpype.api import (get_current_project_settings)
import openpype.hosts.harmony

import pyblish.api

from avalon import io, harmony
import avalon.api
import avalon.tools.sceneinventory


log = logging.getLogger("openpype.hosts.harmony")

@@ -91,7 +91,8 @@ class ExtractRender(pyblish.api.InstancePlugin):
thumbnail_path = os.path.join(path, "thumbnail.png")
ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg")
args = [
"{}".format(ffmpeg_path), "-y",
ffmpeg_path,
"-y",
"-i", os.path.join(path, list(collections[0])[0]),
"-vf", "scale=300:-1",
"-vframes", "1",
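The argument list now holds `ffmpeg_path` directly instead of a pre-formatted string, which matches the changelog entries about passing subprocess arguments as a list and removing the shell flag: with a list and no `shell=True`, each element is passed verbatim and paths with spaces need no quoting. A minimal sketch of the resulting invocation, with placeholder paths:

```python
import subprocess

ffmpeg_path = "/path/to/ffmpeg"               # placeholder
first_frame = "/renders/shot/frame.0001.png"  # placeholder
thumbnail_path = "/renders/shot/thumbnail.png"

args = [
    ffmpeg_path, "-y",
    "-i", first_frame,
    "-vf", "scale=300:-1",  # 300 px wide, height keeps aspect ratio
    "-vframes", "1",        # write a single frame
    thumbnail_path,
]
# List form: no shell interpretation, no quoting pitfalls.
subprocess.run(args, check=True)
```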

@@ -2,6 +2,7 @@ import os
import sys
import hiero.core
from openpype.api import Logger
from openpype.tools.utils import host_tools
from avalon.api import Session
from hiero.ui import findMenuAction

@@ -41,7 +42,6 @@ def menu_install():
apply_colorspace_project, apply_colorspace_clips
)
# here is the best place to add menu
from avalon.tools import cbloader, creator, sceneinventory
from avalon.vendor.Qt import QtGui

menu_name = os.environ['AVALON_LABEL']

@@ -86,15 +86,15 @@ def menu_install():

creator_action = menu.addAction("Create ...")
creator_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
creator_action.triggered.connect(creator.show)
creator_action.triggered.connect(host_tools.show_creator)

loader_action = menu.addAction("Load ...")
loader_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
loader_action.triggered.connect(cbloader.show)
loader_action.triggered.connect(host_tools.show_loader)

sceneinventory_action = menu.addAction("Manage ...")
sceneinventory_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
sceneinventory_action.triggered.connect(sceneinventory.show)
sceneinventory_action.triggered.connect(host_tools.show_scene_inventory)
menu.addSeparator()

if os.getenv("OPENPYPE_DEVELOP"):

@@ -4,15 +4,12 @@ Basic avalon integration
import os
import contextlib
from collections import OrderedDict
from avalon.tools import (
workfiles,
publish as _publish
)
from avalon.pipeline import AVALON_CONTAINER_ID
from avalon import api as avalon
from avalon import schema
from pyblish import api as pyblish
from openpype.api import Logger
from openpype.tools.utils import host_tools
from . import lib, menu, events

log = Logger().get_logger(__name__)

@@ -213,15 +210,13 @@ def update_container(track_item, data=None):
def launch_workfiles_app(*args):
''' Wrapping function for workfiles launcher '''

workdir = os.environ["AVALON_WORKDIR"]

# show workfile gui
workfiles.show(workdir)
host_tools.show_workfiles()


def publish(parent):
"""Shorthand to publish from within host"""
return _publish.show(parent)
return host_tools.show_publish(parent)


@contextlib.contextmanager

@@ -10,16 +10,16 @@ log = Logger().get_logger(__name__)

def tag_data():
return {
"Retiming": {
"editable": "1",
"note": "Clip has retime or TimeWarp effects (or multiple effects stacked on the clip)", # noqa
"icon": "retiming.png",
"metadata": {
"family": "retiming",
"marginIn": 1,
"marginOut": 1
}
},
# "Retiming": {
#     "editable": "1",
#     "note": "Clip has retime or TimeWarp effects (or multiple effects stacked on the clip)", # noqa
#     "icon": "retiming.png",
#     "metadata": {
#         "family": "retiming",
#         "marginIn": 1,
#         "marginOut": 1
#     }
# },
"[Lenses]": {
"Set lense here": {
"editable": "1",

@@ -31,15 +31,15 @@ def tag_data():
}
}
},
"NukeScript": {
"editable": "1",
"note": "Collecting track items to Nuke scripts.",
"icon": "icons:TagNuke.png",
"metadata": {
"family": "nukescript",
"subset": "main"
}
},
# "NukeScript": {
#     "editable": "1",
#     "note": "Collecting track items to Nuke scripts.",
#     "icon": "icons:TagNuke.png",
#     "metadata": {
#         "family": "nukescript",
#         "subset": "main"
#     }
# },
"Comment": {
"editable": "1",
"note": "Comment on a shot.",

@@ -78,8 +78,7 @@ def update_tag(tag, data):
# set icon if any available in input data
if data.get("icon"):
tag.setIcon(str(data["icon"]))
# set note description of tag
tag.setNote(data["note"])

# get metadata of tag
mtd = tag.metadata()
# get metadata key from data

@@ -97,6 +96,9 @@ def update_tag(tag, data):
"tag.{}".format(str(k)),
str(v)
)

# set note description of tag
tag.setNote(str(data["note"]))
return tag


@@ -106,6 +108,26 @@ def add_tags_to_workfile():
"""
from .lib import get_current_project

def add_tag_to_bin(root_bin, name, data):
# for Tags to be created in root level Bin
# at first check if any of input data tag is not already created
done_tag = next((t for t in root_bin.items()
if str(name) in t.name()), None)

if not done_tag:
# create Tag
tag = create_tag(name, data)
tag.setName(str(name))

log.debug("__ creating tag: {}".format(tag))
# adding Tag to Root Bin
root_bin.addItem(tag)
else:
# update only non hierarchy tags
update_tag(done_tag, data)
done_tag.setName(str(name))
log.debug("__ updating tag: {}".format(done_tag))

# get project and root bin object
project = get_current_project()
root_bin = project.tagsBin()

@@ -125,10 +147,8 @@
for task_type in tasks.keys():
nks_pres_tags["[Tasks]"][task_type.lower()] = {
"editable": "1",
"note": "",
"icon": {
"path": "icons:TagGood.png"
},
"note": task_type,
"icon": "icons:TagGood.png",
"metadata": {
"family": "task",
"type": task_type

@@ -157,10 +177,10 @@
# check if key is not decorated with [] so it is defined as bin
bin_find = None
pattern = re.compile(r"\[(.*)\]")
bin_finds = pattern.findall(_k)
_bin_finds = pattern.findall(_k)
# if there is available any then pop it to string
if bin_finds:
bin_find = bin_finds.pop()
if _bin_finds:
bin_find = _bin_finds.pop()

# if bin was found then create or update
if bin_find:

@@ -168,7 +188,6 @@
# first check if in root lever is not already created bins
bins = [b for b in root_bin.items()
if b.name() in str(bin_find)]
log.debug(">>> bins: {}".format(bins))

if bins:
bin = bins.pop()

@@ -178,49 +197,14 @@
bin = hiero.core.Bin(str(bin_find))

# update or create tags in the bin
for k, v in _val.items():
tags = [t for t in bin.items()
if str(k) in t.name()
if len(str(k)) == len(t.name())]
if not tags:
# create Tag obj
tag = create_tag(k, v)

# adding Tag to Bin
bin.addItem(tag)
else:
update_tag(tags.pop(), v)
for __k, __v in _val.items():
add_tag_to_bin(bin, __k, __v)

# finally add the Bin object to the root level Bin
if root_add:
# adding Tag to Root Bin
root_bin.addItem(bin)
else:
# for Tags to be created in root level Bin
# at first check if any of input data tag is not already created
tags = None
tags = [t for t in root_bin.items()
if str(_k) in t.name()]

if not tags:
# create Tag
tag = create_tag(_k, _val)

# adding Tag to Root Bin
root_bin.addItem(tag)
else:
# update Tags if they already exists
for _t in tags:
# skip bin objects
if isinstance(_t, hiero.core.Bin):
continue

# check if Hierarchy in name and skip it
# because hierarchy could be edited
if "hierarchy" in _t.name().lower():
continue

# update only non hierarchy tags
update_tag(_t, _val)
add_tag_to_bin(root_bin, _k, _val)

log.info("Default Tags were set...")

@@ -378,6 +378,17 @@ def add_otio_metadata(otio_item, media_source, **kwargs):

def create_otio_timeline():

def set_prev_item(itemindex, track_item):
# Add Gap if needed
if itemindex == 0:
# if it is first track item at track then add
# it to previouse item
return track_item

else:
# get previouse item
return track_item.parent().items()[itemindex - 1]

# get current timeline
self.timeline = hiero.ui.activeSequence()
self.project_fps = self.timeline.framerate().toFloat()

@@ -396,14 +407,6 @@ def create_otio_timeline():
type(track), track.name())

for itemindex, track_item in enumerate(track):
# skip offline track items
if not track_item.isMediaPresent():
continue

# skip if track item is disabled
if not track_item.isEnabled():
continue

# Add Gap if needed
if itemindex == 0:
# if it is first track item at track then add

@@ -131,7 +131,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
self.create_shot_instance(context, **data)

self.log.info("Creating instance: {}".format(instance))
self.log.debug(
self.log.info(
"_ instance.data: {}".format(pformat(instance.data)))

if not with_audio:

@@ -8,6 +8,7 @@ from openpype.hosts.hiero.otio import hiero_export
from Qt.QtGui import QPixmap
import tempfile


class PrecollectWorkfile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""

@@ -7,24 +7,30 @@
<scriptItem id="avalon_create">
<label>Create ...</label>
<scriptCode><![CDATA[
from avalon.tools import creator
creator.show()
import hou
from openpype.tools.utils import host_tools
parent = hou.qt.mainWindow()
host_tools.show_creator(parent)
]]></scriptCode>
</scriptItem>

<scriptItem id="avalon_load">
<label>Load ...</label>
<scriptCode><![CDATA[
from avalon.tools import cbloader
cbloader.show(use_context=True)
import hou
from openpype.tools.utils import host_tools
parent = hou.qt.mainWindow()
host_tools.show_loader(parent=parent, use_context=True)
]]></scriptCode>
</scriptItem>

<scriptItem id="avalon_manage">
<label>Manage ...</label>
<scriptCode><![CDATA[
from avalon.tools import cbsceneinventory
cbsceneinventory.show()
import hou
from openpype.tools.utils import host_tools
parent = hou.qt.mainWindow()
host_tools.show_scene_inventory(parent)
]]></scriptCode>
</scriptItem>

@@ -32,9 +38,9 @@ cbsceneinventory.show()
<label>Publish ...</label>
<scriptCode><![CDATA[
import hou
from avalon.tools import publish
from openpype.tools.utils import host_tools
parent = hou.qt.mainWindow()
publish.show(parent)
host_tools.show_publish(parent)
]]></scriptCode>
</scriptItem>

@@ -43,9 +49,10 @@ publish.show(parent)
<scriptItem id="workfiles">
<label>Work Files ...</label>
<scriptCode><![CDATA[
import hou, os
from openpype.tools import workfiles
workfiles.show(os.environ["AVALON_WORKDIR"])
import hou
from openpype.tools.utils import host_tools
parent = hou.qt.mainWindow()
host_tools.show_workfiles(parent)
]]></scriptCode>
</scriptItem>
|||
9
openpype/hosts/houdini/startup/scripts/houdinicore.py
Normal file
9
openpype/hosts/houdini/startup/scripts/houdinicore.py
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
from avalon import api, houdini
|
||||
|
||||
|
||||
def main():
|
||||
print("Installing OpenPype ...")
|
||||
api.install(houdini)
|
||||
|
||||
|
||||
main()
|
||||
|
|
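For reference, all the shelf entries above share one pattern, sketched here as it would run from a Houdini Python shell (assumes a GUI Houdini session with OpenPype installed; show_publish is just one of the tools shown above):

    import hou
    from openpype.tools.utils import host_tools

    # Parent the tool window to Houdini's main window, as the shelf scripts do
    parent = hou.qt.mainWindow()
    host_tools.show_publish(parent)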
@@ -8,7 +8,7 @@ from avalon import api as avalon
from avalon import pipeline
from avalon.maya import suspended_refresh
from avalon.maya.pipeline import IS_HEADLESS
-from avalon.tools import workfiles
+from openpype.tools.utils import host_tools
from pyblish import api as pyblish
from openpype.lib import any_outdated
import openpype.hosts.maya

@@ -35,6 +35,7 @@ def install():
    pyblish.register_plugin_path(PUBLISH_PATH)
    avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
    avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
+    avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
    log.info(PUBLISH_PATH)
    menu.install()

@@ -63,14 +64,23 @@ def process_dirmap(project_settings):
    # type: (dict) -> None
    """Go through all paths in Settings and set them using `dirmap`.

+    If the artist has Site Sync enabled, take the dirmap mapping directly
+    from Local Settings when the artist is syncing a workfile locally.
+
    Args:
        project_settings (dict): Settings for current project.

    """
-    if not project_settings["maya"].get("maya-dirmap"):
+    local_mapping = _get_local_sync_dirmap(project_settings)
+    if not project_settings["maya"].get("maya-dirmap") and not local_mapping:
        return
-    mapping = project_settings["maya"]["maya-dirmap"]["paths"] or {}
-    mapping_enabled = project_settings["maya"]["maya-dirmap"]["enabled"]
+
+    mapping = local_mapping or \
+        project_settings["maya"]["maya-dirmap"]["paths"] \
+        or {}
+    mapping_enabled = project_settings["maya"]["maya-dirmap"]["enabled"] \
+        or bool(local_mapping)
+
+    if not mapping or not mapping_enabled:
+        return
    if mapping.get("source-path") and mapping_enabled is True:

@@ -93,10 +103,72 @@ def process_dirmap(project_settings):
            continue


+def _get_local_sync_dirmap(project_settings):
+    """Return dirmap if sync to a local project site is enabled.
+
+    The only valid mapping is from the roots of the remote site to the
+    local site set in Local Settings.
+
+    Args:
+        project_settings (dict)
+    Returns:
+        dict : { "source-path": [XXX], "destination-path": [YYYY]}
+    """
+    import json
+    mapping = {}
+
+    if not project_settings["global"]["sync_server"]["enabled"]:
+        log.debug("Site Sync not enabled")
+        return mapping
+
+    from openpype.settings.lib import get_site_local_overrides
+    from openpype.modules import ModulesManager
+
+    manager = ModulesManager()
+    sync_module = manager.modules_by_name["sync_server"]
+
+    project_name = os.getenv("AVALON_PROJECT")
+    sync_settings = sync_module.get_sync_project_setting(
+        os.getenv("AVALON_PROJECT"), exclude_locals=False, cached=False)
+    log.debug(json.dumps(sync_settings, indent=4))
+
+    active_site = sync_module.get_local_normalized_site(
+        sync_module.get_active_site(project_name))
+    remote_site = sync_module.get_local_normalized_site(
+        sync_module.get_remote_site(project_name))
+    log.debug("active {} - remote {}".format(active_site, remote_site))
+
+    if active_site == "local" \
+            and project_name in sync_module.get_enabled_projects() \
+            and active_site != remote_site:
+        overrides = get_site_local_overrides(os.getenv("AVALON_PROJECT"),
+                                             active_site)
+        for root_name, value in overrides.items():
+            if os.path.isdir(value):
+                try:
+                    mapping["destination-path"] = [value]
+                    mapping["source-path"] = [sync_settings["sites"]
+                                              [remote_site]
+                                              ["root"]
+                                              [root_name]]
+                except IndexError:
+                    # missing corresponding destination path
+                    log.debug("overrides: {}".format(overrides))
+                    log.error(
+                        ("invalid dirmap mapping, missing corresponding"
+                         " destination directory."))
+                    break
+
+    log.debug("local sync mapping:: {}".format(mapping))
+    return mapping
+
+
def uninstall():
    pyblish.deregister_plugin_path(PUBLISH_PATH)
    avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
    avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
+    avalon.deregister_plugin_path(avalon.InventoryAction, INVENTORY_PATH)

    menu.uninstall()

@@ -136,16 +208,12 @@ def on_init(_):
    launch_workfiles = os.environ.get("WORKFILES_STARTUP")

    if launch_workfiles:
-        safe_deferred(launch_workfiles_app)
+        safe_deferred(host_tools.show_workfiles)

    if not IS_HEADLESS:
        safe_deferred(override_toolbox_ui)


-def launch_workfiles_app():
-    workfiles.show(os.environ["AVALON_WORKDIR"])
-
-
def on_before_save(return_code, _):
    """Run validation for scene's FPS prior to saving"""
    return lib.validate_fps()

@@ -241,9 +309,15 @@ def on_task_changed(*args):
    lib.set_context_settings()
    lib.update_content_on_context_change()

+    msg = " project: {}\n asset: {}\n task:{}".format(
+        avalon.Session["AVALON_PROJECT"],
+        avalon.Session["AVALON_ASSET"],
+        avalon.Session["AVALON_TASK"]
+    )
+
    lib.show_message(
        "Context was changed",
-        ("Context was changed to {}".format(avalon.Session["AVALON_ASSET"])),
+        ("Context was changed to:\n{}".format(msg)),
    )
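A mapping in the shape returned by _get_local_sync_dirmap() above is ultimately applied through Maya's dirmap command. A minimal sketch of that application step, assuming a well-formed mapping (apply_mapping and the example roots are invented for illustration):

    from maya import cmds

    def apply_mapping(mapping):
        # Pair each remote (source) root with its local (destination) override
        cmds.dirmap(enable=True)
        for src, dst in zip(mapping["source-path"],
                            mapping["destination-path"]):
            cmds.dirmap(mapDirectory=(src, dst))

    apply_mapping({
        "source-path": ["//studio/projects"],       # invented remote root
        "destination-path": ["C:/local/projects"],  # invented local override
    })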
@@ -1,10 +1,16 @@
"""A set of commands that install overrides to Maya's UI"""

+import os
+import logging
+
+from functools import partial
+
import maya.cmds as mc
import maya.mel as mel
-from functools import partial
-import os
-import logging

+from avalon.maya import pipeline
+from openpype.api import resources
+from openpype.tools.utils import host_tools


log = logging.getLogger(__name__)

@@ -69,39 +75,8 @@ def override_component_mask_commands():

def override_toolbox_ui():
    """Add custom buttons in Toolbox as replacement for Maya web help icon."""
-    inventory = None
-    loader = None
-    launch_workfiles_app = None
-    mayalookassigner = None
-    try:
-        import avalon.tools.sceneinventory as inventory
-    except Exception:
-        log.warning("Could not import SceneInventory tool")
-
-    try:
-        import avalon.tools.loader as loader
-    except Exception:
-        log.warning("Could not import Loader tool")
-
-    try:
-        from avalon.maya.pipeline import launch_workfiles_app
-    except Exception:
-        log.warning("Could not import Workfiles tool")
-
-    try:
-        from openpype.tools import mayalookassigner
-    except Exception:
-        log.warning("Could not import Maya Look assigner tool")
-
-    from openpype.api import resources
-
    icons = resources.get_resource("icons")

-    if not any((
-        mayalookassigner, launch_workfiles_app, loader, inventory
-    )):
-        return
-
    # Ensure the maya web icon on toolbox exists
    web_button = "ToolBox|MainToolboxLayout|mayaWebButton"
    if not mc.iconTextButton(web_button, query=True, exists=True):

@@ -120,14 +95,23 @@ def override_toolbox_ui():
    # Create our controls
    background_color = (0.267, 0.267, 0.267)
    controls = []
-    if mayalookassigner:
+    look_assigner = None
+    try:
+        look_assigner = host_tools.get_tool_by_name(
+            "lookassigner",
+            parent=pipeline._parent
+        )
+    except Exception:
+        log.warning("Couldn't create Look assigner window.", exc_info=True)
+
+    if look_assigner is not None:
        controls.append(
            mc.iconTextButton(
                "pype_toolbox_lookmanager",
                annotation="Look Manager",
                label="Look Manager",
                image=os.path.join(icons, "lookmanager.png"),
-                command=lambda: mayalookassigner.show(),
+                command=host_tools.show_look_assigner,
                bgc=background_color,
                width=icon_size,
                height=icon_size,

@@ -135,50 +119,53 @@ def override_toolbox_ui():
            )
        )

-    if launch_workfiles_app:
-        controls.append(
-            mc.iconTextButton(
-                "pype_toolbox_workfiles",
-                annotation="Work Files",
-                label="Work Files",
-                image=os.path.join(icons, "workfiles.png"),
-                command=lambda: launch_workfiles_app(),
-                bgc=background_color,
-                width=icon_size,
-                height=icon_size,
-                parent=parent
-            )
-        )
+    controls.append(
+        mc.iconTextButton(
+            "pype_toolbox_workfiles",
+            annotation="Work Files",
+            label="Work Files",
+            image=os.path.join(icons, "workfiles.png"),
+            command=lambda: host_tools.show_workfiles(
+                parent=pipeline._parent
+            ),
+            bgc=background_color,
+            width=icon_size,
+            height=icon_size,
+            parent=parent
+        )
+    )

-    if loader:
-        controls.append(
-            mc.iconTextButton(
-                "pype_toolbox_loader",
-                annotation="Loader",
-                label="Loader",
-                image=os.path.join(icons, "loader.png"),
-                command=lambda: loader.show(use_context=True),
-                bgc=background_color,
-                width=icon_size,
-                height=icon_size,
-                parent=parent
-            )
-        )
+    controls.append(
+        mc.iconTextButton(
+            "pype_toolbox_loader",
+            annotation="Loader",
+            label="Loader",
+            image=os.path.join(icons, "loader.png"),
+            command=lambda: host_tools.show_loader(
+                parent=pipeline._parent, use_context=True
+            ),
+            bgc=background_color,
+            width=icon_size,
+            height=icon_size,
+            parent=parent
+        )
+    )

-    if inventory:
-        controls.append(
-            mc.iconTextButton(
-                "pype_toolbox_manager",
-                annotation="Inventory",
-                label="Inventory",
-                image=os.path.join(icons, "inventory.png"),
-                command=lambda: inventory.show(),
-                bgc=background_color,
-                width=icon_size,
-                height=icon_size,
-                parent=parent
-            )
-        )
+    controls.append(
+        mc.iconTextButton(
+            "pype_toolbox_manager",
+            annotation="Inventory",
+            label="Inventory",
+            image=os.path.join(icons, "inventory.png"),
+            command=lambda: host_tools.show_scene_inventory(
+                parent=pipeline._parent
+            ),
+            bgc=background_color,
+            width=icon_size,
+            height=icon_size,
+            parent=parent
+        )
+    )

    # Add the buttons on the bottom and stack
    # them above each other with side padding
@@ -114,6 +114,8 @@ class RenderProduct(object):
    aov = attr.ib(default=None)                      # source aov
    driver = attr.ib(default=None)                   # source driver
    multipart = attr.ib(default=False)               # multichannel file
+    camera = attr.ib(default=None)                   # used only when rendering
+                                                     # from multiple cameras


def get(layer, render_instance=None):

@@ -183,6 +185,16 @@ class ARenderProducts:
        self.layer_data = self._get_layer_data()
        self.layer_data.products = self.get_render_products()

+    def has_camera_token(self):
+        # type: () -> bool
+        """Check if camera token is in image prefix.
+
+        Returns:
+            bool: True/False if camera token is present.
+
+        """
+        return "<camera>" in self.layer_data.filePrefix.lower()
+
    @abstractmethod
    def get_render_products(self):
        """To be implemented by renderer class.

@@ -307,7 +319,7 @@ class ARenderProducts:
        # Deadline allows submitting renders with a custom frame list
        # to support those cases we might want to allow 'custom frames'
        # to be overridden to `ExpectFiles` class?
-        layer_data = LayerMetadata(
+        return LayerMetadata(
            frameStart=int(self.get_render_attribute("startFrame")),
            frameEnd=int(self.get_render_attribute("endFrame")),
            frameStep=int(self.get_render_attribute("byFrameStep")),

@@ -321,7 +333,6 @@ class ARenderProducts:
            defaultExt=self._get_attr("defaultRenderGlobals.imfPluginKey"),
            filePrefix=file_prefix
        )
-        return layer_data

    def _generate_file_sequence(
            self, layer_data,

@@ -330,7 +341,7 @@ class ARenderProducts:
            force_cameras=None):
        # type: (LayerMetadata, str, str, list) -> list
        expected_files = []
-        cameras = force_cameras if force_cameras else layer_data.cameras
+        cameras = force_cameras or layer_data.cameras
        ext = force_ext or layer_data.defaultExt
        for cam in cameras:
            file_prefix = layer_data.filePrefix

@@ -361,8 +372,8 @@ class ARenderProducts:
        )
        return expected_files

-    def get_files(self, product, camera):
-        # type: (RenderProduct, str) -> list
+    def get_files(self, product):
+        # type: (RenderProduct) -> list
        """Return list of expected files.

        It will translate render token strings ('<RenderPass>', etc.) to

@@ -373,7 +384,6 @@ class ARenderProducts:
        Args:
            product (RenderProduct): Render product to be used for file
                generation.
-            camera (str): Camera name.

        Returns:
            List of files

@@ -383,7 +393,7 @@ class ARenderProducts:
            self.layer_data,
            force_aov_name=product.productName,
            force_ext=product.ext,
-            force_cameras=[camera]
+            force_cameras=[product.camera]
        )

    def get_renderable_cameras(self):
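A tiny illustration of the has_camera_token() check added above; the prefix string is invented for the example:

    file_prefix = "maya/<Scene>/<Camera>/<RenderLayer>"
    print("<camera>" in file_prefix.lower())  # True -> per-camera products expected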
@@ -460,15 +470,21 @@ class RenderProductsArnold(ARenderProducts):

        return prefix

-    def _get_aov_render_products(self, aov):
+    def _get_aov_render_products(self, aov, cameras=None):
        """Return all render products for the AOV"""

-        products = list()
+        products = []
        aov_name = self._get_attr(aov, "name")
        ai_drivers = cmds.listConnections("{}.outputs".format(aov),
                                          source=True,
                                          destination=False,
                                          type="aiAOVDriver") or []
+        if not cameras:
+            cameras = [
+                self.sanitize_camera_name(
+                    self.get_renderable_cameras()[0]
+                )
+            ]

        for ai_driver in ai_drivers:
            # todo: check aiAOVDriver.prefix as it could have

@@ -497,30 +513,37 @@ class RenderProductsArnold(ARenderProducts):
                name = "beauty"

            # Support Arnold light groups for AOVs
-            # Global AOV: When disabled the main layer is not written: `{pass}`
+            # Global AOV: When disabled the main layer is
+            #             not written: `{pass}`
            # All Light Groups: When enabled, a `{pass}_lgroups` file is
-            #                   written and is always merged into a single file
-            # Light Groups List: When set, a product per light group is written
+            #                   written and is always merged into a
+            #                   single file
+            # Light Groups List: When set, a product per light
+            #                    group is written
            #                    e.g. {pass}_front, {pass}_rim
            global_aov = self._get_attr(aov, "globalAov")
            if global_aov:
-                product = RenderProduct(productName=name,
-                                        ext=ext,
-                                        aov=aov_name,
-                                        driver=ai_driver)
-                products.append(product)
+                for camera in cameras:
+                    product = RenderProduct(productName=name,
+                                            ext=ext,
+                                            aov=aov_name,
+                                            driver=ai_driver,
+                                            camera=camera)
+                    products.append(product)

            all_light_groups = self._get_attr(aov, "lightGroups")
            if all_light_groups:
                # All light groups is enabled. A single multipart
                # Render Product
-                product = RenderProduct(productName=name + "_lgroups",
-                                        ext=ext,
-                                        aov=aov_name,
-                                        driver=ai_driver,
-                                        # Always multichannel output
-                                        multipart=True)
-                products.append(product)
+                for camera in cameras:
+                    product = RenderProduct(productName=name + "_lgroups",
+                                            ext=ext,
+                                            aov=aov_name,
+                                            driver=ai_driver,
+                                            # Always multichannel output
+                                            multipart=True,
+                                            camera=camera)
+                    products.append(product)
            else:
                value = self._get_attr(aov, "lightGroupsList")
                if not value:

@@ -529,11 +552,15 @@ class RenderProductsArnold(ARenderProducts):
                for light_group in selected_light_groups:
                    # Render Product per selected light group
                    aov_light_group_name = "{}_{}".format(name, light_group)
-                    product = RenderProduct(productName=aov_light_group_name,
-                                            aov=aov_name,
-                                            driver=ai_driver,
-                                            ext=ext)
-                    products.append(product)
+                    for camera in cameras:
+                        product = RenderProduct(
+                            productName=aov_light_group_name,
+                            aov=aov_name,
+                            driver=ai_driver,
+                            ext=ext,
+                            camera=camera
+                        )
+                        products.append(product)

        return products

@@ -556,17 +583,26 @@ class RenderProductsArnold(ARenderProducts):
            # anyway.
            return []

+        # check if camera token is in prefix. If so, and we have a list of
+        # renderable cameras, generate a render product for each and every
+        # one of them.
+        cameras = [
+            self.sanitize_camera_name(c)
+            for c in self.get_renderable_cameras()
+        ]
+
        default_ext = self._get_attr("defaultRenderGlobals.imfPluginKey")
-        beauty_product = RenderProduct(productName="beauty",
-                                       ext=default_ext,
-                                       driver="defaultArnoldDriver")
+        beauty_products = [RenderProduct(
+            productName="beauty",
+            ext=default_ext,
+            driver="defaultArnoldDriver",
+            camera=camera) for camera in cameras]
        # AOVs > Legacy > Maya Render View > Mode
        aovs_enabled = bool(
            self._get_attr("defaultArnoldRenderOptions.aovMode")
        )
        if not aovs_enabled:
-            return [beauty_product]
+            return beauty_products

        # Common > File Output > Merge AOVs or <RenderPass>
        # We don't need to check for Merge AOVs due to overridden

@@ -575,8 +611,9 @@ class RenderProductsArnold(ARenderProducts):
            "<renderpass>" in self.layer_data.filePrefix.lower()
        )
        if not has_renderpass_token:
-            beauty_product.multipart = True
-            return [beauty_product]
+            for product in beauty_products:
+                product.multipart = True
+            return beauty_products

        # AOVs are set to be rendered separately. We should expect
        # <RenderPass> token in path.

@@ -598,14 +635,14 @@ class RenderProductsArnold(ARenderProducts):
                continue

            # For now stick to the legacy output format.
-            aov_products = self._get_aov_render_products(aov)
+            aov_products = self._get_aov_render_products(aov, cameras)
            products.extend(aov_products)

-        if not any(product.aov == "RGBA" for product in products):
+        if all(product.aov != "RGBA" for product in products):
            # Append default 'beauty' as this is Arnold's default.
            # However, it is excluded whenever a RGBA pass is enabled.
            # For legibility add the beauty layer as first entry
-            products.insert(0, beauty_product)
+            products += beauty_products

        # TODO: Output Denoising AOVs?
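To make the light-group naming scheme above concrete, these are the product names generated for an AOV named "diffuse" with two selected light groups (names invented):

    name = "diffuse"
    selected_light_groups = ["front", "rim"]
    print(["{}_{}".format(name, lg) for lg in selected_light_groups])
    # ['diffuse_front', 'diffuse_rim'] -- while "All Light Groups" instead
    # yields a single multipart 'diffuse_lgroups' product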
@@ -670,6 +707,11 @@ class RenderProductsVray(ARenderProducts):
            # anyway.
            return []

+        cameras = [
+            self.sanitize_camera_name(c)
+            for c in self.get_renderable_cameras()
+        ]
+
        image_format_str = self._get_attr("vraySettings.imageFormatStr")
        default_ext = image_format_str
        if default_ext in {"exr (multichannel)", "exr (deep)"}:

@@ -680,13 +722,21 @@ class RenderProductsVray(ARenderProducts):
        # add beauty as default when not disabled
        dont_save_rgb = self._get_attr("vraySettings.dontSaveRgbChannel")
        if not dont_save_rgb:
-            products.append(RenderProduct(productName="", ext=default_ext))
+            for camera in cameras:
+                products.append(
+                    RenderProduct(productName="",
+                                  ext=default_ext,
+                                  camera=camera))

        # separate alpha file
        separate_alpha = self._get_attr("vraySettings.separateAlpha")
        if separate_alpha:
-            products.append(RenderProduct(productName="Alpha",
-                                          ext=default_ext))
+            for camera in cameras:
+                products.append(
+                    RenderProduct(productName="Alpha",
+                                  ext=default_ext,
+                                  camera=camera)
+                )

        if image_format_str == "exr (multichannel)":
            # AOVs are merged in m-channel file, only main layer is rendered

@@ -716,19 +766,23 @@ class RenderProductsVray(ARenderProducts):
                # instead seems to output multiple Render Products,
                # specifically "Self_Illumination" and "Environment"
                product_names = ["Self_Illumination", "Environment"]
-                for name in product_names:
-                    product = RenderProduct(productName=name,
-                                            ext=default_ext,
-                                            aov=aov)
-                    products.append(product)
+                for camera in cameras:
+                    for name in product_names:
+                        product = RenderProduct(productName=name,
+                                                ext=default_ext,
+                                                aov=aov,
+                                                camera=camera)
+                        products.append(product)
                # Continue as we've processed this special case AOV
                continue

            aov_name = self._get_vray_aov_name(aov)
-            product = RenderProduct(productName=aov_name,
-                                    ext=default_ext,
-                                    aov=aov)
-            products.append(product)
+            for camera in cameras:
+                product = RenderProduct(productName=aov_name,
+                                        ext=default_ext,
+                                        aov=aov,
+                                        camera=camera)
+                products.append(product)

        return products
@@ -875,6 +929,11 @@ class RenderProductsRedshift(ARenderProducts):
            # anyway.
            return []

+        cameras = [
+            self.sanitize_camera_name(c)
+            for c in self.get_renderable_cameras()
+        ]
+
        # For Redshift we don't directly return upon forcing multilayer
        # due to some AOVs still being written into separate files,
        # like Cryptomatte.

@@ -933,11 +992,14 @@ class RenderProductsRedshift(ARenderProducts):
                for light_group in light_groups:
                    aov_light_group_name = "{}_{}".format(aov_name,
                                                          light_group)
-                    product = RenderProduct(productName=aov_light_group_name,
-                                            aov=aov_name,
-                                            ext=ext,
-                                            multipart=aov_multipart)
-                    products.append(product)
+                    for camera in cameras:
+                        product = RenderProduct(
+                            productName=aov_light_group_name,
+                            aov=aov_name,
+                            ext=ext,
+                            multipart=aov_multipart,
+                            camera=camera)
+                        products.append(product)

            if light_groups:
                light_groups_enabled = True

@@ -945,11 +1007,13 @@ class RenderProductsRedshift(ARenderProducts):
            # Redshift AOV Light Select always renders the global AOV
            # even when light groups are present so we don't need to
            # exclude it when light groups are active
-            product = RenderProduct(productName=aov_name,
-                                    aov=aov_name,
-                                    ext=ext,
-                                    multipart=aov_multipart)
-            products.append(product)
+            for camera in cameras:
+                product = RenderProduct(productName=aov_name,
+                                        aov=aov_name,
+                                        ext=ext,
+                                        multipart=aov_multipart,
+                                        camera=camera)
+                products.append(product)

        # When a Beauty AOV is added manually, it will be rendered as
        # 'Beauty_other' in file name and "standard" beauty will have

@@ -959,10 +1023,12 @@ class RenderProductsRedshift(ARenderProducts):
            return products

        beauty_name = "Beauty_other" if has_beauty_aov else ""
-        products.insert(0,
-                        RenderProduct(productName=beauty_name,
-                                      ext=ext,
-                                      multipart=multipart))
+        for camera in cameras:
+            products.insert(0,
+                            RenderProduct(productName=beauty_name,
+                                          ext=ext,
+                                          multipart=multipart,
+                                          camera=camera))

        return products
@@ -987,6 +1053,16 @@ class RenderProductsRenderman(ARenderProducts):
        :func:`ARenderProducts.get_render_products()`

        """
+        cameras = [
+            self.sanitize_camera_name(c)
+            for c in self.get_renderable_cameras()
+        ]
+
+        if not cameras:
+            cameras = [
+                self.sanitize_camera_name(
+                    self.get_renderable_cameras()[0])
+            ]
        products = []

        default_ext = "exr"

@@ -1000,9 +1076,11 @@ class RenderProductsRenderman(ARenderProducts):
            if aov_name == "rmanDefaultDisplay":
                aov_name = "beauty"

-            product = RenderProduct(productName=aov_name,
-                                    ext=default_ext)
-            products.append(product)
+            for camera in cameras:
+                product = RenderProduct(productName=aov_name,
+                                        ext=default_ext,
+                                        camera=camera)
+                products.append(product)

        return products
@@ -2,13 +2,15 @@ import sys
import os
import logging

-from avalon.vendor.Qt import QtWidgets, QtGui
-from avalon.maya import pipeline
-from openpype.api import BuildWorkfile
-import maya.cmds as cmds
-from openpype.settings import get_project_settings
+from Qt import QtWidgets, QtGui

self = sys.modules[__name__]
+import maya.cmds as cmds
+
+from avalon.maya import pipeline
+
+from openpype.api import BuildWorkfile
+from openpype.settings import get_project_settings
+from openpype.tools.utils import host_tools


log = logging.getLogger(__name__)

@@ -36,25 +38,15 @@ def deferred():
        )

    def add_look_assigner_item():
-        import mayalookassigner
        cmds.menuItem(
            "Look assigner",
            parent=pipeline._menu,
-            command=lambda *args: mayalookassigner.show()
+            command=lambda *args: host_tools.show_look_assigner(
+                pipeline._parent
+            )
        )

-    def modify_workfiles():
-        from openpype.tools import workfiles
-
-        def launch_workfiles_app(*_args, **_kwargs):
-            workfiles.show(
-                os.path.join(
-                    cmds.workspace(query=True, rootDirectory=True),
-                    cmds.workspace(fileRuleEntry="scene")
-                ),
-                parent=pipeline._parent
-            )
-
    # Find the pipeline menu
    top_menu = _get_menu()

@@ -75,7 +67,7 @@ def deferred():
    cmds.menuItem(
        "Work Files",
        parent=pipeline._menu,
-        command=launch_workfiles_app,
+        command=lambda *args: host_tools.show_workfiles(pipeline._parent),
        insertAfter=after_action
    )
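The menu items above all register a callback that opens a host tool parented to Maya's main window. A hedged, self-contained sketch of that pattern (the function name and label are placeholders):

    import maya.cmds as cmds
    from openpype.tools.utils import host_tools

    def add_workfiles_item(menu, parent_widget):
        # Open the Work Files tool parented to Maya's UI when clicked
        cmds.menuItem(
            "Work Files",
            parent=menu,
            command=lambda *args: host_tools.show_workfiles(parent_widget),
        )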
@@ -4,6 +4,53 @@ import avalon.maya
from openpype.api import PypeCreatorMixin


+def get_reference_node(members, log=None):
+    """Get the reference node from the container members
+    Args:
+        members: list of node names
+
+    Returns:
+        str: Reference node name.
+
+    """
+
+    from maya import cmds
+
+    # Collect the references without .placeHolderList[] attributes as
+    # unique entries (objects only) and skipping the sharedReferenceNode.
+    references = set()
+    for ref in cmds.ls(members, exactType="reference", objectsOnly=True):
+
+        # Ignore any `:sharedReferenceNode`
+        if ref.rsplit(":", 1)[-1].startswith("sharedReferenceNode"):
+            continue
+
+        # Ignore _UNKNOWN_REF_NODE_ (PLN-160)
+        if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"):
+            continue
+
+        references.add(ref)
+
+    assert references, "No reference node found in container"
+
+    # Get highest reference node (least parents)
+    highest = min(references,
+                  key=lambda x: len(get_reference_node_parents(x)))
+
+    # Warn the user when we're taking the highest reference node
+    if len(references) > 1:
+        if not log:
+            from openpype.lib import PypeLogger
+
+            log = PypeLogger().get_logger(__name__)
+
+        log.warning("More than one reference node found in "
+                    "container, using highest reference node: "
+                    "%s (in: %s)", highest, list(references))
+
+    return highest
+
+
def get_reference_node_parents(ref):
    """Return all parent reference nodes of reference node

@@ -76,7 +123,7 @@ class ReferenceLoader(api.Loader):
        count = options.get("count") or 1
        for c in range(0, count):
            namespace = namespace or lib.unique_namespace(
-                asset["name"] + "_",
+                "{}_{}_".format(asset["name"], context["subset"]["name"]),
                prefix="_" if asset["name"][0].isdigit() else "",
                suffix="_",
            )

@@ -109,7 +156,7 @@ class ReferenceLoader(api.Loader):
                    loader=self.__class__.__name__
                ))
            else:
-                ref_node = self._get_reference_node(nodes)
+                ref_node = get_reference_node(nodes, self.log)
                loaded_containers.append(containerise(
                    name=name,
                    namespace=namespace,

@@ -126,46 +173,6 @@ class ReferenceLoader(api.Loader):
        """To be implemented by subclass"""
        raise NotImplementedError("Must be implemented by subclass")

-    def _get_reference_node(self, members):
-        """Get the reference node from the container members
-        Args:
-            members: list of node names
-
-        Returns:
-            str: Reference node name.
-
-        """
-
-        from maya import cmds
-
-        # Collect the references without .placeHolderList[] attributes as
-        # unique entries (objects only) and skipping the sharedReferenceNode.
-        references = set()
-        for ref in cmds.ls(members, exactType="reference", objectsOnly=True):
-
-            # Ignore any `:sharedReferenceNode`
-            if ref.rsplit(":", 1)[-1].startswith("sharedReferenceNode"):
-                continue
-
-            # Ignore _UNKNOWN_REF_NODE_ (PLN-160)
-            if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"):
-                continue
-
-            references.add(ref)
-
-        assert references, "No reference node found in container"
-
-        # Get highest reference node (least parents)
-        highest = min(references,
-                      key=lambda x: len(get_reference_node_parents(x)))
-
-        # Warn the user when we're taking the highest reference node
-        if len(references) > 1:
-            self.log.warning("More than one reference node found in "
-                             "container, using highest reference node: "
-                             "%s (in: %s)", highest, list(references))
-
-        return highest
-
    def update(self, container, representation):

@@ -178,7 +185,7 @@ class ReferenceLoader(api.Loader):

        # Get reference node from container members
        members = cmds.sets(node, query=True, nodesOnly=True)
-        reference_node = self._get_reference_node(members)
+        reference_node = get_reference_node(members, self.log)

        file_type = {
            "ma": "mayaAscii",

@@ -274,7 +281,7 @@ class ReferenceLoader(api.Loader):

        # Assume asset has been referenced
        members = cmds.sets(node, query=True)
-        reference_node = self._get_reference_node(members)
+        reference_node = get_reference_node(members, self.log)

        assert reference_node, ("Imported container not supported; "
                                "container must be referenced.")
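Hypothetical usage of the module-level get_reference_node() introduced above; the container set name is invented for illustration:

    from maya import cmds

    members = cmds.sets("modelMain_CON", query=True, nodesOnly=True)
    ref_node = get_reference_node(members)  # warns if several references exist
    print(cmds.referenceQuery(ref_node, filename=True))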
@@ -31,7 +31,7 @@ class ShaderDefinitionsEditor(QtWidgets.QWidget):

        self.setObjectName("shaderDefinitionEditor")
        self.setWindowTitle("OpenPype shader name definition editor")
-        icon = QtGui.QIcon(resources.pype_icon_filepath())
+        icon = QtGui.QIcon(resources.get_openpype_icon_filepath())
        self.setWindowIcon(icon)
        self.setWindowFlags(QtCore.Qt.Window)
        self.setParent(parent)
@@ -1,11 +1,11 @@
from openpype.hosts.maya.api import plugin


-class CreateMayaAscii(plugin.Creator):
-    """Raw Maya Ascii file export"""
+class CreateMayaScene(plugin.Creator):
+    """Raw Maya Scene file export"""

-    name = "mayaAscii"
-    label = "Maya Ascii"
-    family = "mayaAscii"
+    name = "mayaScene"
+    label = "Maya Scene"
+    family = "mayaScene"
    icon = "file-archive-o"
    defaults = ['Main']

@@ -9,3 +9,8 @@ class CreateSetDress(plugin.Creator):
    family = "setdress"
    icon = "cubes"
    defaults = ["Main", "Anim"]
+
+    def __init__(self, *args, **kwargs):
+        super(CreateSetDress, self).__init__(*args, **kwargs)
+
+        self.data["exactSetMembersOnly"] = True
openpype/hosts/maya/plugins/inventory/import_modelrender.py (new file, 92 lines)
@@ -0,0 +1,92 @@
+from avalon import api, io
+
+
+class ImportModelRender(api.InventoryAction):
+
+    label = "Import Model Render Sets"
+    icon = "industry"
+    color = "#55DDAA"
+
+    scene_type_regex = "meta.render.m[ab]"
+    look_data_type = "meta.render.json"
+
+    @staticmethod
+    def is_compatible(container):
+        return (
+            container.get("loader") == "ReferenceLoader"
+            and container.get("name", "").startswith("model")
+        )
+
+    def process(self, containers):
+        from maya import cmds
+
+        for container in containers:
+            con_name = container["objectName"]
+            nodes = []
+            for n in cmds.sets(con_name, query=True, nodesOnly=True) or []:
+                if cmds.nodeType(n) == "reference":
+                    nodes += cmds.referenceQuery(n, nodes=True)
+                else:
+                    nodes.append(n)
+
+            repr_doc = io.find_one({
+                "_id": io.ObjectId(container["representation"]),
+            })
+            version_id = repr_doc["parent"]
+
+            print("Importing render sets for model %r" % con_name)
+            self.assign_model_render_by_version(nodes, version_id)
+
+    def assign_model_render_by_version(self, nodes, version_id):
+        """Assign nodes a specific published model render data version by id.
+
+        This assumes the nodes correspond with the asset.
+
+        Args:
+            nodes(list): nodes to assign render data to
+            version_id (bson.ObjectId): database id of the version of model
+
+        Returns:
+            None
+        """
+        import json
+        from maya import cmds
+        from avalon import maya, io, pipeline
+        from openpype.hosts.maya.api import lib
+
+        # Get representations of shader file and relationships
+        look_repr = io.find_one({
+            "type": "representation",
+            "parent": version_id,
+            "name": {"$regex": self.scene_type_regex},
+        })
+        if not look_repr:
+            print("No model render sets for this model version..")
+            return
+
+        json_repr = io.find_one({
+            "type": "representation",
+            "parent": version_id,
+            "name": self.look_data_type,
+        })
+
+        context = pipeline.get_representation_context(look_repr["_id"])
+        maya_file = pipeline.get_representation_path_from_context(context)
+
+        context = pipeline.get_representation_context(json_repr["_id"])
+        json_file = pipeline.get_representation_path_from_context(context)
+
+        # Import the look file
+        with maya.maintained_selection():
+            shader_nodes = cmds.file(maya_file,
+                                     i=True,  # import
+                                     returnNewNodes=True)
+            # imprint context data
+
+        # Load relationships
+        shader_relation = json_file
+        with open(shader_relation, "r") as f:
+            relationships = json.load(f)
+
+        # Assign relationships
+        lib.apply_shaders(relationships, shader_nodes, nodes)

openpype/hosts/maya/plugins/inventory/import_reference.py (new file, 29 lines)
@@ -0,0 +1,29 @@
+from maya import cmds
+
+from avalon import api
+
+from openpype.hosts.maya.api.plugin import get_reference_node
+
+
+class ImportReference(api.InventoryAction):
+    """Imports selected reference to inside of the file."""
+
+    label = "Import Reference"
+    icon = "download"
+    color = "#d8d8d8"
+
+    def process(self, containers):
+        references = cmds.ls(type="reference")
+        for container in containers:
+            if container["loader"] != "ReferenceLoader":
+                print("Not a reference, skipping")
+                continue
+
+            node = container["objectName"]
+            members = cmds.sets(node, query=True, nodesOnly=True)
+            ref_node = get_reference_node(members)
+
+            ref_file = cmds.referenceQuery(ref_node, f=True)
+            cmds.file(ref_file, importReference=True)
+
+        return True  # return anything to trigger model refresh
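The scene_type_regex above is passed to MongoDB as a $regex; the same pattern behaves like this in plain Python:

    import re

    pattern = "meta.render.m[ab]"
    print(bool(re.match(pattern, "meta.render.ma")))    # True (mayaAscii)
    print(bool(re.match(pattern, "meta.render.mb")))    # True (mayaBinary)
    print(bool(re.match(pattern, "meta.render.json")))  # False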
@@ -13,6 +13,7 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
                "pointcache",
                "animation",
                "mayaAscii",
+                "mayaScene",
                "setdress",
                "layout",
                "camera",

@@ -40,14 +41,13 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
            family = "model"

        with maya.maintained_selection():
-
-            groupName = "{}:{}".format(namespace, name)
+            groupName = "{}:_GRP".format(namespace)
            cmds.loadPlugin("AbcImport.mll", quiet=True)
            nodes = cmds.file(self.fname,
                              namespace=namespace,
                              sharedReferenceFile=False,
                              groupReference=True,
-                              groupName="{}:{}".format(namespace, name),
+                              groupName=groupName,
                              reference=True,
                              returnNewNodes=True)

@@ -71,7 +71,7 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
            except:  # noqa: E722
                pass

-            if family not in ["layout", "setdress", "mayaAscii"]:
+            if family not in ["layout", "setdress", "mayaAscii", "mayaScene"]:
                for root in roots:
                    root.setParent(world=True)
@@ -223,8 +223,8 @@ class CollectLook(pyblish.api.InstancePlugin):

    def process(self, instance):
        """Collect the Look in the instance with the correct layer settings"""
-
-        with lib.renderlayer(instance.data["renderlayer"]):
+        renderlayer = instance.data.get("renderlayer", "defaultRenderLayer")
+        with lib.renderlayer(renderlayer):
            self.collect(instance)

    def collect(self, instance):

@@ -357,6 +357,23 @@ class CollectLook(pyblish.api.InstancePlugin):
        for vray_node in vray_plugin_nodes:
            history.extend(cmds.listHistory(vray_node))

+        # handling render attribute sets
+        render_set_types = [
+            "VRayDisplacement",
+            "VRayLightMesh",
+            "VRayObjectProperties",
+            "RedshiftObjectId",
+            "RedshiftMeshParameters",
+        ]
+        render_sets = cmds.ls(look_sets, type=render_set_types)
+        if render_sets:
+            history.extend(
+                cmds.listHistory(render_sets,
+                                 future=False,
+                                 pruneDagObjects=True)
+                or []
+            )
+
        files = cmds.ls(history, type="file", long=True)
        files.extend(cmds.ls(history, type="aiImage", long=True))
        files.extend(cmds.ls(history, type="RedshiftNormalMap", long=True))

@@ -550,3 +567,45 @@ class CollectLook(pyblish.api.InstancePlugin):
                "source": source,  # required for resources
                "files": files,
                "color_space": color_space}  # required for resources
+
+
+class CollectModelRenderSets(CollectLook):
+    """Collect render attribute sets for model instance.
+
+    Collects additional render attribute sets so they can be
+    published with model.
+
+    """
+
+    order = pyblish.api.CollectorOrder + 0.21
+    families = ["model"]
+    label = "Collect Model Render Sets"
+    hosts = ["maya"]
+    maketx = True
+
+    def collect_sets(self, instance):
+        """Collect all related objectSets except shadingEngines
+
+        Args:
+            instance (list): all nodes to be published
+
+        Returns:
+            dict
+        """
+
+        sets = {}
+        for node in instance:
+            related_sets = lib.get_related_sets(node)
+            if not related_sets:
+                continue
+
+            for objset in related_sets:
+                if objset in sets:
+                    continue
+
+                if "shadingEngine" in cmds.nodeType(objset, inherited=True):
+                    continue
+
+                sets[objset] = {"uuid": lib.get_id(objset), "members": list()}
+
+        return sets
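The collect_sets() override above returns a dictionary keyed by objectSet name; an invented example of its shape (set names and ids are placeholders):

    sets = {
        "VRayDisplacement1": {"uuid": "5f1a...", "members": []},
        "RedshiftMeshParameters1": {"uuid": "5f1b...", "members": []},
    }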
@@ -3,14 +3,14 @@ from maya import cmds
import pyblish.api


-class CollectMayaAscii(pyblish.api.InstancePlugin):
-    """Collect May Ascii Data
+class CollectMayaScene(pyblish.api.InstancePlugin):
+    """Collect Maya Scene Data

    """

    order = pyblish.api.CollectorOrder + 0.2
    label = 'Collect Model Data'
-    families = ["mayaAscii"]
+    families = ["mayaScene"]

    def process(self, instance):
        # Extract only current frame (override)
@@ -174,10 +174,16 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
            assert render_products, "no render products generated"
            exp_files = []
            for product in render_products:
-                for camera in layer_render_products.layer_data.cameras:
-                    exp_files.append(
-                        {product.productName: layer_render_products.get_files(
-                            product, camera)})
+                product_name = product.productName
+                if product.camera and layer_render_products.has_camera_token():
+                    product_name = "{}{}".format(
+                        product.camera,
+                        "_" + product_name if product_name else "")
+                exp_files.append(
+                    {
+                        product_name: layer_render_products.get_files(
+                            product)
+                    })

            self.log.info("multipart: {}".format(
                layer_render_products.multipart))

@@ -199,12 +205,14 @@ class CollectMayaRender(pyblish.api.ContextPlugin):

            # replace relative paths with absolute. Render products are
            # returned as list of dictionaries.
+            publish_meta_path = None
            for aov in exp_files:
                full_paths = []
                for file in aov[aov.keys()[0]]:
                    full_path = os.path.join(workspace, "renders", file)
                    full_path = full_path.replace("\\", "/")
                    full_paths.append(full_path)
+                    publish_meta_path = os.path.dirname(full_path)
                aov_dict[aov.keys()[0]] = full_paths

            frame_start_render = int(self.get_render_attribute(

@@ -230,6 +238,26 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                frame_end_handle = frame_end_render

            full_exp_files.append(aov_dict)
+
+            # find common path to store metadata
+            # so if image prefix is branching to many directories
+            # metadata file will be located in top-most common
+            # directory.
+            # TODO: use `os.path.commonpath()` after switch to Python 3
+            publish_meta_path = os.path.normpath(publish_meta_path)
+            common_publish_meta_path = os.path.splitdrive(
+                publish_meta_path)[0]
+            if common_publish_meta_path:
+                common_publish_meta_path += os.path.sep
+            for part in publish_meta_path.replace(
+                    common_publish_meta_path, "").split(os.path.sep):
+                common_publish_meta_path = os.path.join(
+                    common_publish_meta_path, part)
+                if part == expected_layer_name:
+                    break
+            self.log.info(
+                "Publish meta path: {}".format(common_publish_meta_path))
+
            self.log.info(full_exp_files)
            self.log.info("collecting layer: {}".format(layer_name))
            # Get layer specific settings, might be overrides

@@ -262,6 +290,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                # which was submitted originally
                "source": filepath,
                "expectedFiles": full_exp_files,
+                "publishRenderMetadataFolder": common_publish_meta_path,
                "resolutionWidth": cmds.getAttr("defaultResolution.width"),
                "resolutionHeight": cmds.getAttr("defaultResolution.height"),
                "pixelAspect": cmds.getAttr("defaultResolution.pixelAspect"),
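A worked example of the common-path loop above with an invented Windows path (as the TODO notes, this collapses to os.path.commonpath() on Python 3; the sketch assumes Windows path separators):

    import os

    publish_meta_path = os.path.normpath(
        "C:/prj/sh010/renders/maya/beautyLayer/v001")  # invented path
    expected_layer_name = "beautyLayer"

    common = os.path.splitdrive(publish_meta_path)[0]  # "C:"
    if common:
        common += os.path.sep
    for part in publish_meta_path.replace(common, "").split(os.path.sep):
        common = os.path.join(common, part)
        if part == expected_layer_name:
            break
    print(common)  # C:\prj\sh010\renders\maya\beautyLayer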
@@ -4,7 +4,7 @@ import os
from maya import cmds


-class CollectMayaScene(pyblish.api.ContextPlugin):
+class CollectWorkfile(pyblish.api.ContextPlugin):
    """Inject the current working file into context"""

    order = pyblish.api.CollectorOrder - 0.01

@@ -122,7 +122,7 @@ def no_workspace_dir():


class ExtractLook(openpype.api.Extractor):
-    """Extract Look (Maya Ascii + JSON)
+    """Extract Look (Maya Scene + JSON)

    Only extracts the sets (shadingEngines and alike) alongside a .json file
    that stores it relationships for the sets and "attribute" data for the

@@ -130,11 +130,12 @@ class ExtractLook(openpype.api.Extractor):

    """

-    label = "Extract Look (Maya ASCII + JSON)"
+    label = "Extract Look (Maya Scene + JSON)"
    hosts = ["maya"]
    families = ["look"]
    order = pyblish.api.ExtractorOrder + 0.2
    scene_type = "ma"
+    look_data_type = "json"

    @staticmethod
    def get_renderer_name():

@@ -176,6 +177,8 @@ class ExtractLook(openpype.api.Extractor):
            # no preset found
            pass

+        return "mayaAscii" if self.scene_type == "ma" else "mayaBinary"
+
    def process(self, instance):
        """Plugin entry point.

@@ -183,10 +186,12 @@ class ExtractLook(openpype.api.Extractor):
            instance: Instance to process.

        """
+        _scene_type = self.get_maya_scene_type(instance)
+
        # Define extract output file path
        dir_path = self.staging_dir(instance)
        maya_fname = "{0}.{1}".format(instance.name, self.scene_type)
-        json_fname = "{0}.json".format(instance.name)
+        json_fname = "{0}.{1}".format(instance.name, self.look_data_type)

        # Make texture dump folder
        maya_path = os.path.join(dir_path, maya_fname)
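With the defaults above, the extractor's output file names are built like this (instance name invented):

    instance_name = "lookMain"  # invented
    scene_type = "ma"           # class default; exported as "mayaAscii"
    look_data_type = "json"
    print("{0}.{1}".format(instance_name, scene_type))      # lookMain.ma
    print("{0}.{1}".format(instance_name, look_data_type))  # lookMain.json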
@@ -196,10 +201,102 @@ class ExtractLook(openpype.api.Extractor):

        # Remove all members of the sets so they are not included in the
        # exported file by accident
-        self.log.info("Extract sets (Maya ASCII) ...")
+        self.log.info("Extract sets (%s) ..." % _scene_type)
        lookdata = instance.data["lookData"]
        relationships = lookdata["relationships"]
        sets = relationships.keys()
        if not sets:
            self.log.info("No sets found")
            return

+        results = self.process_resources(instance, staging_dir=dir_path)
+        transfers = results["fileTransfers"]
+        hardlinks = results["fileHardlinks"]
+        hashes = results["fileHashes"]
+        remap = results["attrRemap"]
+
+        # Extract in correct render layer
+        layer = instance.data.get("renderlayer", "defaultRenderLayer")
+        with lib.renderlayer(layer):
+            # TODO: Ensure membership edits don't become renderlayer overrides
+            with lib.empty_sets(sets, force=True):
+                # To avoid Maya trying to automatically remap the file
+                # textures relative to the `workspace -directory` we force
+                # it to a fake temporary workspace. This fixes textures
+                # getting incorrectly remapped. (LKD-17, PLN-101)
+                with no_workspace_dir():
+                    with lib.attribute_values(remap):
+                        with avalon.maya.maintained_selection():
+                            cmds.select(sets, noExpand=True)
+                            cmds.file(
+                                maya_path,
+                                force=True,
+                                typ=_scene_type,
+                                exportSelected=True,
+                                preserveReferences=False,
+                                channels=True,
+                                constraints=True,
+                                expressions=True,
+                                constructionHistory=True,
+                            )
+
+        # Write the JSON data
+        self.log.info("Extract json..")
+        data = {
+            "attributes": lookdata["attributes"],
+            "relationships": relationships
+        }
+
+        with open(json_path, "w") as f:
+            json.dump(data, f)
+
+        if "files" not in instance.data:
+            instance.data["files"] = []
+        if "hardlinks" not in instance.data:
+            instance.data["hardlinks"] = []
+        if "transfers" not in instance.data:
+            instance.data["transfers"] = []
+
+        instance.data["files"].append(maya_fname)
+        instance.data["files"].append(json_fname)
+
+        if instance.data.get("representations") is None:
+            instance.data["representations"] = []
+
+        instance.data["representations"].append(
+            {
+                "name": self.scene_type,
+                "ext": self.scene_type,
+                "files": os.path.basename(maya_fname),
+                "stagingDir": os.path.dirname(maya_fname),
+            }
+        )
+        instance.data["representations"].append(
+            {
+                "name": self.look_data_type,
+                "ext": self.look_data_type,
+                "files": os.path.basename(json_fname),
+                "stagingDir": os.path.dirname(json_fname),
+            }
+        )
+
+        # Set up the resources transfers/links for the integrator
+        instance.data["transfers"].extend(transfers)
+        instance.data["hardlinks"].extend(hardlinks)
+
+        # Source hash for the textures
+        instance.data["sourceHashes"] = hashes
+
+        """
+        self.log.info("Returning colorspaces to their original values ...")
+        for attr, value in remap.items():
+            self.log.info("  - {}: {}".format(attr, value))
+            cmds.setAttr(attr, value, type="string")
+        """
+        self.log.info("Extracted instance '%s' to: %s" % (instance.name,
+                                                          maya_path))
+
+    def process_resources(self, instance, staging_dir):

        # Extract the textures to transfer, possibly convert with maketx and
        # remap the node paths to the destination path. Note that a source

@@ -218,7 +315,6 @@ class ExtractLook(openpype.api.Extractor):
            color_space = resource.get("color_space")

            for f in resource["files"]:
-
                files_metadata[os.path.normpath(f)] = {
                    "color_space": color_space}
                # files.update(os.path.normpath(f))

@@ -244,7 +340,7 @@ class ExtractLook(openpype.api.Extractor):
            source, mode, texture_hash = self._process_texture(
                filepath,
                do_maketx,
-                staging=dir_path,
+                staging=staging_dir,
                linearize=linearize,
                force=force_copy
            )
@@ -299,85 +395,13 @@ class ExtractLook(openpype.api.Extractor):

        self.log.info("Finished remapping destinations ...")

-        # Extract in correct render layer
-        layer = instance.data.get("renderlayer", "defaultRenderLayer")
-        with lib.renderlayer(layer):
-            # TODO: Ensure membership edits don't become renderlayer overrides
-            with lib.empty_sets(sets, force=True):
-                # To avoid Maya trying to automatically remap the file
-                # textures relative to the `workspace -directory` we force
-                # it to a fake temporary workspace. This fixes textures
-                # getting incorrectly remapped. (LKD-17, PLN-101)
-                with no_workspace_dir():
-                    with lib.attribute_values(remap):
-                        with avalon.maya.maintained_selection():
-                            cmds.select(sets, noExpand=True)
-                            cmds.file(
-                                maya_path,
-                                force=True,
-                                typ="mayaAscii",
-                                exportSelected=True,
-                                preserveReferences=False,
-                                channels=True,
-                                constraints=True,
-                                expressions=True,
-                                constructionHistory=True,
-                            )
-
-        # Write the JSON data
-        self.log.info("Extract json..")
-        data = {
-            "attributes": lookdata["attributes"],
-            "relationships": relationships
+        return {
+            "fileTransfers": transfers,
+            "fileHardlinks": hardlinks,
+            "fileHashes": hashes,
+            "attrRemap": remap,
        }

-        with open(json_path, "w") as f:
-            json.dump(data, f)
-
-        if "files" not in instance.data:
-            instance.data["files"] = []
-        if "hardlinks" not in instance.data:
-            instance.data["hardlinks"] = []
-        if "transfers" not in instance.data:
-            instance.data["transfers"] = []
-
-        instance.data["files"].append(maya_fname)
-        instance.data["files"].append(json_fname)
-
-        instance.data["representations"] = []
-        instance.data["representations"].append(
-            {
-                "name": "ma",
-                "ext": "ma",
-                "files": os.path.basename(maya_fname),
-                "stagingDir": os.path.dirname(maya_fname),
-            }
-        )
-        instance.data["representations"].append(
-            {
-                "name": "json",
-                "ext": "json",
-                "files": os.path.basename(json_fname),
-                "stagingDir": os.path.dirname(json_fname),
-            }
-        )
-
-        # Set up the resources transfers/links for the integrator
-        instance.data["transfers"].extend(transfers)
-        instance.data["hardlinks"].extend(hardlinks)
-
-        # Source hash for the textures
-        instance.data["sourceHashes"] = hashes
-
-        """
-        self.log.info("Returning colorspaces to their original values ...")
-        for attr, value in remap.items():
-            self.log.info("  - {}: {}".format(attr, value))
-            cmds.setAttr(attr, value, type="string")
-        """
-        self.log.info("Extracted instance '%s' to: %s" % (instance.name,
-                                                          maya_path))
-
    def resource_destination(self, instance, filepath, do_maketx):
        """Get resource destination path.

@@ -467,3 +491,26 @@ class ExtractLook(openpype.api.Extractor):
            return converted, COPY, texture_hash

        return filepath, COPY, texture_hash
+
+
+class ExtractModelRenderSets(ExtractLook):
+    """Extract model render attribute sets as model metadata
+
+    Only extracts the render attrib sets (NO shadingEngines) alongside
+    a .json file that stores it relationships for the sets and "attribute"
+    data for the instance members.
+
+    """
+
+    label = "Model Render Sets"
+    hosts = ["maya"]
+    families = ["model"]
+    scene_type_prefix = "meta.render."
+    look_data_type = "meta.render.json"
+
+    def get_maya_scene_type(self, instance):
+        typ = super(ExtractModelRenderSets, self).get_maya_scene_type(instance)
+        # add prefix
+        self.scene_type = self.scene_type_prefix + self.scene_type
+
+        return typ
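The prefixing in ExtractModelRenderSets.get_maya_scene_type() above makes the representation names line up with the ImportModelRender inventory action's pattern; illustrative values:

    scene_type_prefix = "meta.render."
    scene_type = scene_type_prefix + "ma"  # -> "meta.render.ma"
    # matches ImportModelRender.scene_type_regex == "meta.render.m[ab]"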
@@ -17,6 +17,7 @@ class ExtractMayaSceneRaw(openpype.api.Extractor):
    label = "Maya Scene (Raw)"
    hosts = ["maya"]
    families = ["mayaAscii",
+                "mayaScene",
                "setdress",
                "layout",
                "camerarig",
@@ -5,6 +5,8 @@ from __future__ import absolute_import
import pyblish.api
import openpype.api

from maya import cmds


class SelectInvalidInstances(pyblish.api.Action):
    """Select invalid instances in Outliner."""

@@ -18,13 +20,12 @@ class SelectInvalidInstances(pyblish.api.Action):
        # Get the errored instances
        failed = []
        for result in context.data["results"]:
            if result["error"] is None:
                continue
            if result["instance"] is None:
                continue
            if result["instance"] in failed:
                continue
            if result["plugin"] != plugin:
            if (
                result["error"] is None
                or result["instance"] is None
                or result["instance"] in failed
                or result["plugin"] != plugin
            ):
                continue

            failed.append(result["instance"])

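The refactor in this hunk collapses four early `continue` checks into a single boolean chain; the filtering behaviour is unchanged and can be exercised outside pyblish. A minimal sketch with hand-rolled result dicts (the sample data and function name are illustrative, not part of the plugin):

def failed_instances(results, plugin):
    failed = []
    for result in results:
        if (
            result["error"] is None
            or result["instance"] is None
            or result["instance"] in failed
            or result["plugin"] != plugin
        ):
            continue
        failed.append(result["instance"])
    return failed

results = [
    {"error": None, "instance": "a", "plugin": "P"},        # no error -> skipped
    {"error": "boom", "instance": "b", "plugin": "P"},      # collected
    {"error": "boom", "instance": "b", "plugin": "P"},      # duplicate -> skipped
    {"error": "boom", "instance": "c", "plugin": "other"},  # wrong plugin -> skipped
]
print(failed_instances(results, "P"))  # ['b']
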
@@ -44,25 +45,10 @@ class SelectInvalidInstances(pyblish.api.Action):
            self.deselect()

    def select(self, instances):
        if "nuke" in pyblish.api.registered_hosts():
            import avalon.nuke.lib
            import nuke
            avalon.nuke.lib.select_nodes(
                [nuke.toNode(str(x)) for x in instances]
            )

        if "maya" in pyblish.api.registered_hosts():
            from maya import cmds
            cmds.select(instances, replace=True, noExpand=True)

    def deselect(self):
        if "nuke" in pyblish.api.registered_hosts():
            import avalon.nuke.lib
            avalon.nuke.lib.reset_selection()

        if "maya" in pyblish.api.registered_hosts():
            from maya import cmds
            cmds.select(deselect=True)


class RepairSelectInvalidInstances(pyblish.api.Action):

@@ -92,23 +78,14 @@ class RepairSelectInvalidInstances(pyblish.api.Action):

        context_asset = context.data["assetEntity"]["name"]
        for instance in instances:
            if "nuke" in pyblish.api.registered_hosts():
                import openpype.hosts.nuke.api as nuke_api
                origin_node = instance[0]
                nuke_api.lib.recreate_instance(
                    origin_node, avalon_data={"asset": context_asset}
                )
            else:
                self.set_attribute(instance, context_asset)

    def set_attribute(self, instance, context_asset):
        if "maya" in pyblish.api.registered_hosts():
            from maya import cmds
            cmds.setAttr(
                instance.data.get("name") + ".asset",
                context_asset,
                type="string"
            )


class ValidateInstanceInContext(pyblish.api.InstancePlugin):

@@ -124,7 +101,7 @@ class ValidateInstanceInContext(pyblish.api.InstancePlugin):
    order = openpype.api.ValidateContentsOrder
    label = "Instance in same Context"
    optional = True
    hosts = ["maya", "nuke"]
    hosts = ["maya"]
    actions = [SelectInvalidInstances, RepairSelectInvalidInstances]

    def process(self, instance):

@@ -0,0 +1,47 @@
import pyblish.api
import maya.cmds as cmds
import openpype.api
import os


class ValidateLoadedPlugin(pyblish.api.ContextPlugin):
    """Ensure there are no unauthorized loaded plugins"""

    label = "Loaded Plugin"
    order = pyblish.api.ValidatorOrder
    host = ["maya"]
    actions = [openpype.api.RepairContextAction]

    @classmethod
    def get_invalid(cls, context):

        invalid = []
        loaded_plugin = cmds.pluginInfo(query=True, listPlugins=True)
        # get variable from OpenPype settings
        whitelist_native_plugins = cls.whitelist_native_plugins
        authorized_plugins = cls.authorized_plugins or []

        for plugin in loaded_plugin:
            if not whitelist_native_plugins and os.getenv('MAYA_LOCATION') \
                    in cmds.pluginInfo(plugin, query=True, path=True):
                continue
            if plugin not in authorized_plugins:
                invalid.append(plugin)

        return invalid

    def process(self, context):

        invalid = self.get_invalid(context)
        if invalid:
            raise RuntimeError(
                "Found forbidden plugin name: {}".format(", ".join(invalid))
            )

    @classmethod
    def repair(cls, context):
        """Unload forbidden plugins"""

        for plugin in cls.get_invalid(context):
            cmds.pluginInfo(plugin, edit=True, autoload=False)
            cmds.unloadPlugin(plugin, force=True)

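The whitelist check above is straightforward to exercise outside Maya once the `cmds.pluginInfo` queries are factored out. A minimal sketch, assuming plugin paths and the Maya install location arrive as plain strings (the function name and sample paths are hypothetical):

def filter_unauthorized(plugin_paths, maya_location,
                        whitelist_native_plugins, authorized_plugins):
    """Return plugin names that are neither native nor authorized.

    plugin_paths: mapping of plugin name -> file path, mirroring what
    ``cmds.pluginInfo(plugin, query=True, path=True)`` would report.
    """
    invalid = []
    for plugin, path in plugin_paths.items():
        # Native plugins live under the Maya install; skip them unless
        # whitelisting of native plugins has been switched off.
        if not whitelist_native_plugins and maya_location in path:
            continue
        if plugin not in authorized_plugins:
            invalid.append(plugin)
    return invalid

print(filter_unauthorized(
    {"mtoa": "/opt/autodesk/arnold/mtoa.so",                       # illustrative
     "fbxmaya": "/usr/autodesk/maya2022/plug-ins/fbxmaya.so"},     # illustrative
    maya_location="/usr/autodesk/maya2022",
    whitelist_native_plugins=False,
    authorized_plugins=[],
))  # -> ['mtoa']
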
@@ -76,7 +76,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
        r'%a|<aov>|<renderpass>', re.IGNORECASE)
    R_LAYER_TOKEN = re.compile(
        r'%l|<layer>|<renderlayer>', re.IGNORECASE)
    R_CAMERA_TOKEN = re.compile(r'%c|<camera>', re.IGNORECASE)
    R_CAMERA_TOKEN = re.compile(r'%c|Camera>')
    R_SCENE_TOKEN = re.compile(r'%s|<scene>', re.IGNORECASE)

    DEFAULT_PADDING = 4

@@ -126,7 +126,9 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
        if len(cameras) > 1 and not re.search(cls.R_CAMERA_TOKEN, prefix):
            invalid = True
            cls.log.error("Wrong image prefix [ {} ] - "
                          "doesn't have: '<camera>' token".format(prefix))
                          "doesn't have: '<Camera>' token".format(prefix))
            cls.log.error(
                "Note that it needs to have a capital 'C' at the beginning")

        # renderer specific checks
        if renderer == "vray":

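Note that the updated `R_CAMERA_TOKEN` pattern drops the `re.IGNORECASE` flag, so only the capitalized `<Camera>` spelling matches, which is exactly what the validator now warns about. A quick standalone check (the prefixes are illustrative):

import re

# The updated pattern from the hunk above: no IGNORECASE flag,
# so only the capitalized token is recognized.
R_CAMERA_TOKEN = re.compile(r'%c|Camera>')

for prefix in ("maya/<Scene>/<Camera>/beauty",
               "maya/<scene>/<camera>/beauty"):
    print(prefix, "->", bool(re.search(R_CAMERA_TOKEN, prefix)))
# maya/<Scene>/<Camera>/beauty -> True
# maya/<scene>/<camera>/beauty -> False
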
@@ -0,0 +1,25 @@

import pyblish.api
import openpype.api


class ValidateSetdressRoot(pyblish.api.InstancePlugin):
    """Validate that the setdress top root node is being published."""

    order = openpype.api.ValidateContentsOrder
    label = "SetDress Root"
    hosts = ["maya"]
    families = ["setdress"]

    def process(self, instance):
        from maya import cmds

        if instance.data.get("exactSetMembersOnly"):
            return

        set_member = instance.data["setMembers"]
        root = cmds.ls(set_member, assemblies=True, long=True)

        if not root or root[0] not in set_member:
            raise Exception("Setdress top root node is not being published.")

@@ -21,6 +21,7 @@ def add_implementation_envs(env, _app):
        new_nuke_paths.append(norm_path)

    env["NUKE_PATH"] = os.pathsep.join(new_nuke_paths)
    env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None)

    # Try to add QuickTime to PATH
    quick_time_path = "C:/Program Files (x86)/QuickTime/QTSystem"

@@ -7,7 +6,6 @@ from collections import OrderedDict


from avalon import api, io, lib
from avalon.tools import workfiles
import avalon.nuke
from avalon.nuke import lib as anlib
from avalon.nuke import (

@@ -24,7 +23,7 @@ from openpype.api import (
    get_current_project_settings,
    ApplicationManager
)

from openpype.tools.utils import host_tools
import nuke

from .utils import set_context_favorites

@@ -287,15 +286,16 @@ def script_name():

def add_button_write_to_read(node):
    name = "createReadNode"
    label = "Cread Read From Rendered"
    value = "import write_to_read;write_to_read.write_to_read(nuke.thisNode())"
    label = "Create Read From Rendered"
    value = "import write_to_read;\
write_to_read.write_to_read(nuke.thisNode(), allow_relative=False)"
    knob = nuke.PyScript_Knob(name, label, value)
    knob.clearFlag(nuke.STARTLINE)
    node.addKnob(knob)


def create_write_node(name, data, input=None, prenodes=None,
                      review=True, linked_knobs=None):
                      review=True, linked_knobs=None, farm=True):
    ''' Creating write node which is group node

    Arguments:

@@ -421,7 +421,15 @@ def create_write_node(name, data, input=None, prenodes=None,
                ))
                continue

            if knob and value:
            if not knob and not value:
                continue

            log.info((knob, value))

            if isinstance(value, str):
                if "[" in value:
                    now_node[knob].setExpression(value)
                else:
                    now_node[knob].setValue(value)

        # connect to previous node

@@ -466,7 +474,7 @@ def create_write_node(name, data, input=None, prenodes=None,
    # imprinting group node
    anlib.set_avalon_knob_data(GN, data["avalon"])
    anlib.add_publish_knob(GN)
    add_rendering_knobs(GN)
    add_rendering_knobs(GN, farm)

    if review:
        add_review_knob(GN)

@@ -526,7 +534,7 @@ def create_write_node(name, data, input=None, prenodes=None,
    return GN


def add_rendering_knobs(node):
def add_rendering_knobs(node, farm=True):
    ''' Adds additional rendering knobs to given node

    Arguments:

@@ -535,9 +543,13 @@ def add_rendering_knobs(node):
    Return:
        node (obj): with added knobs
    '''
    knob_options = [
        "Use existing frames", "Local"]
    if farm:
        knob_options.append("On farm")

    if "render" not in node.knobs():
        knob = nuke.Enumeration_Knob("render", "", [
            "Use existing frames", "Local", "On farm"])
        knob = nuke.Enumeration_Knob("render", "", knob_options)
        knob.clearFlag(nuke.STARTLINE)
        node.addKnob(knob)
    return node

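The new `farm` flag only trims the render-target option list; a quick pure-Python check of that list-building logic (no Nuke session required):

def rendering_knob_options(farm=True):
    # Mirrors the option list built in add_rendering_knobs above.
    knob_options = ["Use existing frames", "Local"]
    if farm:
        knob_options.append("On farm")
    return knob_options

print(rendering_knob_options(farm=True))   # ['Use existing frames', 'Local', 'On farm']
print(rendering_knob_options(farm=False))  # ['Use existing frames', 'Local']
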
@@ -1019,27 +1031,6 @@ class WorkfileSettings(object):
            log.error(msg)
            nuke.message(msg)

        bbox = self._asset_entity.get('data', {}).get('crop')

        if bbox:
            try:
                x, y, r, t = bbox.split(".")
                data.update(
                    {
                        "x": int(x),
                        "y": int(y),
                        "r": int(r),
                        "t": int(t),
                    }
                )
            except Exception as e:
                bbox = None
                msg = ("{}:{} \nFormat:Crop need to be set with dots, "
                       "example: 0.0.1920.1080, "
                       "/nSetting to default").format(__name__, e)
                log.error(msg)
                nuke.message(msg)

        existing_format = None
        for format in nuke.formats():
            if data["name"] == format.name():

@@ -1051,12 +1042,6 @@ class WorkfileSettings(object):
            existing_format.setWidth(data["width"])
            existing_format.setHeight(data["height"])
            existing_format.setPixelAspect(data["pixel_aspect"])

            if bbox:
                existing_format.setX(data["x"])
                existing_format.setY(data["y"])
                existing_format.setR(data["r"])
                existing_format.setT(data["t"])
        else:
            format_string = self.make_format_string(**data)
            log.info("Creating new format: {}".format(format_string))

@@ -1676,7 +1661,7 @@ def launch_workfiles_app():

    if not opnl.workfiles_launched:
        opnl.workfiles_launched = True
        workfiles.show(os.environ["AVALON_WORKDIR"])
        host_tools.show_workfiles()


def process_workfile_builder():

@@ -4,7 +4,7 @@ from avalon.api import Session

from .lib import WorkfileSettings
from openpype.api import Logger, BuildWorkfile, get_current_project_settings
from openpype.tools import workfiles
from openpype.tools.utils import host_tools

log = Logger().get_logger(__name__)

@@ -25,7 +25,7 @@ def install():
        menu.removeItem(rm_item[1].name())
    menu.addCommand(
        name,
        workfiles.show,
        host_tools.show_workfiles,
        index=2
    )
    menu.addSeparator(index=3)

@@ -1,4 +1,10 @@
import random
import string

import avalon.nuke
from avalon.nuke import lib as anlib
from avalon import api

from openpype.api import (
    get_current_project_settings,
    PypeCreatorMixin

@@ -23,3 +29,68 @@ class PypeCreator(PypeCreatorMixin, avalon.nuke.pipeline.Creator):
            self.log.error(msg + '\n\nPlease use other subset name!')
            raise NameError("`{0}: {1}".format(__name__, msg))
        return


def get_review_presets_config():
    settings = get_current_project_settings()
    review_profiles = (
        settings["global"]
        ["publish"]
        ["ExtractReview"]
        ["profiles"]
    )

    outputs = {}
    for profile in review_profiles:
        outputs.update(profile.get("outputs", {}))

    return [str(name) for name, _prop in outputs.items()]


class NukeLoader(api.Loader):
    container_id_knob = "containerId"
    container_id = ''.join(random.choice(
        string.ascii_uppercase + string.digits) for _ in range(10))

    def get_container_id(self, node):
        id_knob = node.knobs().get(self.container_id_knob)
        return id_knob.value() if id_knob else None

    def get_members(self, source):
        """Return nodes that have the same 'containerId' as `source`"""
        source_id = self.get_container_id(source)
        return [node for node in nuke.allNodes(recurseGroups=True)
                if self.get_container_id(node) == source_id
                and node is not source] if source_id else []

    def set_as_member(self, node):
        source_id = self.get_container_id(node)

        if source_id:
            node[self.container_id_knob].setValue(self.container_id)
        else:
            HIDEN_FLAG = 0x00040000
            _knob = anlib.Knobby(
                "String_Knob",
                self.container_id,
                flags=[nuke.READ_ONLY, HIDEN_FLAG])
            knob = _knob.create(self.container_id_knob)
            node.addKnob(knob)

    def clear_members(self, parent_node):
        members = self.get_members(parent_node)

        dependent_nodes = None
        for node in members:
            _depndc = [n for n in node.dependent() if n not in members]
            if not _depndc:
                continue

            dependent_nodes = _depndc
            break

        for member in members:
            self.log.info("removing node: `{}`".format(member.name()))
            nuke.delete(member)

        return dependent_nodes

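`get_review_presets_config` above is a plain dictionary walk; a standalone sketch with a hypothetical settings snippet shaped like the lookup chain (the profile and output names are made up):

# Hypothetical settings snippet shaped like the lookup chain above.
settings = {
    "global": {"publish": {"ExtractReview": {"profiles": [
        {"outputs": {"h264": {}, "png": {}}},
        {"outputs": {"prores": {}}},
    ]}}}
}

outputs = {}
for profile in settings["global"]["publish"]["ExtractReview"]["profiles"]:
    outputs.update(profile.get("outputs", {}))

print(sorted(str(name) for name in outputs))  # ['h264', 'png', 'prores']
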
@@ -99,7 +99,7 @@ class CreateWriteRender(plugin.PypeCreator):
            "fpath_template": ("{work}/renders/nuke/{subset}"
                               "/{subset}.{frame}.{ext}")})

        # add crop node to cut off all outside of format bounding box
        # add reformat node to cut off all outside of format bounding box
        # get width and height
        try:
            width, height = (selected_node.width(), selected_node.height())

@@ -109,15 +109,11 @@ class CreateWriteRender(plugin.PypeCreator):

        _prenodes = [
            {
                "name": "Crop01",
                "class": "Crop",
                "name": "Reformat01",
                "class": "Reformat",
                "knobs": [
                    ("box", [
                        0.0,
                        0.0,
                        width,
                        height
                    ])
                    ("resize", 0),
                    ("black_outside", 1),
                ],
                "dependent": None
            }

openpype/hosts/nuke/plugins/create/create_write_still.py (new file, 141 lines)
@@ -0,0 +1,141 @@
from collections import OrderedDict
from openpype.hosts.nuke.api import (
    plugin,
    lib)
import nuke


class CreateWriteStill(plugin.PypeCreator):
    # change this to template preset
    name = "WriteStillFrame"
    label = "Create Write Still Image"
    hosts = ["nuke"]
    n_class = "Write"
    family = "still"
    icon = "image"
    defaults = [
        "ImageFrame{:0>4}".format(nuke.frame()),
        "MPFrame{:0>4}".format(nuke.frame()),
        "LayoutFrame{:0>4}".format(nuke.frame())
    ]

    def __init__(self, *args, **kwargs):
        super(CreateWriteStill, self).__init__(*args, **kwargs)

        data = OrderedDict()

        data["family"] = self.family
        data["families"] = self.n_class

        for k, v in self.data.items():
            if k not in data.keys():
                data.update({k: v})

        self.data = data
        self.nodes = nuke.selectedNodes()
        self.log.debug("_ self.data: '{}'".format(self.data))

    def process(self):

        inputs = []
        outputs = []
        instance = nuke.toNode(self.data["subset"])
        selected_node = None

        # use selection
        if (self.options or {}).get("useSelection"):
            nodes = self.nodes

            if not (len(nodes) < 2):
                msg = ("Select only one node. "
                       "The node you want to connect to, "
                       "or tick off `Use selection`")
                self.log.error(msg)
                nuke.message(msg)
                return

            if len(nodes) == 0:
                msg = (
                    "No nodes selected. Please select a single node to connect"
                    " to or tick off `Use selection`"
                )
                self.log.error(msg)
                nuke.message(msg)
                return

            selected_node = nodes[0]
            inputs = [selected_node]
            outputs = selected_node.dependent()

            if instance:
                if (instance.name() in selected_node.name()):
                    selected_node = instance.dependencies()[0]

        # if node already exists
        if instance:
            # collect input / outputs
            inputs = instance.dependencies()
            outputs = instance.dependent()
            selected_node = inputs[0]
            # remove old one
            nuke.delete(instance)

        # recreate new
        write_data = {
            "nodeclass": self.n_class,
            "families": [self.family],
            "avalon": self.data
        }

        # add creator data
        creator_data = {"creator": self.__class__.__name__}
        self.data.update(creator_data)
        write_data.update(creator_data)

        self.log.info("Adding template path from plugin")
        write_data.update({
            "fpath_template": (
                "{work}/renders/nuke/{subset}/{subset}.{ext}")})

        _prenodes = [
            {
                "name": "FrameHold01",
                "class": "FrameHold",
                "knobs": [
                    ("first_frame", nuke.frame())
                ],
                "dependent": None
            }
        ]

        write_node = lib.create_write_node(
            self.name,
            write_data,
            input=selected_node,
            review=False,
            prenodes=_prenodes,
            farm=False,
            linked_knobs=["channels", "___", "first", "last", "use_limit"])

        # relinking to collected connections
        for i, input in enumerate(inputs):
            write_node.setInput(i, input)

        write_node.autoplace()

        for output in outputs:
            output.setInput(0, write_node)

        # link frame hold to group node
        write_node.begin()
        for n in nuke.allNodes():
            # get write node
            if n.Class() in "Write":
                w_node = n
        write_node.end()

        w_node["use_limit"].setValue(True)
        w_node["first"].setValue(nuke.frame())
        w_node["last"].setValue(nuke.frame())

        return write_node
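The `defaults` above bake the current Nuke frame into the suggested subset names via zero-padded `str.format`; outside Nuke the same expression behaves like this (42 stands in for `nuke.frame()`):

frame = 42  # stand-in for nuke.frame()
print("ImageFrame{:0>4}".format(frame))  # ImageFrame0042
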
openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py (new file, 37 lines)
@@ -0,0 +1,37 @@
from avalon import api, style
from avalon.nuke import lib as anlib
from openpype.api import (
    Logger)


class RepairOldLoaders(api.InventoryAction):

    label = "Repair Old Loaders"
    icon = "gears"
    color = style.colors.alert

    log = Logger().get_logger(__name__)

    def process(self, containers):
        import nuke
        new_loader = "LoadClip"

        for cdata in containers:
            orig_loader = cdata["loader"]
            orig_name = cdata["objectName"]
            if orig_loader not in ["LoadSequence", "LoadMov"]:
                self.log.warning(
                    "This repair action only works on "
                    "`LoadSequence` and `LoadMov` Loaders")
                continue

            new_name = orig_name.replace(orig_loader, new_loader)
            node = nuke.toNode(cdata["objectName"])

            cdata.update({
                "loader": new_loader,
                "objectName": new_name
            })
            node["name"].setValue(new_name)
            # set updated data on the avalon knob
            anlib.set_avalon_knob_data(node, cdata)

@@ -8,10 +8,10 @@ class SelectContainers(api.InventoryAction):
    color = "#d8d8d8"

    def process(self, containers):

        import nuke
        import avalon.nuke

        nodes = [i["_node"] for i in containers]
        nodes = [nuke.toNode(i["objectName"]) for i in containers]

        with avalon.nuke.viewer_update_and_undo_stop():
            # clear previous_selection

@@ -1,68 +0,0 @@
# from avalon import api, style
# from avalon.vendor.Qt import QtGui, QtWidgets
#
# import avalon.fusion
#
#
# class FusionSetToolColor(api.InventoryAction):
#     """Update the color of the selected tools"""
#
#     label = "Set Tool Color"
#     icon = "plus"
#     color = "#d8d8d8"
#     _fallback_color = QtGui.QColor(1.0, 1.0, 1.0)
#
#     def process(self, containers):
#         """Color all selected tools the selected colors"""
#
#         result = []
#         comp = avalon.fusion.get_current_comp()
#
#         # Get tool color
#         first = containers[0]
#         tool = first["_node"]
#         color = tool.TileColor
#
#         if color is not None:
#             qcolor = QtGui.QColor().fromRgbF(color["R"], color["G"], color["B"])
#         else:
#             qcolor = self._fallback_color
#
#         # Launch pick color
#         picked_color = self.get_color_picker(qcolor)
#         if not picked_color:
#             return
#
#         with avalon.fusion.comp_lock_and_undo_chunk(comp):
#             for container in containers:
#                 # Convert color to RGB 0-1 floats
#                 rgb_f = picked_color.getRgbF()
#                 rgb_f_table = {"R": rgb_f[0], "G": rgb_f[1], "B": rgb_f[2]}
#
#                 # Update tool
#                 tool = container["_node"]
#                 tool.TileColor = rgb_f_table
#
#                 result.append(container)
#
#         return result
#
#     def get_color_picker(self, color):
#         """Launch color picker and return chosen color
#
#         Args:
#             color(QtGui.QColor): Start color to display
#
#         Returns:
#             QtGui.QColor
#
#         """
#
#         color_dialog = QtWidgets.QColorDialog(color)
#         color_dialog.setStyleSheet(style.load_stylesheet())
#
#         accepted = color_dialog.exec_()
#         if not accepted:
#             return
#
#         return color_dialog.selectedColor()
openpype/hosts/nuke/plugins/load/load_clip.py (new file, 371 lines)
@@ -0,0 +1,371 @@
import nuke
from avalon.vendor import qargparse
from avalon import api, io

from openpype.hosts.nuke.api.lib import (
    get_imageio_input_colorspace
)
from avalon.nuke import (
    containerise,
    update_container,
    viewer_update_and_undo_stop,
    maintained_selection
)
from openpype.hosts.nuke.api import plugin


class LoadClip(plugin.NukeLoader):
    """Load clip into Nuke

    Either an image sequence or a video file.
    """

    families = [
        "source",
        "plate",
        "render",
        "prerender",
        "review"
    ]
    representations = [
        "exr",
        "dpx",
        "mov",
        "review",
        "mp4"
    ]

    label = "Load Clip"
    order = -20
    icon = "file-video-o"
    color = "white"

    script_start = int(nuke.root()["first_frame"].value())

    # option gui
    defaults = {
        "start_at_workfile": True
    }

    options = [
        qargparse.Boolean(
            "start_at_workfile",
            help="Load at workfile start frame",
            default=True
        )
    ]

    node_name_template = "{class_name}_{ext}"

    @classmethod
    def get_representations(cls):
        return (
            cls.representations
            + cls._representations
            + plugin.get_review_presets_config()
        )

    def load(self, context, name, namespace, options):

        is_sequence = len(context["representation"]["files"]) > 1

        file = self.fname.replace("\\", "/")

        start_at_workfile = options.get(
            "start_at_workfile", self.defaults["start_at_workfile"])

        version = context['version']
        version_data = version.get("data", {})
        repr_id = context["representation"]["_id"]
        colorspace = version_data.get("colorspace")
        iio_colorspace = get_imageio_input_colorspace(file)
        repr_cont = context["representation"]["context"]

        self.log.info("version_data: {}\n".format(version_data))
        self.log.debug(
            "Representation id `{}` ".format(repr_id))

        self.handle_start = version_data.get("handleStart", 0)
        self.handle_end = version_data.get("handleEnd", 0)

        first = version_data.get("frameStart", None)
        last = version_data.get("frameEnd", None)
        first -= self.handle_start
        last += self.handle_end

        if not is_sequence:
            duration = last - first + 1
            first = 1
            last = first + duration
        elif "#" not in file:
            frame = repr_cont.get("frame")
            assert frame, "Representation is not sequence"

            padding = len(frame)
            file = file.replace(frame, "#" * padding)

        # Fallback to asset name when namespace is None
        if namespace is None:
            namespace = context['asset']['name']

        if not file:
            self.log.warning(
                "Representation id `{}` is failing to load".format(repr_id))
            return

        name_data = {
            "asset": repr_cont["asset"],
            "subset": repr_cont["subset"],
            "representation": context["representation"]["name"],
            "ext": repr_cont["representation"],
            "id": context["representation"]["_id"],
            "class_name": self.__class__.__name__
        }

        read_name = self.node_name_template.format(**name_data)

        # Create the Loader with the filename path set
        read_node = nuke.createNode(
            "Read",
            "name {}".format(read_name))

        # to avoid multiple undo steps for rest of process
        # we will switch off undo-ing
        with viewer_update_and_undo_stop():
            read_node["file"].setValue(file)

            # Set colorspace defined in version data
            if colorspace:
                read_node["colorspace"].setValue(str(colorspace))
            elif iio_colorspace is not None:
                read_node["colorspace"].setValue(iio_colorspace)

            self.set_range_to_node(read_node, first, last, start_at_workfile)

            # add additional metadata from the version to imprint Avalon knob
            add_keys = ["frameStart", "frameEnd",
                        "source", "colorspace", "author", "fps", "version",
                        "handleStart", "handleEnd"]

            data_imprint = {}
            for k in add_keys:
                if k == 'version':
                    data_imprint.update({k: context["version"]['name']})
                else:
                    data_imprint.update(
                        {k: context["version"]['data'].get(k, str(None))})

            data_imprint.update({"objectName": read_name})

            read_node["tile_color"].setValue(int("0x4ecd25ff", 16))

            container = containerise(
                read_node,
                name=name,
                namespace=namespace,
                context=context,
                loader=self.__class__.__name__,
                data=data_imprint)

        if version_data.get("retime", None):
            self.make_retimes(read_node, version_data)

        self.set_as_member(read_node)

        return container

    def switch(self, container, representation):
        self.update(container, representation)

    def update(self, container, representation):
        """Update the Loader's path

        Nuke automatically tries to reset some variables when changing
        the loader's path to a new file. These automatic changes are to its
        inputs:

        """

        is_sequence = len(representation["files"]) > 1

        read_node = nuke.toNode(container['objectName'])
        file = api.get_representation_path(representation).replace("\\", "/")

        start_at_workfile = bool("start at" in read_node['frame_mode'].value())

        version = io.find_one({
            "type": "version",
            "_id": representation["parent"]
        })
        version_data = version.get("data", {})
        repr_id = representation["_id"]
        colorspace = version_data.get("colorspace")
        iio_colorspace = get_imageio_input_colorspace(file)
        repr_cont = representation["context"]

        self.handle_start = version_data.get("handleStart", 0)
        self.handle_end = version_data.get("handleEnd", 0)

        first = version_data.get("frameStart", None)
        last = version_data.get("frameEnd", None)
        first -= self.handle_start
        last += self.handle_end

        if not is_sequence:
            duration = last - first + 1
            first = 1
            last = first + duration
        elif "#" not in file:
            frame = repr_cont.get("frame")
            assert frame, "Representation is not sequence"

            padding = len(frame)
            file = file.replace(frame, "#" * padding)

        if not file:
            self.log.warning(
                "Representation id `{}` is failing to load".format(repr_id))
            return

        read_node["file"].setValue(file)

        # to avoid multiple undo steps for rest of process
        # we will switch off undo-ing
        with viewer_update_and_undo_stop():

            # Set colorspace defined in version data
            if colorspace:
                read_node["colorspace"].setValue(str(colorspace))
            elif iio_colorspace is not None:
                read_node["colorspace"].setValue(iio_colorspace)

            self.set_range_to_node(read_node, first, last, start_at_workfile)

            updated_dict = {
                "representation": str(representation["_id"]),
                "frameStart": str(first),
                "frameEnd": str(last),
                "version": str(version.get("name")),
                "colorspace": colorspace,
                "source": version_data.get("source"),
                "handleStart": str(self.handle_start),
                "handleEnd": str(self.handle_end),
                "fps": str(version_data.get("fps")),
                "author": version_data.get("author"),
                "outputDir": version_data.get("outputDir"),
            }

            # change color of read_node
            # get all versions in list
            versions = io.find({
                "type": "version",
                "parent": version["parent"]
            }).distinct('name')

            max_version = max(versions)

            if version.get("name") not in [max_version]:
                read_node["tile_color"].setValue(int("0xd84f20ff", 16))
            else:
                read_node["tile_color"].setValue(int("0x4ecd25ff", 16))

            # Update the imprinted representation
            update_container(
                read_node,
                updated_dict
            )
            self.log.info("updated to version: {}".format(version.get("name")))

        if version_data.get("retime", None):
            self.make_retimes(read_node, version_data)
        else:
            self.clear_members(read_node)

        self.set_as_member(read_node)

    def set_range_to_node(self, read_node, first, last, start_at_workfile):
        read_node['origfirst'].setValue(int(first))
        read_node['first'].setValue(int(first))
        read_node['origlast'].setValue(int(last))
        read_node['last'].setValue(int(last))

        # set start frame depending on workfile or version
        self.loader_shift(read_node, start_at_workfile)

    def remove(self, container):

        from avalon.nuke import viewer_update_and_undo_stop

        read_node = nuke.toNode(container['objectName'])
        assert read_node.Class() == "Read", "Must be Read"

        with viewer_update_and_undo_stop():
            members = self.get_members(read_node)
            nuke.delete(read_node)
            for member in members:
                nuke.delete(member)

    def make_retimes(self, parent_node, version_data):
        ''' Create all retime and timewarping nodes with copied animation '''
        speed = version_data.get('speed', 1)
        time_warp_nodes = version_data.get('timewarps', [])
        last_node = None
        source_id = self.get_container_id(parent_node)
        self.log.info("__ source_id: {}".format(source_id))
        self.log.info("__ members: {}".format(self.get_members(parent_node)))
        dependent_nodes = self.clear_members(parent_node)

        with maintained_selection():
            parent_node['selected'].setValue(True)

            if speed != 1:
                rtn = nuke.createNode(
                    "Retime",
                    "speed {}".format(speed))

                rtn["before"].setValue("continue")
                rtn["after"].setValue("continue")
                rtn["input.first_lock"].setValue(True)
                rtn["input.first"].setValue(
                    self.script_start
                )
                self.set_as_member(rtn)
                last_node = rtn

            if time_warp_nodes != []:
                start_anim = self.script_start + (self.handle_start / speed)
                for timewarp in time_warp_nodes:
                    twn = nuke.createNode(
                        timewarp["Class"],
                        "name {}".format(timewarp["name"])
                    )
                    if isinstance(timewarp["lookup"], list):
                        # if array for animation
                        twn["lookup"].setAnimated()
                        for i, value in enumerate(timewarp["lookup"]):
                            twn["lookup"].setValueAt(
                                (start_anim + i) + value,
                                (start_anim + i))
                    else:
                        # if static value `int`
                        twn["lookup"].setValue(timewarp["lookup"])

                    self.set_as_member(twn)
                    last_node = twn

            if dependent_nodes:
                # connect to original inputs
                for i, n in enumerate(dependent_nodes):
                    last_node.setInput(i, n)

    def loader_shift(self, read_node, workfile_start=False):
        """ Set start frame of read node to a workfile start

        Args:
            read_node (nuke.Node): The nuke's read node
            workfile_start (bool): set workfile start frame if true

        """
        if workfile_start:
            read_node['frame_mode'].setValue("start at")
            read_node['frame'].setValue(str(self.script_start))

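The frame-range handling shared by `load()` and `update()` above is plain arithmetic and can be sanity-checked outside Nuke; a minimal sketch of the same steps (function name and sample values are illustrative):

def compute_range(first, last, handle_start, handle_end, is_sequence):
    # Expand the version range by its handles, as load()/update() do.
    first -= handle_start
    last += handle_end
    if not is_sequence:
        # Single movie files are remapped to start at frame 1.
        duration = last - first + 1
        first = 1
        last = first + duration
    return first, last

print(compute_range(1001, 1100, 10, 10, is_sequence=True))   # (991, 1110)
print(compute_range(1001, 1100, 10, 10, is_sequence=False))  # (1, 121)
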
@@ -12,8 +12,16 @@ from openpype.hosts.nuke.api.lib import (
class LoadImage(api.Loader):
    """Load still image into Nuke"""

    families = ["render", "source", "plate", "review", "image"]
    representations = ["exr", "dpx", "jpg", "jpeg", "png", "psd"]
    families = [
        "render2d",
        "source",
        "plate",
        "render",
        "prerender",
        "review",
        "image"
    ]
    representations = ["exr", "dpx", "jpg", "jpeg", "png", "psd", "tiff"]

    label = "Load Image"
    order = -10

@@ -33,6 +41,10 @@ class LoadImage(api.Loader):
        )
    ]

    @classmethod
    def get_representations(cls):
        return cls.representations + cls._representations

    def load(self, context, name, namespace, options):
        from avalon.nuke import (
            containerise,

@@ -1,347 +0,0 @@
import nuke
from avalon.vendor import qargparse
from avalon import api, io
from openpype.api import get_current_project_settings
from openpype.hosts.nuke.api.lib import (
    get_imageio_input_colorspace
)


def add_review_presets_config():
    returning = {
        "families": list(),
        "representations": list()
    }
    settings = get_current_project_settings()
    review_profiles = (
        settings["global"]
        ["publish"]
        ["ExtractReview"]
        ["profiles"]
    )

    outputs = {}
    for profile in review_profiles:
        outputs.update(profile.get("outputs", {}))

    for output, properities in outputs.items():
        returning["representations"].append(output)
        returning["families"] += properities.get("families", [])

    return returning


class LoadMov(api.Loader):
    """Load mov file into Nuke"""
    families = ["render", "source", "plate", "review"]
    representations = ["mov", "review", "mp4"]

    label = "Load mov"
    order = -10
    icon = "code-fork"
    color = "orange"

    first_frame = nuke.root()["first_frame"].value()

    # options gui
    defaults = {
        "start_at_workfile": True
    }

    options = [
        qargparse.Boolean(
            "start_at_workfile",
            help="Load at workfile start frame",
            default=True
        )
    ]

    node_name_template = "{class_name}_{ext}"

    def load(self, context, name, namespace, options):
        from avalon.nuke import (
            containerise,
            viewer_update_and_undo_stop
        )

        start_at_workfile = options.get(
            "start_at_workfile", self.defaults["start_at_workfile"])

        version = context['version']
        version_data = version.get("data", {})
        repr_id = context["representation"]["_id"]

        self.handle_start = version_data.get("handleStart", 0)
        self.handle_end = version_data.get("handleEnd", 0)

        orig_first = version_data.get("frameStart")
        orig_last = version_data.get("frameEnd")
        diff = orig_first - 1

        first = orig_first - diff
        last = orig_last - diff

        colorspace = version_data.get("colorspace")
        repr_cont = context["representation"]["context"]

        self.log.debug(
            "Representation id `{}` ".format(repr_id))

        context["representation"]["_id"]
        # create handles offset (only to last, because of mov)
        last += self.handle_start + self.handle_end

        # Fallback to asset name when namespace is None
        if namespace is None:
            namespace = context['asset']['name']

        file = self.fname

        if not file:
            self.log.warning(
                "Representation id `{}` is failing to load".format(repr_id))
            return

        file = file.replace("\\", "/")

        name_data = {
            "asset": repr_cont["asset"],
            "subset": repr_cont["subset"],
            "representation": context["representation"]["name"],
            "ext": repr_cont["representation"],
            "id": context["representation"]["_id"],
            "class_name": self.__class__.__name__
        }

        read_name = self.node_name_template.format(**name_data)

        read_node = nuke.createNode(
            "Read",
            "name {}".format(read_name)
        )

        # to avoid multiple undo steps for rest of process
        # we will switch off undo-ing
        with viewer_update_and_undo_stop():
            read_node["file"].setValue(file)

            read_node["origfirst"].setValue(first)
            read_node["first"].setValue(first)
            read_node["origlast"].setValue(last)
            read_node["last"].setValue(last)
            read_node['frame_mode'].setValue("start at")

            if start_at_workfile:
                # start at workfile start
                read_node['frame'].setValue(str(self.first_frame))
            else:
                # start at version frame start
                read_node['frame'].setValue(
                    str(orig_first - self.handle_start))

            if colorspace:
                read_node["colorspace"].setValue(str(colorspace))

            preset_clrsp = get_imageio_input_colorspace(file)

            if preset_clrsp is not None:
                read_node["colorspace"].setValue(preset_clrsp)

            # add additional metadata from the version to imprint Avalon knob
            add_keys = [
                "frameStart", "frameEnd", "handles", "source", "author",
                "fps", "version", "handleStart", "handleEnd"
            ]

            data_imprint = {}
            for key in add_keys:
                if key == 'version':
                    data_imprint.update({
                        key: context["version"]['name']
                    })
                else:
                    data_imprint.update({
                        key: context["version"]['data'].get(key, str(None))
                    })

            data_imprint.update({"objectName": read_name})

            read_node["tile_color"].setValue(int("0x4ecd25ff", 16))

            if version_data.get("retime", None):
                speed = version_data.get("speed", 1)
                time_warp_nodes = version_data.get("timewarps", [])
                self.make_retimes(speed, time_warp_nodes)

            return containerise(
                read_node,
                name=name,
                namespace=namespace,
                context=context,
                loader=self.__class__.__name__,
                data=data_imprint
            )

    def switch(self, container, representation):
        self.update(container, representation)

    def update(self, container, representation):
        """Update the Loader's path

        Nuke automatically tries to reset some variables when changing
        the loader's path to a new file. These automatic changes are to its
        inputs:

        """

        from avalon.nuke import (
            update_container
        )

        read_node = nuke.toNode(container['objectName'])

        assert read_node.Class() == "Read", "Must be Read"

        file = self.fname

        if not file:
            repr_id = representation["_id"]
            self.log.warning(
                "Representation id `{}` is failing to load".format(repr_id))
            return

        file = file.replace("\\", "/")

        # Get start frame from version data
        version = io.find_one({
            "type": "version",
            "_id": representation["parent"]
        })

        # get all versions in list
        versions = io.find({
            "type": "version",
            "parent": version["parent"]
        }).distinct('name')

        max_version = max(versions)

        version_data = version.get("data", {})

        orig_first = version_data.get("frameStart")
        orig_last = version_data.get("frameEnd")
        diff = orig_first - 1

        # set first to 1
        first = orig_first - diff
        last = orig_last - diff
        self.handle_start = version_data.get("handleStart", 0)
        self.handle_end = version_data.get("handleEnd", 0)
        colorspace = version_data.get("colorspace")

        if first is None:
            self.log.warning((
                "Missing start frame for updated version"
                "assuming starts at frame 0 for: "
                "{} ({})").format(
                    read_node['name'].value(), representation))
            first = 0

        # create handles offset (only to last, because of mov)
        last += self.handle_start + self.handle_end

        read_node["file"].setValue(file)

        # Set the global in to the start frame of the sequence
        read_node["origfirst"].setValue(first)
        read_node["first"].setValue(first)
        read_node["origlast"].setValue(last)
        read_node["last"].setValue(last)
        read_node['frame_mode'].setValue("start at")

        if int(float(self.first_frame)) == int(
                float(read_node['frame'].value())):
            # start at workfile start
            read_node['frame'].setValue(str(self.first_frame))
        else:
            # start at version frame start
            read_node['frame'].setValue(str(orig_first - self.handle_start))

        if colorspace:
            read_node["colorspace"].setValue(str(colorspace))

        preset_clrsp = get_imageio_input_colorspace(file)

        if preset_clrsp is not None:
            read_node["colorspace"].setValue(preset_clrsp)

        updated_dict = {}
        updated_dict.update({
            "representation": str(representation["_id"]),
            "frameStart": str(first),
            "frameEnd": str(last),
            "version": str(version.get("name")),
            "colorspace": version_data.get("colorspace"),
            "source": version_data.get("source"),
            "handleStart": str(self.handle_start),
            "handleEnd": str(self.handle_end),
            "fps": str(version_data.get("fps")),
            "author": version_data.get("author"),
            "outputDir": version_data.get("outputDir")
        })

        # change color of node
        if version.get("name") not in [max_version]:
            read_node["tile_color"].setValue(int("0xd84f20ff", 16))
        else:
            read_node["tile_color"].setValue(int("0x4ecd25ff", 16))

        if version_data.get("retime", None):
            speed = version_data.get("speed", 1)
            time_warp_nodes = version_data.get("timewarps", [])
            self.make_retimes(speed, time_warp_nodes)

        # Update the imprinted representation
        update_container(
            read_node, updated_dict
        )
        self.log.info("udated to version: {}".format(version.get("name")))

    def remove(self, container):

        from avalon.nuke import viewer_update_and_undo_stop

        read_node = nuke.toNode(container['objectName'])
        assert read_node.Class() == "Read", "Must be Read"

        with viewer_update_and_undo_stop():
            nuke.delete(read_node)

    def make_retimes(self, speed, time_warp_nodes):
        ''' Create all retime and timewarping nodes with coppied animation '''
        if speed != 1:
            rtn = nuke.createNode(
                "Retime",
                "speed {}".format(speed))
            rtn["before"].setValue("continue")
            rtn["after"].setValue("continue")
            rtn["input.first_lock"].setValue(True)
            rtn["input.first"].setValue(
                self.first_frame
            )

        if time_warp_nodes != []:
            start_anim = self.first_frame + (self.handle_start / speed)
            for timewarp in time_warp_nodes:
                twn = nuke.createNode(timewarp["Class"],
                                      "name {}".format(timewarp["name"]))
                if isinstance(timewarp["lookup"], list):
                    # if array for animation
                    twn["lookup"].setAnimated()
                    for i, value in enumerate(timewarp["lookup"]):
                        twn["lookup"].setValueAt(
                            (start_anim + i) + value,
                            (start_anim + i))
                else:
                    # if static value `int`
                    twn["lookup"].setValue(timewarp["lookup"])
@@ -1,317 +0,0 @@
import nuke
from avalon.vendor import qargparse
from avalon import api, io
from openpype.hosts.nuke.api.lib import (
    get_imageio_input_colorspace
)


class LoadSequence(api.Loader):
    """Load image sequence into Nuke"""

    families = ["render", "source", "plate", "review"]
    representations = ["exr", "dpx"]

    label = "Load Image Sequence"
    order = -20
    icon = "file-video-o"
    color = "white"

    script_start = nuke.root()["first_frame"].value()

    # option gui
    defaults = {
        "start_at_workfile": True
    }

    options = [
        qargparse.Boolean(
            "start_at_workfile",
            help="Load at workfile start frame",
            default=True
        )
    ]

    node_name_template = "{class_name}_{ext}"

    def load(self, context, name, namespace, options):
        from avalon.nuke import (
            containerise,
            viewer_update_and_undo_stop
        )

        start_at_workfile = options.get(
            "start_at_workfile", self.defaults["start_at_workfile"])

        version = context['version']
        version_data = version.get("data", {})
        repr_id = context["representation"]["_id"]

        self.log.info("version_data: {}\n".format(version_data))
        self.log.debug(
            "Representation id `{}` ".format(repr_id))

        self.first_frame = int(nuke.root()["first_frame"].getValue())
        self.handle_start = version_data.get("handleStart", 0)
        self.handle_end = version_data.get("handleEnd", 0)

        first = version_data.get("frameStart", None)
        last = version_data.get("frameEnd", None)

        # Fallback to asset name when namespace is None
        if namespace is None:
            namespace = context['asset']['name']

        first -= self.handle_start
        last += self.handle_end

        file = self.fname

        if not file:
            repr_id = context["representation"]["_id"]
            self.log.warning(
                "Representation id `{}` is failing to load".format(repr_id))
            return

        file = file.replace("\\", "/")

        repr_cont = context["representation"]["context"]
        if "#" not in file:
            frame = repr_cont.get("frame")
            if frame:
                padding = len(frame)
                file = file.replace(frame, "#" * padding)

        name_data = {
            "asset": repr_cont["asset"],
            "subset": repr_cont["subset"],
            "representation": context["representation"]["name"],
            "ext": repr_cont["representation"],
            "id": context["representation"]["_id"],
            "class_name": self.__class__.__name__
        }

        read_name = self.node_name_template.format(**name_data)

        # Create the Loader with the filename path set
        read_node = nuke.createNode(
            "Read",
            "name {}".format(read_name))

        # to avoid multiple undo steps for rest of process
        # we will switch off undo-ing
        with viewer_update_and_undo_stop():
            read_node["file"].setValue(file)

            # Set colorspace defined in version data
            colorspace = context["version"]["data"].get("colorspace")
            if colorspace:
                read_node["colorspace"].setValue(str(colorspace))

            preset_clrsp = get_imageio_input_colorspace(file)

            if preset_clrsp is not None:
                read_node["colorspace"].setValue(preset_clrsp)

            # set start frame depending on workfile or version
            self.loader_shift(read_node, start_at_workfile)
            read_node["origfirst"].setValue(int(first))
            read_node["first"].setValue(int(first))
            read_node["origlast"].setValue(int(last))
            read_node["last"].setValue(int(last))

            # add additional metadata from the version to imprint Avalon knob
            add_keys = ["frameStart", "frameEnd",
                        "source", "colorspace", "author", "fps", "version",
                        "handleStart", "handleEnd"]

            data_imprint = {}
            for k in add_keys:
                if k == 'version':
                    data_imprint.update({k: context["version"]['name']})
                else:
                    data_imprint.update(
                        {k: context["version"]['data'].get(k, str(None))})

            data_imprint.update({"objectName": read_name})

            read_node["tile_color"].setValue(int("0x4ecd25ff", 16))

            if version_data.get("retime", None):
                speed = version_data.get("speed", 1)
                time_warp_nodes = version_data.get("timewarps", [])
                self.make_retimes(speed, time_warp_nodes)

            return containerise(read_node,
                                name=name,
                                namespace=namespace,
                                context=context,
                                loader=self.__class__.__name__,
                                data=data_imprint)

    def switch(self, container, representation):
        self.update(container, representation)

    def update(self, container, representation):
        """Update the Loader's path

        Nuke automatically tries to reset some variables when changing
        the loader's path to a new file. These automatic changes are to its
        inputs:

        """

        from avalon.nuke import (
            update_container
        )

        read_node = nuke.toNode(container['objectName'])

        assert read_node.Class() == "Read", "Must be Read"

        repr_cont = representation["context"]

        file = api.get_representation_path(representation)

        if not file:
            repr_id = representation["_id"]
            self.log.warning(
                "Representation id `{}` is failing to load".format(repr_id))
            return

        file = file.replace("\\", "/")

        if "#" not in file:
            frame = repr_cont.get("frame")
            if frame:
                padding = len(frame)
                file = file.replace(frame, "#" * padding)

        # Get start frame from version data
        version = io.find_one({
            "type": "version",
            "_id": representation["parent"]
        })

        # get all versions in list
        versions = io.find({
            "type": "version",
            "parent": version["parent"]
        }).distinct('name')

        max_version = max(versions)

        version_data = version.get("data", {})

        self.first_frame = int(nuke.root()["first_frame"].getValue())
        self.handle_start = version_data.get("handleStart", 0)
        self.handle_end = version_data.get("handleEnd", 0)

        first = version_data.get("frameStart")
        last = version_data.get("frameEnd")

        if first is None:
            self.log.warning(
                "Missing start frame for updated version"
                "assuming starts at frame 0 for: "
                "{} ({})".format(read_node['name'].value(), representation))
            first = 0

        first -= self.handle_start
        last += self.handle_end

        read_node["file"].setValue(file)

        # set start frame depending on workfile or version
        self.loader_shift(
            read_node,
            bool("start at" in read_node['frame_mode'].value()))

        read_node["origfirst"].setValue(int(first))
        read_node["first"].setValue(int(first))
        read_node["origlast"].setValue(int(last))
        read_node["last"].setValue(int(last))

        updated_dict = {}
        updated_dict.update({
            "representation": str(representation["_id"]),
            "frameStart": str(first),
            "frameEnd": str(last),
            "version": str(version.get("name")),
            "colorspace": version_data.get("colorspace"),
            "source": version_data.get("source"),
            "handleStart": str(self.handle_start),
            "handleEnd": str(self.handle_end),
            "fps": str(version_data.get("fps")),
            "author": version_data.get("author"),
            "outputDir": version_data.get("outputDir"),
        })

        # change color of read_node
        if version.get("name") not in [max_version]:
            read_node["tile_color"].setValue(int("0xd84f20ff", 16))
        else:
            read_node["tile_color"].setValue(int("0x4ecd25ff", 16))

        if version_data.get("retime", None):
            speed = version_data.get("speed", 1)
            time_warp_nodes = version_data.get("timewarps", [])
            self.make_retimes(speed, time_warp_nodes)

        # Update the imprinted representation
        update_container(
            read_node,
            updated_dict
        )
        self.log.info("udated to version: {}".format(version.get("name")))

    def remove(self, container):

        from avalon.nuke import viewer_update_and_undo_stop

        read_node = nuke.toNode(container['objectName'])
        assert read_node.Class() == "Read", "Must be Read"

        with viewer_update_and_undo_stop():
            nuke.delete(read_node)

    def make_retimes(self, speed, time_warp_nodes):
        ''' Create all retime and timewarping nodes with coppied animation '''
        if speed != 1:
            rtn = nuke.createNode(
                "Retime",
                "speed {}".format(speed))
            rtn["before"].setValue("continue")
            rtn["after"].setValue("continue")
            rtn["input.first_lock"].setValue(True)
            rtn["input.first"].setValue(
                self.first_frame
            )

        if time_warp_nodes != []:
            start_anim = self.first_frame + (self.handle_start / speed)
            for timewarp in time_warp_nodes:
                twn = nuke.createNode(timewarp["Class"],
                                      "name {}".format(timewarp["name"]))
                if isinstance(timewarp["lookup"], list):
                    # if array for animation
                    twn["lookup"].setAnimated()
                    for i, value in enumerate(timewarp["lookup"]):
                        twn["lookup"].setValueAt(
                            (start_anim + i) + value,
                            (start_anim + i))
                else:
                    # if static value `int`
                    twn["lookup"].setValue(timewarp["lookup"])

    def loader_shift(self, read_node, workfile_start=False):
        """ Set start frame of read node to a workfile start

        Args:
            read_node (nuke.Node): The nuke's read node
            workfile_start (bool): set workfile start frame if true

        """
        if workfile_start:
            read_node['frame_mode'].setValue("start at")
            read_node['frame'].setValue(str(self.script_start))

@@ -17,7 +17,7 @@ class NukeRenderLocal(openpype.api.Extractor):
     order = pyblish.api.ExtractorOrder
     label = "Render Local"
     hosts = ["nuke"]
-    families = ["render.local", "prerender.local"]
+    families = ["render.local", "prerender.local", "still.local"]
 
     def process(self, instance):
         families = instance.data["families"]

@@ -66,13 +66,23 @@ class NukeRenderLocal(openpype.api.Extractor):
         instance.data["representations"] = []
 
         collected_frames = os.listdir(out_dir)
-        repre = {
-            'name': ext,
-            'ext': ext,
-            'frameStart': "%0{}d".format(len(str(last_frame))) % first_frame,
-            'files': collected_frames,
-            "stagingDir": out_dir
-        }
+        if len(collected_frames) == 1:
+            repre = {
+                'name': ext,
+                'ext': ext,
+                'files': collected_frames.pop(),
+                "stagingDir": out_dir
+            }
+        else:
+            repre = {
+                'name': ext,
+                'ext': ext,
+                'frameStart': "%0{}d".format(
+                    len(str(last_frame))) % first_frame,
+                'files': collected_frames,
+                "stagingDir": out_dir
+            }
         instance.data["representations"].append(repre)
 
         self.log.info("Extracted instance '{0}' to: {1}".format(

@@ -89,6 +99,9 @@ class NukeRenderLocal(openpype.api.Extractor):
             instance.data['family'] = 'prerender'
             families.remove('prerender.local')
             families.insert(0, "prerender")
+        elif "still.local" in families:
+            instance.data['family'] = 'image'
+            families.remove('still.local')
         instance.data["families"] = families
 
         collections, remainder = clique.assemble(collected_frames)
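For reference, the two representation shapes this split produces, with hypothetical values; a single frame carries 'files' as a plain string and no 'frameStart':

    # single frame (e.g. "still.local")
    repre = {"name": "exr", "ext": "exr",
             "files": "shot010.exr", "stagingDir": "/tmp/staging"}

    # frame sequence
    repre = {"name": "exr", "ext": "exr", "frameStart": "1001",
             "files": ["shot010.1001.exr", "shot010.1002.exr"],
             "stagingDir": "/tmp/staging"}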
@@ -9,7 +9,7 @@ class IncrementScriptVersion(pyblish.api.ContextPlugin):
     order = pyblish.api.IntegratorOrder + 0.9
     label = "Increment Script Version"
     optional = True
-    families = ["workfile", "render", "render.local", "render.farm"]
+    families = ["workfile"]
     hosts = ['nuke']
 
     def process(self, context):
@@ -13,7 +13,7 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin):
     hosts = ["nuke", "nukeassist"]
 
     # presets
-    sync_workfile_version = False
+    sync_workfile_version_on_families = []
 
     def process(self, context):
         asset_data = io.find_one({

@@ -120,11 +120,12 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin):
             # sync workfile version
            _families_test = [family] + families
            self.log.debug("__ _families_test: `{}`".format(_families_test))
-            if not next((f for f in _families_test
-                         if "prerender" in f),
-                        None) and self.sync_workfile_version:
-                # get version to instance for integration
-                instance.data['version'] = instance.context.data['version']
+            for family_test in _families_test:
+                if family_test in self.sync_workfile_version_on_families:
+                    self.log.debug("Syncing version with workfile for '{}'"
+                                   .format(family_test))
+                    # get version to instance for integration
+                    instance.data['version'] = instance.context.data['version']
 
            instance.data.update({
                "subset": subset,
@@ -64,7 +64,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
         )
 
         if [fm for fm in _families_test
-                if fm in ["render", "prerender"]]:
+                if fm in ["render", "prerender", "still"]]:
             if "representations" not in instance.data:
                 instance.data["representations"] = list()
 

@@ -100,7 +100,13 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
                     frame_start_str, frame_slate_str)
                 collected_frames.insert(0, slate_frame)
 
-            representation['files'] = collected_frames
+            if collected_frames_len == 1:
+                representation['files'] = collected_frames.pop()
+                if "still" in _families_test:
+                    instance.data['family'] = 'image'
+                    instance.data["families"].remove('still')
+            else:
+                representation['files'] = collected_frames
             instance.data["representations"].append(representation)
         except Exception:
             instance.data["representations"].append(representation)
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+"""Validate if instance asset is the same as context asset."""
+from __future__ import absolute_import
+
+import nuke
+
+import pyblish.api
+import openpype.api
+import avalon.nuke.lib
+import openpype.hosts.nuke.api as nuke_api
+
+
+class SelectInvalidInstances(pyblish.api.Action):
+    """Select invalid instances in Outliner."""
+
+    label = "Select Instances"
+    icon = "briefcase"
+    on = "failed"
+
+    def process(self, context, plugin):
+        """Process invalid validators and select invalid instances."""
+        # Get the errored instances
+        failed = []
+        for result in context.data["results"]:
+            if (
+                result["error"] is None
+                or result["instance"] is None
+                or result["instance"] in failed
+                or result["plugin"] != plugin
+            ):
+                continue
+
+            failed.append(result["instance"])
+
+        # Apply pyblish.logic to get the instances for the plug-in
+        instances = pyblish.api.instances_by_plugin(failed, plugin)
+
+        if instances:
+            self.log.info(
+                "Selecting invalid nodes: %s" % ", ".join(
+                    [str(x) for x in instances]
+                )
+            )
+            self.select(instances)
+        else:
+            self.log.info("No invalid nodes found.")
+            self.deselect()
+
+    def select(self, instances):
+        avalon.nuke.lib.select_nodes(
+            [nuke.toNode(str(x)) for x in instances]
+        )
+
+    def deselect(self):
+        avalon.nuke.lib.reset_selection()
+
+
+class RepairSelectInvalidInstances(pyblish.api.Action):
+    """Repair the instance asset."""
+
+    label = "Repair"
+    icon = "wrench"
+    on = "failed"
+
+    def process(self, context, plugin):
+        # Get the errored instances
+        failed = []
+        for result in context.data["results"]:
+            if (
+                result["error"] is None
+                or result["instance"] is None
+                or result["instance"] in failed
+                or result["plugin"] != plugin
+            ):
+                continue
+
+            failed.append(result["instance"])
+
+        # Apply pyblish.logic to get the instances for the plug-in
+        instances = pyblish.api.instances_by_plugin(failed, plugin)
+
+        context_asset = context.data["assetEntity"]["name"]
+        for instance in instances:
+            origin_node = instance[0]
+            nuke_api.lib.recreate_instance(
+                origin_node, avalon_data={"asset": context_asset}
+            )
+
+
+class ValidateInstanceInContext(pyblish.api.InstancePlugin):
+    """Validator to check if instance asset matches context asset.
+
+    When working in per-shot style you always publish data in context of
+    current asset (shot). This validator checks if this is so. It is optional
+    so it can be disabled when needed.
+
+    Action on this validator will select invalid instances in Outliner.
+    """
+
+    order = openpype.api.ValidateContentsOrder
+    label = "Instance in same Context"
+    hosts = ["nuke"]
+    actions = [SelectInvalidInstances, RepairSelectInvalidInstances]
+    optional = True
+
+    def process(self, instance):
+        asset = instance.data.get("asset")
+        context_asset = instance.context.data["assetEntity"]["name"]
+        msg = "{} has asset {}".format(instance.name, asset)
+        assert asset == context_asset, msg
@@ -56,8 +56,8 @@ class ValidateOutputResolution(pyblish.api.InstancePlugin):
 
     def process(self, instance):
 
-        # Skip bounding box check if a crop node exists.
-        if instance[0].dependencies()[0].Class() == "Crop":
+        # Skip bounding box check if a reformat node exists.
+        if instance[0].dependencies()[0].Class() == "Reformat":
             return
 
         msg = "Bounding box is outside the format."
openpype/hosts/nuke/plugins/publish/validate_proxy_mode.py (new file, 33 lines)

@@ -0,0 +1,33 @@
+import pyblish
+import nuke
+
+
+class FixProxyMode(pyblish.api.Action):
+    """
+    Toggle the proxy switch OFF
+    """
+
+    label = "Proxy toggle to OFF"
+    icon = "wrench"
+    on = "failed"
+
+    def process(self, context, plugin):
+        rootNode = nuke.root()
+        rootNode["proxy"].setValue(False)
+
+
+@pyblish.api.log
+class ValidateProxyMode(pyblish.api.ContextPlugin):
+    """Validate active proxy mode"""
+
+    order = pyblish.api.ValidatorOrder
+    label = "Validate Proxy Mode"
+    hosts = ["nuke"]
+    actions = [FixProxyMode]
+
+    def process(self, context):
+
+        rootNode = nuke.root()
+        isProxy = rootNode["proxy"].value()
+
+        assert not isProxy, "Proxy mode should be toggled OFF"
@@ -55,7 +55,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
     """ Validates file output. """
 
     order = pyblish.api.ValidatorOrder + 0.1
-    families = ["render", "prerender"]
+    families = ["render", "prerender", "still"]
 
     label = "Validate rendered frame"
     hosts = ["nuke", "nukestudio"]

@@ -71,6 +71,9 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
            self.log.error(msg)
            raise ValidationException(msg)
 
+        if isinstance(repre["files"], str):
+            return
+
        collections, remainder = clique.assemble(repre["files"])
        self.log.info("collections: {}".format(str(collections)))
        self.log.info("remainder: {}".format(str(remainder)))
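For context, clique.assemble() is what the early return above protects; it expects an iterable of file names and splits it into frame collections plus a non-sequence remainder. A quick illustration, assuming the real clique package:

    import clique

    files = ["render.1001.exr", "render.1002.exr", "notes.txt"]
    collections, remainder = clique.assemble(files)
    # collections -> one collection covering render.1001-1002.exr
    # remainder   -> ["notes.txt"]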
@@ -9,7 +9,9 @@ SINGLE_FILE_FORMATS = ['avi', 'mp4', 'mxf', 'mov', 'mpg', 'mpeg', 'wmv', 'm4v',
                        'm2v']
 
 
-def evaluate_filepath_new(k_value, k_eval, project_dir, first_frame):
+def evaluate_filepath_new(
+        k_value, k_eval, project_dir, first_frame, allow_relative):
+
     # get combined relative path
     combined_relative_path = None
     if k_eval is not None and project_dir is not None:

@@ -26,8 +28,9 @@ def evaluate_filepath_new(k_value, k_eval, project_dir, first_frame):
         combined_relative_path = None
 
     try:
-        k_value = k_value % first_frame
-        if os.path.exists(k_value):
+        # k_value = k_value % first_frame
+        if os.path.isdir(os.path.basename(k_value)):
+            # doesn't check for file, only parent dir
             filepath = k_value
         elif os.path.exists(k_eval):
             filepath = k_eval

@@ -37,10 +40,12 @@ def evaluate_filepath_new(k_value, k_eval, project_dir, first_frame):
 
         filepath = os.path.abspath(filepath)
     except Exception as E:
-        log.error("Cannot create Read node. Perhaps it needs to be rendered first :) Error: `{}`".format(E))
+        log.error("Cannot create Read node. Perhaps it needs to be \
+rendered first :) Error: `{}`".format(E))
         return None
 
     filepath = filepath.replace('\\', '/')
     # assumes last number is a sequence counter
     current_frame = re.findall(r'\d+', filepath)[-1]
     padding = len(current_frame)
     basename = filepath[: filepath.rfind(current_frame)]

@@ -51,11 +56,13 @@ def evaluate_filepath_new(k_value, k_eval, project_dir, first_frame):
         pass
     else:
         # Image sequence needs hashes
+        # to do: stills with no number are not handled
         filepath = basename + '#' * padding + '.' + filetype
 
     # relative path? make it relative again
-    if not isinstance(project_dir, type(None)):
-        filepath = filepath.replace(project_dir, '.')
+    if allow_relative:
+        if (not isinstance(project_dir, type(None))) and project_dir != "":
+            filepath = filepath.replace(project_dir, '.')
 
     # get first and last frame from disk
     frames = []

@@ -95,41 +102,40 @@ def create_read_node(ndata, comp_start):
     return
 
 
-def write_to_read(gn):
+def write_to_read(gn,
+                  allow_relative=False):
+
     comp_start = nuke.Root().knob('first_frame').value()
     comp_end = nuke.Root().knob('last_frame').value()
     project_dir = nuke.Root().knob('project_directory').getValue()
     if not os.path.exists(project_dir):
         project_dir = nuke.Root().knob('project_directory').evaluate()
 
     group_read_nodes = []
 
     with gn:
         height = gn.screenHeight()  # get group height and position
         new_xpos = int(gn.knob('xpos').value())
         new_ypos = int(gn.knob('ypos').value()) + height + 20
         group_writes = [n for n in nuke.allNodes() if n.Class() == "Write"]
         print("__ group_writes: {}".format(group_writes))
         if group_writes != []:
             # there can be only 1 write node, taking first
             n = group_writes[0]
 
             if n.knob('file') is not None:
-                file_path_new = evaluate_filepath_new(
+                myfile, firstFrame, lastFrame = evaluate_filepath_new(
                     n.knob('file').getValue(),
                     n.knob('file').evaluate(),
                     project_dir,
-                    comp_start
+                    comp_start,
+                    allow_relative
                 )
-                if not file_path_new:
+                if not myfile:
                     return
 
-                myfiletranslated, firstFrame, lastFrame = file_path_new
                 # get node data
                 ndata = {
-                    'filepath': myfiletranslated,
-                    'firstframe': firstFrame,
-                    'lastframe': lastFrame,
+                    'filepath': myfile,
+                    'firstframe': int(firstFrame),
+                    'lastframe': int(lastFrame),
                     'new_xpos': new_xpos,
                     'new_ypos': new_ypos,
                     'colorspace': n.knob('colorspace').getValue(),

@@ -139,7 +145,6 @@ def write_to_read(gn):
                 }
                 group_read_nodes.append(ndata)
 
-
     # create reads in one go
     for oneread in group_read_nodes:
         # create read node
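The hash-padding step above rewrites a concrete frame file into Nuke's sequence notation. A small walk-through with made-up values:

    filepath = "renders/beauty.1001.exr"
    current_frame = "1001"        # last number found by the regex
    padding = len(current_frame)  # 4
    basename = filepath[: filepath.rfind(current_frame)]  # "renders/beauty."
    sequence = basename + '#' * padding + '.' + "exr"
    # -> "renders/beauty.####.exr"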
@@ -17,6 +17,10 @@ class ExtractReview(openpype.api.Extractor):
     hosts = ["photoshop"]
     families = ["review"]
 
+    # Extract Options
+    jpg_options = None
+    mov_options = None
+
     def process(self, instance):
         staging_dir = self.staging_dir(instance)
         self.log.info("Outputting image to {}".format(staging_dir))

@@ -53,14 +57,16 @@ class ExtractReview(openpype.api.Extractor):
             "name": "jpg",
             "ext": "jpg",
             "files": output_image,
-            "stagingDir": staging_dir
+            "stagingDir": staging_dir,
+            "tags": self.jpg_options['tags']
         })
         instance.data["stagingDir"] = staging_dir
 
         # Generate thumbnail.
         thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg")
         args = [
-            "{}".format(ffmpeg_path), "-y",
+            ffmpeg_path,
+            "-y",
             "-i", output_image_path,
             "-vf", "scale=300:-1",
             "-vframes", "1",

@@ -78,7 +84,8 @@ class ExtractReview(openpype.api.Extractor):
         # Generate mov.
         mov_path = os.path.join(staging_dir, "review.mov")
         args = [
-            ffmpeg_path, "-y",
+            ffmpeg_path,
+            "-y",
             "-i", output_image_path,
             "-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2",
             "-vframes", "1",

@@ -95,7 +102,7 @@ class ExtractReview(openpype.api.Extractor):
             "frameEnd": 1,
             "fps": 25,
             "preview": True,
-            "tags": ["review", "ftrackreview"]
+            "tags": self.mov_options['tags']
         })
 
         # Required for extract_review plugin (L222 onwards).
@@ -8,13 +8,7 @@ from .pipeline import (
     launch_workfiles_app
 )
 
-from avalon.tools import (
-    creator,
-    loader,
-    sceneinventory,
-    libraryloader,
-    subsetmanager
-)
+from openpype.tools.utils import host_tools
 
 
 def load_stylesheet():

@@ -30,7 +24,7 @@ def load_stylesheet():
 
 class Spacer(QtWidgets.QWidget):
     def __init__(self, height, *args, **kwargs):
-        super(self.__class__, self).__init__(*args, **kwargs)
+        super(Spacer, self).__init__(*args, **kwargs)
 
         self.setFixedHeight(height)
 

@@ -47,7 +41,7 @@ class Spacer(QtWidgets.QWidget):
 
 class OpenPypeMenu(QtWidgets.QWidget):
     def __init__(self, *args, **kwargs):
-        super(self.__class__, self).__init__(*args, **kwargs)
+        super(OpenPypeMenu, self).__init__(*args, **kwargs)
 
         self.setObjectName("OpenPypeMenu")
 

@@ -117,7 +111,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
 
     def on_create_clicked(self):
         print("Clicked Create")
-        creator.show()
+        host_tools.show_creator()
 
     def on_publish_clicked(self):
         print("Clicked Publish")

@@ -125,19 +119,19 @@ class OpenPypeMenu(QtWidgets.QWidget):
 
     def on_load_clicked(self):
         print("Clicked Load")
-        loader.show(use_context=True)
+        host_tools.show_loader(use_context=True)
 
     def on_inventory_clicked(self):
         print("Clicked Inventory")
-        sceneinventory.show()
+        host_tools.show_scene_inventory()
 
     def on_subsetm_clicked(self):
         print("Clicked Subset Manager")
-        subsetmanager.show()
+        host_tools.show_subset_manager()
 
     def on_libload_clicked(self):
         print("Clicked Library")
-        libraryloader.show()
+        host_tools.show_library_loader()
 
     def on_rename_clicked(self):
         print("Clicked Rename")
@@ -4,7 +4,6 @@ Basic avalon integration
 import os
 import contextlib
 from collections import OrderedDict
-from avalon.tools import workfiles
 from avalon import api as avalon
 from avalon import schema
 from avalon.pipeline import AVALON_CONTAINER_ID

@@ -12,6 +11,7 @@ from pyblish import api as pyblish
 from openpype.api import Logger
 from . import lib
 from . import PLUGINS_DIR
+from openpype.tools.utils import host_tools
 log = Logger().get_logger(__name__)
 
 PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")

@@ -212,14 +212,12 @@ def update_container(timeline_item, data=None):
 
 
 def launch_workfiles_app(*args):
-    workdir = os.environ["AVALON_WORKDIR"]
-    workfiles.show(workdir)
+    host_tools.show_workfiles()
 
 
 def publish(parent):
     """Shorthand to publish from within host"""
-    from avalon.tools import publish
-    return publish.show(parent)
+    return host_tools.show_publish()
 
 
 @contextlib.contextmanager
@@ -58,7 +58,7 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
             # use first frame as thumbnail if is sequence of jpegs
             full_thumbnail_path = os.path.join(
                 thumbnail_repre["stagingDir"], file
-                )
+            )
             self.log.info(
                 "For thumbnail is used file: {}".format(full_thumbnail_path)
             )

@@ -116,7 +116,7 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
 
         # create new thumbnail representation
         representation = {
-            'name': 'jpg',
+            'name': 'thumbnail',
             'ext': 'jpg',
             'files': filename,
             "stagingDir": staging_dir,
@@ -59,32 +59,35 @@ class ExtractTrimVideoAudio(openpype.api.Extractor):
            if "trimming" not in fml
        ]
 
-        args = [
-            f"\"{ffmpeg_path}\"",
+        ffmpeg_args = [
+            ffmpeg_path,
            "-ss", str(start / fps),
-            "-i", f"\"{video_file_path}\"",
+            "-i", video_file_path,
            "-t", str(dur / fps)
        ]
        if ext in [".mov", ".mp4"]:
-            args.extend([
+            ffmpeg_args.extend([
                "-crf", "18",
-                "-pix_fmt", "yuv420p"])
+                "-pix_fmt", "yuv420p"
+            ])
        elif ext in ".wav":
-            args.extend([
-                "-vn -acodec pcm_s16le",
-                "-ar 48000 -ac 2"
+            ffmpeg_args.extend([
+                "-vn",
+                "-acodec", "pcm_s16le",
+                "-ar", "48000",
+                "-ac", "2"
            ])
 
        # add output path
-        args.append(f"\"{clip_trimed_path}\"")
+        ffmpeg_args.append(clip_trimed_path)
 
-        self.log.info(f"Processing: {args}")
-        ffmpeg_args = " ".join(args)
+        joined_args = " ".join(ffmpeg_args)
+        self.log.info(f"Processing: {joined_args}")
        openpype.api.run_subprocess(
-            ffmpeg_args, shell=True, logger=self.log
+            ffmpeg_args, logger=self.log
        )
 
-        repr = {
+        repre = {
            "name": ext[1:],
            "ext": ext[1:],
            "files": os.path.basename(clip_trimed_path),

@@ -97,10 +100,10 @@ class ExtractTrimVideoAudio(openpype.api.Extractor):
        }
 
        if ext in [".mov", ".mp4"]:
-            repr.update({
+            repre.update({
                "thumbnail": True,
                "tags": ["review", "ftrackreview", "delete"]})
 
-        instance.data["representations"].append(repr)
+        instance.data["representations"].append(repre)
 
        self.log.debug(f"Instance data: {pformat(instance.data)}")
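Switching from one shell string to an argument list is what removes the manual quote-wrapping above; subprocess handles paths with spaces when given a list. A minimal sketch with a hypothetical clip path:

    import subprocess

    ffmpeg_args = ["ffmpeg", "-y", "-i", "/mnt/proj/my clip.mov", "/tmp/out.wav"]
    subprocess.run(ffmpeg_args, check=True)  # no shell, no f"\"{...}\"" wrapping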
@@ -1,6 +1,8 @@
 import os
+import logging
+
+import requests
 
 import avalon.api
 import pyblish.api
 from avalon.tvpaint import pipeline

@@ -8,6 +10,7 @@ from avalon.tvpaint.communication_server import register_localization_file
 from .lib import set_context_settings
 
 from openpype.hosts import tvpaint
 from openpype.api import get_current_project_settings
 
+log = logging.getLogger(__name__)

@@ -51,6 +54,19 @@ def initial_launch():
     set_context_settings()
 
 
+def application_exit():
+    data = get_current_project_settings()
+    stop_timer = data["tvpaint"]["stop_timer_on_application_exit"]
+
+    if not stop_timer:
+        return
+
+    # Stop application timer.
+    webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL")
+    rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url)
+    requests.post(rest_api_url)
+
+
 def install():
     log.info("OpenPype - Installing TVPaint integration")
     localization_file = os.path.join(HOST_DIR, "resources", "avalon.loc")

@@ -67,6 +83,7 @@ def install():
     pyblish.api.register_callback("instanceToggled", on_instance_toggle)
 
     avalon.api.on("application.launched", initial_launch)
+    avalon.api.on("application.exit", application_exit)
 
 
 def uninstall():
@@ -3,4 +3,17 @@ from avalon.tvpaint import pipeline
 
 
 class Creator(PypeCreatorMixin, pipeline.Creator):
-    pass
+    @classmethod
+    def get_dynamic_data(cls, *args, **kwargs):
+        dynamic_data = super(Creator, cls).get_dynamic_data(*args, **kwargs)
+
+        # Change asset and name by current workfile context
+        workfile_context = pipeline.get_current_workfile_context()
+        asset_name = workfile_context.get("asset")
+        task_name = workfile_context.get("task")
+        if "asset" not in dynamic_data and asset_name:
+            dynamic_data["asset"] = asset_name
+
+        if "task" not in dynamic_data and task_name:
+            dynamic_data["task"] = task_name
+        return dynamic_data
@@ -606,7 +606,7 @@ class ExtractSequence(pyblish.api.Extractor):
                 self._copy_image(eq_frame_filepath, new_filepath)
                 layer_files_by_frame[frame_idx] = new_filepath
 
-        elif pre_behavior == "loop":
+        elif pre_behavior in ("loop", "repeat"):
             # Loop backwards from last frame of layer
             for frame_idx in reversed(range(mark_in_index, frame_start_index)):
                 eq_frame_idx_offset = (

@@ -678,7 +678,7 @@ class ExtractSequence(pyblish.api.Extractor):
                 self._copy_image(eq_frame_filepath, new_filepath)
                 layer_files_by_frame[frame_idx] = new_filepath
 
-        elif post_behavior == "loop":
+        elif post_behavior in ("loop", "repeat"):
             # Loop backwards from last frame of layer
             for frame_idx in range(frame_end_index + 1, mark_out_index + 1):
                 eq_frame_idx = frame_idx % frame_count
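Both branches wrap out-of-range frames back into the layer's range with a modulo, which is why "repeat" can share the "loop" code path. A toy illustration:

    frame_count = 5
    for frame_idx in range(7, 10):
        print(frame_idx, "->", frame_idx % frame_count)
    # 7 -> 2, 8 -> 3, 9 -> 4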
@@ -10,6 +10,7 @@ Provides:
 import os
 import json
 import clique
+import tempfile
 
 import pyblish.api
 from avalon import io

@@ -94,7 +95,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
         instance.data["families"] = families
         instance.data["version"] = \
             self._get_last_version(asset, subset) + 1
-        instance.data["stagingDir"] = task_dir
+        instance.data["stagingDir"] = tempfile.mkdtemp()
         instance.data["source"] = "webpublisher"
 
         # to store logging info into DB openpype.webpublishes

@@ -113,6 +114,8 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
             instance.data["frameEnd"] = \
                 instance.data["representations"][0]["frameEnd"]
         else:
+            instance.data["frameStart"] = 0
+            instance.data["frameEnd"] = 1
             instance.data["representations"] = self._get_single_repre(
                 task_dir, task_data["files"], tags
             )

@@ -174,7 +177,11 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
             (family, [families], subset_template_name, tags) tuple
             AssertionError if not matching family found
         """
-        task_obj = settings.get(task_type)
+        task_type = task_type.lower()
+        lower_cased_task_types = {}
+        for t_type, task in settings.items():
+            lower_cased_task_types[t_type.lower()] = task
+        task_obj = lower_cased_task_types.get(task_type)
         assert task_obj, "No family configuration for '{}'".format(task_type)
 
         found_family = None
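The loop above only lower-cases the settings keys so the task-type lookup is case-insensitive; an equivalent dict comprehension, shown purely for clarity:

    lower_cased_task_types = {
        t_type.lower(): task for t_type, task in settings.items()
    }
    task_obj = lower_cased_task_types.get(task_type.lower())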
@@ -27,6 +27,7 @@ from .execute import (
     get_pype_execute_args,
     execute,
     run_subprocess,
+    path_to_subprocess_arg,
     CREATE_NO_WINDOW
 )
 from .log import PypeLogger, timeit

@@ -59,6 +60,11 @@ from .python_module_tools import (
     import_module_from_dirpath
 )
 
+from .profiles_filtering import (
+    compile_list_of_regexes,
+    filter_profiles
+)
+
 from .avalon_context import (
     CURRENT_DOC_SCHEMAS,
     PROJECT_NAME_ALLOWED_SYMBOLS,

@@ -118,13 +124,9 @@ from .applications import (
     prepare_host_environments,
     prepare_context_environments,
     get_app_environments_for_context,
-    apply_project_environments_value,
-
-    compile_list_of_regexes
+    apply_project_environments_value
 )
 
-from .profiles_filtering import filter_profiles
-
 from .plugin_tools import (
     TaskNotSetError,
     get_subset_name,

@@ -171,6 +173,8 @@ __all__ = [
     "get_pype_execute_args",
     "execute",
     "run_subprocess",
+    "path_to_subprocess_arg",
     "CREATE_NO_WINDOW",
+
     "env_value_to_bool",
     "get_paths_from_environ",
@@ -25,6 +25,7 @@ from . import (
     PypeLogger,
     Anatomy
 )
+from .profiles_filtering import filter_profiles
 from .local_settings import get_openpype_username
 from .avalon_context import (
     get_workdir_data,

@@ -1161,8 +1162,12 @@ def prepare_host_environments(data, implementation_envs=True):
         if final_env is None:
             final_env = loaded_env
 
+    keys_to_remove = set(data["env"].keys()) - set(final_env.keys())
+
     # Update env
     data["env"].update(final_env)
+    for key in keys_to_remove:
+        data["env"].pop(key, None)
 
 
 def apply_project_environments_value(project_name, env, project_settings=None):

@@ -1244,6 +1249,9 @@ def prepare_context_environments(data):
     asset_tasks = asset_doc.get("data", {}).get("tasks") or {}
     task_info = asset_tasks.get(task_name) or {}
     task_type = task_info.get("type")
+    # Temp solution how to pass task type to `_prepare_last_workfile`
+    data["task_type"] = task_type
+
     workfile_template_key = get_workfile_template_key(
         task_type,
         app.host_name,

@@ -1320,13 +1328,14 @@ def _prepare_last_workfile(data, workdir, workfile_template_key):
     workdir_data = copy.deepcopy(_workdir_data)
     project_name = data["project_name"]
     task_name = data["task_name"]
+    task_type = data["task_type"]
     start_last_workfile = should_start_last_workfile(
-        project_name, app.host_name, task_name
+        project_name, app.host_name, task_name, task_type
     )
     data["start_last_workfile"] = start_last_workfile
 
     workfile_startup = should_workfile_tool_start(
-        project_name, app.host_name, task_name
+        project_name, app.host_name, task_name, task_type
     )
     data["workfile_startup"] = workfile_startup
 

@@ -1344,23 +1353,23 @@ def _prepare_last_workfile(data, workdir, workfile_template_key):
     )
 
     # Last workfile path
-    last_workfile_path = ""
-    extensions = avalon.api.HOST_WORKFILE_EXTENSIONS.get(
-        app.host_name
-    )
-    if extensions:
-        anatomy = data["anatomy"]
-        # Find last workfile
-        file_template = anatomy.templates[workfile_template_key]["file"]
-        workdir_data.update({
-            "version": 1,
-            "user": get_openpype_username(),
-            "ext": extensions[0]
-        })
-
-        last_workfile_path = avalon.api.last_workfile(
-            workdir, file_template, workdir_data, extensions, True
-        )
+    last_workfile_path = data.get("last_workfile_path") or ""
+    if not last_workfile_path:
+        extensions = avalon.api.HOST_WORKFILE_EXTENSIONS.get(app.host_name)
+
+        if extensions:
+            anatomy = data["anatomy"]
+            # Find last workfile
+            file_template = anatomy.templates["work"]["file"]
+            workdir_data.update({
+                "version": 1,
+                "user": get_openpype_username(),
+                "ext": extensions[0]
+            })
+
+            last_workfile_path = avalon.api.last_workfile(
+                workdir, file_template, workdir_data, extensions, True
+            )
 
     if os.path.exists(last_workfile_path):
         log.debug((

@@ -1375,54 +1384,8 @@ def _prepare_last_workfile(data, workdir, workfile_template_key):
     data["last_workfile_path"] = last_workfile_path
 
 
-def get_option_from_settings(
-    startup_presets, host_name, task_name, default_output
-):
-    host_name_lowered = host_name.lower()
-    task_name_lowered = task_name.lower()
-
-    max_points = 2
-    matching_points = -1
-    matching_item = None
-    for item in startup_presets:
-        hosts = item.get("hosts") or tuple()
-        tasks = item.get("tasks") or tuple()
-
-        hosts_lowered = tuple(_host_name.lower() for _host_name in hosts)
-        # Skip item if has set hosts and current host is not in
-        if hosts_lowered and host_name_lowered not in hosts_lowered:
-            continue
-
-        tasks_lowered = tuple(_task_name.lower() for _task_name in tasks)
-        # Skip item if has set tasks and current task is not in
-        if tasks_lowered:
-            task_match = False
-            for task_regex in compile_list_of_regexes(tasks_lowered):
-                if re.match(task_regex, task_name_lowered):
-                    task_match = True
-                    break
-
-            if not task_match:
-                continue
-
-        points = int(bool(hosts_lowered)) + int(bool(tasks_lowered))
-        if points > matching_points:
-            matching_item = item
-            matching_points = points
-
-        if matching_points == max_points:
-            break
-
-    if matching_item is not None:
-        output = matching_item.get("enabled")
-        if output is None:
-            output = default_output
-        return output
-    return default_output
-
-
 def should_start_last_workfile(
-    project_name, host_name, task_name, default_output=False
+    project_name, host_name, task_name, task_type, default_output=False
 ):
     """Define if host should start last version workfile if possible.
 

@@ -1444,7 +1407,7 @@ def should_start_last_workfile(
     """
 
     project_settings = get_project_settings(project_name)
-    startup_presets = (
+    profiles = (
         project_settings
         ["global"]
         ["tools"]

@@ -1452,15 +1415,27 @@ def should_start_last_workfile(
         ["last_workfile_on_startup"]
     )
 
-    if not startup_presets:
+    if not profiles:
         return default_output
 
-    return get_option_from_settings(
-        startup_presets, host_name, task_name, default_output)
+    filter_data = {
+        "tasks": task_name,
+        "task_types": task_type,
+        "hosts": host_name
+    }
+    matching_item = filter_profiles(profiles, filter_data)
+
+    output = None
+    if matching_item:
+        output = matching_item.get("enabled")
+
+    if output is None:
+        return default_output
+    return output
 
 
 def should_workfile_tool_start(
-    project_name, host_name, task_name, default_output=False
+    project_name, host_name, task_name, task_type, default_output=False
 ):
     """Define if host should start workfile tool at host launch.
 

@@ -1482,7 +1457,7 @@ def should_workfile_tool_start(
     """
 
     project_settings = get_project_settings(project_name)
-    startup_presets = (
+    profiles = (
         project_settings
         ["global"]
         ["tools"]

@@ -1490,27 +1465,20 @@ def should_workfile_tool_start(
         ["open_workfile_tool_on_startup"]
     )
 
-    if not startup_presets:
+    if not profiles:
         return default_output
 
-    return get_option_from_settings(
-        startup_presets, host_name, task_name, default_output)
+    filter_data = {
+        "tasks": task_name,
+        "task_types": task_type,
+        "hosts": host_name
+    }
+    matching_item = filter_profiles(profiles, filter_data)
+
+    output = None
+    if matching_item:
+        output = matching_item.get("enabled")
 
-
-def compile_list_of_regexes(in_list):
-    """Convert strings in entered list to compiled regex objects."""
-    regexes = list()
-    if not in_list:
-        return regexes
-
-    for item in in_list:
-        if not item:
-            continue
-        try:
-            regexes.append(re.compile(item))
-        except TypeError:
-            print((
-                "Invalid type \"{}\" value \"{}\"."
-                " Expected string based object. Skipping."
-            ).format(str(type(item)), str(item)))
-    return regexes
+    if output is None:
+        return default_output
+    return output
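A minimal sketch of the profile matching these two functions now rely on; the profile shapes below are assumptions inferred from the filter_data keys above, not settings copied from the repository:

    profiles = [
        {"hosts": ["nuke"], "task_types": [], "tasks": [], "enabled": True},
        {"hosts": ["maya"], "task_types": ["Animation"], "tasks": [],
         "enabled": False},
    ]
    filter_data = {"hosts": "nuke", "task_types": "Compositing", "tasks": "comp"}
    matching_item = filter_profiles(profiles, filter_data)
    # the first profile matches on host; its "enabled" flag decides the output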
@@ -10,6 +10,7 @@ import functools
 
 from openpype.settings import get_project_settings
 from .anatomy import Anatomy
+from .profiles_filtering import filter_profiles
 
 # avalon module is not imported at the top
 # - may not be in path at the time of pype.lib initialization

@@ -453,8 +454,6 @@ def get_workfile_template_key(
     if not profiles:
         return default
 
-    from .profiles_filtering import filter_profiles
-
     profile_filter = {
         "task_types": task_type,
         "hosts": host_name

@@ -791,7 +790,9 @@ class BuildWorkfile:
         current_task_name = avalon.io.Session["AVALON_TASK"]
 
         # Load workfile presets for task
-        self.build_presets = self.get_build_presets(current_task_name)
+        self.build_presets = self.get_build_presets(
+            current_task_name, current_asset_entity
+        )
 
         # Skip if there are any presets for task
         if not self.build_presets:

@@ -875,7 +876,7 @@ class BuildWorkfile:
         return loaded_containers
 
     @with_avalon
-    def get_build_presets(self, task_name):
+    def get_build_presets(self, task_name, asset_doc):
         """ Returns presets to build workfile for task name.
 
         Presets are loaded for current project set in

@@ -889,30 +890,33 @@ class BuildWorkfile:
             (dict): preset per entered task name
         """
         host_name = avalon.api.registered_host().__name__.rsplit(".", 1)[-1]
-        presets = get_project_settings(avalon.io.Session["AVALON_PROJECT"])
+        project_settings = get_project_settings(
+            avalon.io.Session["AVALON_PROJECT"]
+        )
 
+        host_settings = project_settings.get(host_name) or {}
         # Get presets for host
-        wb_settings = presets.get(host_name, {}).get("workfile_builder")
-
+        wb_settings = host_settings.get("workfile_builder")
         if not wb_settings:
             # backward compatibility
-            wb_settings = presets.get(host_name, {}).get("workfile_build")
+            wb_settings = host_settings.get("workfile_build") or {}
 
-        builder_presets = wb_settings.get("profiles")
+        builder_profiles = wb_settings.get("profiles")
+        if not builder_profiles:
+            return None
 
-        if not builder_presets:
-            return
-
-        task_name_low = task_name.lower()
-        per_task_preset = None
-        for preset in builder_presets:
-            preset_tasks = preset.get("tasks") or []
-            preset_tasks_low = [task.lower() for task in preset_tasks]
-            if task_name_low in preset_tasks_low:
-                per_task_preset = preset
-                break
-
-        return per_task_preset
+        task_type = (
+            asset_doc
+            .get("data", {})
+            .get("tasks", {})
+            .get(task_name, {})
+            .get("type")
+        )
+        filter_data = {
+            "task_types": task_type,
+            "tasks": task_name
+        }
+        return filter_profiles(builder_profiles, filter_data)
 
     def _filter_build_profiles(self, build_profiles, loaders_by_name):
         """ Filter build profiles by loaders and prepare process data.
@@ -1,9 +1,11 @@
 """Functions useful for delivery action or loader"""
 import os
 import shutil
+import glob
 import clique
+import collections
 
 
 def collect_frames(files):
     """
     Returns dict of source path and its frame, if from sequence

@@ -228,7 +230,16 @@ def process_sequence(
     Returns:
         (collections.defaultdict , int)
     """
-    if not os.path.exists(src_path):
+
+    def hash_path_exist(myPath):
+        res = myPath.replace('#', '*')
+        glob_search_results = glob.glob(res)
+        if len(glob_search_results) > 0:
+            return True
+        else:
+            return False
+
+    if not hash_path_exist(src_path):
         msg = "{} doesn't exist for {}".format(src_path,
                                                repre["_id"])
         report_items["Source file was not found"].append(msg)
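The new helper swaps frame-padding hashes for a glob wildcard so one existence check covers sequences and single files alike. A short sketch with a hypothetical path:

    import glob

    src_path = "/proj/sh010/plate.####.exr"
    pattern = src_path.replace('#', '*')   # "/proj/sh010/plate.****.exr"
    exists = len(glob.glob(pattern)) > 0   # True if any frame is on disk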
@@ -1,11 +1,10 @@
 import logging
 import os
-import shlex
 import subprocess
 import platform
 
+from .log import PypeLogger as Logger
+
 log = logging.getLogger(__name__)
 
 # MSDN process creation flag (Windows only)
 CREATE_NO_WINDOW = 0x08000000

@@ -100,7 +99,9 @@ def run_subprocess(*args, **kwargs):
     filtered_env = {str(k): str(v) for k, v in env.items()}
 
     # Use lib's logger if was not passed with kwargs.
-    logger = kwargs.pop("logger", log)
+    logger = kwargs.pop("logger", None)
+    if logger is None:
+        logger = Logger.get_logger("run_subprocess")
 
     # set overrides
     kwargs['stdout'] = kwargs.get('stdout', subprocess.PIPE)

@@ -138,6 +139,14 @@ def run_subprocess(*args, **kwargs):
     return full_output
 
 
+def path_to_subprocess_arg(path):
+    """Prepare path for subprocess arguments.
+
+    Returned path can be wrapped with quotes or kept as is.
+    """
+    return subprocess.list2cmdline([path])
+
+
 def get_pype_execute_args(*args):
     """Arguments to run pype command.
 
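For reference, subprocess.list2cmdline() is what does the quoting here; a path with spaces comes back wrapped in double quotes, a plain path is returned unchanged:

    import subprocess

    subprocess.list2cmdline(["C:/Program Files/app.exe"])  # '"C:/Program Files/app.exe"'
    subprocess.list2cmdline(["C:/tools/app.exe"])          # 'C:/tools/app.exe'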
@@ -3,6 +3,7 @@ import sys
 import time
 import logging
 import pymongo
+import certifi
 
 if sys.version_info[0] == 2:
     from urlparse import urlparse, parse_qs

@@ -85,12 +86,33 @@ def get_default_components():
     return decompose_url(mongo_url)
 
 
-def extract_port_from_url(url):
-    parsed_url = urlparse(url)
-    if parsed_url.scheme is None:
-        _url = "mongodb://{}".format(url)
-        parsed_url = urlparse(_url)
-    return parsed_url.port
+def should_add_certificate_path_to_mongo_url(mongo_url):
+    """Check if should add ca certificate to mongo url.
+
+    Since 30.9.2021 cloud mongo requires newer certificates that are not
+    available on most workstations. This adds path to certifi certificate
+    which is valid for it. To add the certificate path the url must have
+    scheme 'mongodb+srv' or have 'ssl=true' or 'tls=true' in its query.
+    """
+    parsed = urlparse(mongo_url)
+    query = parse_qs(parsed.query)
+    lowered_query_keys = set(key.lower() for key in query.keys())
+    add_certificate = False
+    # Check if url 'ssl' or 'tls' are set to 'true'
+    for key in ("ssl", "tls"):
+        if key in query and "true" in query[key]:
+            add_certificate = True
+            break
+
+    # Check if url contains 'mongodb+srv'
+    if not add_certificate and parsed.scheme == "mongodb+srv":
+        add_certificate = True
+
+    # Check if url does already contain certificate path
+    if add_certificate and "tlscafile" in lowered_query_keys:
+        add_certificate = False
+
+    return add_certificate
 
 
 def validate_mongo_connection(mongo_uri):

@@ -106,26 +128,9 @@ def validate_mongo_connection(mongo_uri):
         passed so probably couldn't connect to mongo server.
 
     """
-    parsed = urlparse(mongo_uri)
-    # Force validation of scheme
-    if parsed.scheme not in ["mongodb", "mongodb+srv"]:
-        raise pymongo.errors.InvalidURI((
-            "Invalid URI scheme:"
-            " URI must begin with 'mongodb://' or 'mongodb+srv://'"
-        ))
-    # we have mongo connection string. Let's try if we can connect.
-    components = decompose_url(mongo_uri)
-    mongo_args = {
-        "host": compose_url(**components),
-        "serverSelectionTimeoutMS": 1000
-    }
-    port = components.get("port")
-    if port is not None:
-        mongo_args["port"] = int(port)
-
-    # Create connection
-    client = pymongo.MongoClient(**mongo_args)
-    client.server_info()
+    client = OpenPypeMongoConnection.create_connection(
+        mongo_uri, retry_attempts=1
+    )
     client.close()

@@ -151,6 +156,8 @@ class OpenPypeMongoConnection:
         # Naive validation of existing connection
         try:
             connection.server_info()
+            with connection.start_session():
+                pass
         except Exception:
             connection = None

@@ -162,38 +169,53 @@ class OpenPypeMongoConnection:
         return connection
 
     @classmethod
-    def create_connection(cls, mongo_url, timeout=None):
+    def create_connection(cls, mongo_url, timeout=None, retry_attempts=None):
+        parsed = urlparse(mongo_url)
+        # Force validation of scheme
+        if parsed.scheme not in ["mongodb", "mongodb+srv"]:
+            raise pymongo.errors.InvalidURI((
+                "Invalid URI scheme:"
+                " URI must begin with 'mongodb://' or 'mongodb+srv://'"
+            ))
+
         if timeout is None:
             timeout = int(os.environ.get("AVALON_TIMEOUT") or 1000)
 
         kwargs = {
-            "host": mongo_url,
             "serverSelectionTimeoutMS": timeout
         }
+        if should_add_certificate_path_to_mongo_url(mongo_url):
+            kwargs["ssl_ca_certs"] = certifi.where()
 
-        port = extract_port_from_url(mongo_url)
-        if port is not None:
-            kwargs["port"] = int(port)
-        mongo_client = pymongo.MongoClient(**kwargs)
+        mongo_client = pymongo.MongoClient(mongo_url, **kwargs)
 
-        for _retry in range(3):
+        if retry_attempts is None:
+            retry_attempts = 3
+
+        elif not retry_attempts:
+            retry_attempts = 1
+
+        last_exc = None
+        valid = False
+        t1 = time.time()
+        for attempt in range(1, retry_attempts + 1):
             try:
-                t1 = time.time()
                 mongo_client.server_info()
-
-            except Exception:
-                cls.log.warning("Retrying...")
-                time.sleep(1)
-                timeout *= 1.5
-
-            else:
+                with mongo_client.start_session():
+                    pass
+                valid = True
                 break
 
-        else:
-            raise IOError((
-                "ERROR: Couldn't connect to {} in less than {:.3f}ms"
-            ).format(mongo_url, timeout))
+            except Exception as exc:
+                last_exc = exc
+                if attempt < retry_attempts:
+                    cls.log.warning(
+                        "Attempt {} failed. Retrying... ".format(attempt)
+                    )
+                    time.sleep(1)
+
+        if not valid:
+            raise last_exc
 
         cls.log.info("Connected to {}, delay {:.3f}s".format(
             mongo_url, time.time() - t1
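Illustrative calls for the new certificate check, assuming the function exactly as added above (hostnames are made up):

    should_add_certificate_path_to_mongo_url("mongodb+srv://cluster0.example.net")
    # -> True (srv scheme implies TLS)
    should_add_certificate_path_to_mongo_url("mongodb://db.example.net:27017/?ssl=true")
    # -> True (explicit ssl=true)
    should_add_certificate_path_to_mongo_url(
        "mongodb://db.example.net:27017/?tls=true&tlsCAFile=/certs/ca.pem")
    # -> False (a CA file is already set)
    should_add_certificate_path_to_mongo_url("mongodb://db.example.net:27017")
    # -> False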
|
@ -6,6 +6,7 @@ import logging
|
|||
import re
|
||||
import json
|
||||
import tempfile
|
||||
import distutils
|
||||
|
||||
from .execute import run_subprocess
|
||||
from .profiles_filtering import filter_profiles
|
||||
|
|
@ -35,7 +36,8 @@ def get_subset_name(
|
|||
project_name=None,
|
||||
host_name=None,
|
||||
default_template=None,
|
||||
dynamic_data=None
|
||||
dynamic_data=None,
|
||||
dbcon=None
|
||||
):
|
||||
if not family:
|
||||
return ""
|
||||
|
|
@ -46,13 +48,42 @@ def get_subset_name(
|
|||
# Use only last part of class family value split by dot (`.`)
|
||||
family = family.rsplit(".", 1)[-1]
|
||||
|
||||
if project_name is None:
|
||||
import avalon.api
|
||||
|
||||
project_name = avalon.api.Session["AVALON_PROJECT"]
|
||||
|
||||
# Function should expect asset document instead of asset id
|
||||
# - that way `dbcon` is not needed
|
||||
if dbcon is None:
|
||||
from avalon.api import AvalonMongoDB
|
||||
|
||||
dbcon = AvalonMongoDB()
|
||||
dbcon.Session["AVALON_PROJECT"] = project_name
|
||||
|
||||
dbcon.install()
|
||||
|
||||
asset_doc = dbcon.find_one(
|
||||
{
|
||||
"type": "asset",
|
||||
"_id": asset_id
|
||||
},
|
||||
{
|
||||
"data.tasks": True
|
||||
}
|
||||
)
|
||||
asset_tasks = asset_doc.get("data", {}).get("tasks") or {}
|
||||
task_info = asset_tasks.get(task_name) or {}
|
||||
task_type = task_info.get("type")
|
||||
|
||||
# Get settings
|
||||
tools_settings = get_project_settings(project_name)["global"]["tools"]
|
||||
profiles = tools_settings["creator"]["subset_name_profiles"]
|
||||
filtering_criteria = {
|
||||
"families": family,
|
||||
"hosts": host_name,
|
||||
"tasks": task_name
|
||||
"tasks": task_name,
|
||||
"task_types": task_type
|
||||
}
|
||||
|
||||
matching_profile = filter_profiles(profiles, filtering_criteria)
|
||||
|
|
@ -347,7 +378,7 @@ def oiio_supported():
|
|||
"""
|
||||
Checks if oiiotool is configured for this platform.
|
||||
|
||||
Expects full path to executable.
|
||||
Triggers simple subprocess, handles exception if fails.
|
||||
|
||||
'should_decompress' will throw exception if configured,
|
||||
but not present or not working.
|
||||
|
|
@ -355,7 +386,10 @@ def oiio_supported():
|
|||
(bool)
|
||||
"""
|
||||
oiio_path = get_oiio_tools_path()
|
||||
if not oiio_path or not os.path.exists(oiio_path):
|
||||
if oiio_path:
|
||||
oiio_path = distutils.spawn.find_executable(oiio_path)
|
||||
|
||||
if not oiio_path:
|
||||
log.debug("OIIOTool is not configured or not present at {}".
|
||||
format(oiio_path))
|
||||
return False
|
||||
|
|
|
|||
|
|
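distutils.spawn.find_executable() resolves a name against PATH and returns None when nothing is found, which is what makes the simple `if not oiio_path` above sufficient:

    from distutils.spawn import find_executable

    find_executable("oiiotool")  # e.g. "/usr/bin/oiiotool", or None if absent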
@@ -1,10 +1,28 @@
 import re
 import logging
-from .applications import compile_list_of_regexes
 
 log = logging.getLogger(__name__)
 
 
+def compile_list_of_regexes(in_list):
+    """Convert strings in entered list to compiled regex objects."""
+    regexes = list()
+    if not in_list:
+        return regexes
+
+    for item in in_list:
+        if not item:
+            continue
+        try:
+            regexes.append(re.compile(item))
+        except TypeError:
+            print((
+                "Invalid type \"{}\" value \"{}\"."
+                " Expected string based object. Skipping."
+            ).format(str(type(item)), str(item)))
+    return regexes
+
+
 def _profile_exclusion(matching_profiles, logger):
     """Find out most matching profile by host, task and family match.
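Usage sketch for the relocated helper; falsy items are skipped and non-string items are reported:

    patterns = compile_list_of_regexes(["comp.*", "light.*", None])
    # -> [re.compile('comp.*'), re.compile('light.*')]
    any(regex.match("compositing") for regex in patterns)  # True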
@@ -58,6 +58,17 @@ def is_running_from_build():
     return True
 
 
+def is_running_staging():
+    """Currently used OpenPype is staging version.
+
+    Returns:
+        bool: True if openpype version contains 'staging'.
+    """
+    if "staging" in get_openpype_version():
+        return True
+    return False
+
+
 def get_pype_info():
     """Information about currently used Pype process."""
     executable_args = get_pype_execute_args()
@@ -48,7 +48,7 @@ class _ModuleClass(object):
 
     def __getattr__(self, attr_name):
         if attr_name not in self.__attributes__:
-            if attr_name in ("__path__"):
+            if attr_name in ("__path__", "__file__"):
                 return None
             raise ImportError("No module named {}.{}".format(
                 self.name, attr_name

@@ -104,6 +104,9 @@ class _InterfacesClass(_ModuleClass):
     """
     def __getattr__(self, attr_name):
         if attr_name not in self.__attributes__:
+            if attr_name in ("__path__", "__file__"):
+                return None
+
             # Fake Interface if is not missing
             self.__attributes__[attr_name] = type(
                 attr_name,

@@ -417,7 +420,6 @@ class OpenPypeModule:
         """
         pass
 
-    @abstractmethod
     def connect_with_modules(self, enabled_modules):
         """Connect with other enabled modules."""
         pass

@@ -438,10 +440,6 @@ class OpenPypeAddOn(OpenPypeModule):
         """Initialization is not required for most addons."""
         pass
 
-    def connect_with_modules(self, enabled_modules):
-        """Do not require to implement connection with modules for addon."""
-        pass
-
 
 class ModulesManager:
     """Manager of Pype modules helps to load and prepare them to work.
@@ -2,13 +2,10 @@ import os
 import openpype
 from openpype import resources
 from openpype.modules import OpenPypeModule
-from openpype_interfaces import (
-    ITrayModule,
-    IWebServerRoutes
-)
+from openpype_interfaces import ITrayModule
 
 
-class AvalonModule(OpenPypeModule, ITrayModule, IWebServerRoutes):
+class AvalonModule(OpenPypeModule, ITrayModule):
     name = "avalon"
 
     def initialize(self, modules_settings):

@@ -55,12 +52,12 @@ class AvalonModule(OpenPypeModule, ITrayModule, IWebServerRoutes):
     def tray_init(self):
         # Add library tool
         try:
-            from avalon.tools.libraryloader import app
-            from avalon import style
             from Qt import QtGui
+            from avalon import style
+            from openpype.tools.libraryloader import LibraryLoaderWindow
 
-            self.libraryloader = app.Window(
-                icon=QtGui.QIcon(resources.pype_icon_filepath()),
+            self.libraryloader = LibraryLoaderWindow(
+                icon=QtGui.QIcon(resources.get_openpype_icon_filepath()),
                 show_projects=True,
                 show_libraries=True
             )

@@ -71,16 +68,6 @@ class AvalonModule(OpenPypeModule, ITrayModule, IWebServerRoutes):
                 exc_info=True
             )
 
-    def connect_with_modules(self, _enabled_modules):
-        return
-
-    def webserver_initialization(self, server_manager):
-        """Implementation of IWebServerRoutes interface."""
-
-        if self.tray_initialized:
-            from .rest_api import AvalonRestApiResource
-            self.rest_api_obj = AvalonRestApiResource(self, server_manager)
-
     # Definition of Tray menu
     def tray_menu(self, tray_menu):
         from Qt import QtWidgets

@@ -108,3 +95,10 @@ class AvalonModule(OpenPypeModule, ITrayModule, IWebServerRoutes):
         # for Windows
         self.libraryloader.activateWindow()
         self.libraryloader.refresh()
+
+    # Webserver module implementation
+    def webserver_initialization(self, server_manager):
+        """Add routes for webserver."""
+        if self.tray_initialized:
+            from .rest_api import AvalonRestApiResource
+            self.rest_api_obj = AvalonRestApiResource(self, server_manager)
@ -10,18 +10,14 @@ from .constants import (
|
|||
from openpype.modules import OpenPypeModule
|
||||
from openpype_interfaces import (
|
||||
ITrayModule,
|
||||
IPluginPaths,
|
||||
IFtrackEventHandlerPaths,
|
||||
ITimersManager
|
||||
IPluginPaths
|
||||
)
|
||||
|
||||
|
||||
class ClockifyModule(
|
||||
OpenPypeModule,
|
||||
ITrayModule,
|
||||
IPluginPaths,
|
||||
IFtrackEventHandlerPaths,
|
||||
ITimersManager
|
||||
IPluginPaths
|
||||
):
|
||||
name = "clockify"
|
||||
|
||||
|
|
@ -39,6 +35,11 @@ class ClockifyModule(
|
|||
|
||||
self.clockapi = ClockifyAPI(master_parent=self)
|
||||
|
||||
# TimersManager attributes
|
||||
# - set `timers_manager_connector` only in `tray_init`
|
||||
self.timers_manager_connector = None
|
||||
self._timers_manager_module = None
|
||||
|
||||
def get_global_environments(self):
|
||||
return {
|
||||
"CLOCKIFY_WORKSPACE": self.workspace_name
|
||||
|
|
@ -61,6 +62,9 @@ class ClockifyModule(
|
|||
self.bool_timer_run = False
|
||||
self.bool_api_key_set = self.clockapi.set_api()
|
||||
|
||||
# Define itself as TimersManager connector
|
||||
self.timers_manager_connector = self
|
||||
|
||||
def tray_start(self):
|
||||
if self.bool_api_key_set is False:
|
||||
self.show_settings()
|
||||
|
|
@ -87,16 +91,13 @@ class ClockifyModule(
|
|||
"actions": [actions_path]
|
||||
}
|
||||
|
||||
def get_event_handler_paths(self):
|
||||
"""Implementaton of IFtrackEventHandlerPaths to get plugin paths."""
|
||||
def get_ftrack_event_handler_paths(self):
|
||||
"""Function for Ftrack module to add ftrack event handler paths."""
|
||||
return {
|
||||
"user": [CLOCKIFY_FTRACK_USER_PATH],
|
||||
"server": [CLOCKIFY_FTRACK_SERVER_PATH]
|
||||
}
|
||||
|
||||
def connect_with_modules(self, *_a, **_kw):
|
||||
return
|
||||
|
||||
def clockify_timer_stopped(self):
|
||||
self.bool_timer_run = False
|
||||
# Call `ITimersManager` method
|
||||
|
|
@ -165,10 +166,6 @@ class ClockifyModule(
|
|||
self.set_menu_visibility()
|
||||
time.sleep(5)
|
||||
|
||||
def stop_timer(self):
|
||||
"""Implementation of ITimersManager."""
|
||||
self.clockapi.finish_time_entry()
|
||||
|
||||
def signed_in(self):
|
||||
if not self.timer_manager:
|
||||
return
|
||||
|
|
@@ -179,8 +176,60 @@ class ClockifyModule(
         if self.timer_manager.is_running:
             self.start_timer_manager(self.timer_manager.last_task)

+    def on_message_widget_close(self):
+        self.message_widget = None
+
+    # Definition of Tray menu
+    def tray_menu(self, parent_menu):
+        # Menu for Tray App
+        from Qt import QtWidgets
+        menu = QtWidgets.QMenu("Clockify", parent_menu)
+        menu.setProperty("submenu", "on")
+
+        # Actions
+        action_show_settings = QtWidgets.QAction("Settings", menu)
+        action_stop_timer = QtWidgets.QAction("Stop timer", menu)
+
+        menu.addAction(action_show_settings)
+        menu.addAction(action_stop_timer)
+
+        action_show_settings.triggered.connect(self.show_settings)
+        action_stop_timer.triggered.connect(self.stop_timer)
+
+        self.action_stop_timer = action_stop_timer
+
+        self.set_menu_visibility()
+
+        parent_menu.addMenu(menu)
+
+    def show_settings(self):
+        self.widget_settings.input_api_key.setText(self.clockapi.get_api_key())
+        self.widget_settings.show()
+
+    def set_menu_visibility(self):
+        self.action_stop_timer.setVisible(self.bool_timer_run)
+
+    # --- TimersManager connection methods ---
+    def register_timers_manager(self, timer_manager_module):
+        """Store TimersManager for future use."""
+        self._timers_manager_module = timer_manager_module
+
+    def timer_started(self, data):
+        """Tell TimersManager that timer started."""
+        if self._timers_manager_module is not None:
+            self._timers_manager_module.timer_started(self._module.id, data)
+
+    def timer_stopped(self):
+        """Tell TimersManager that timer stopped."""
+        if self._timers_manager_module is not None:
+            self._timers_manager_module.timer_stopped(self._module.id)
+
+    def stop_timer(self):
+        """Called from TimersManager to stop timer."""
+        self.clockapi.finish_time_entry()
+
     def start_timer(self, input_data):
-        """Implementation of ITimersManager."""
+        """Called from TimersManager to start timer."""
         # If not api key is not entered then skip
         if not self.clockapi.get_api_key():
             return
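Note the guard in the new connector methods: every call through `_timers_manager_module` is skipped while no manager has registered, so the module still works standalone in the tray. A condensed, runnable illustration of that pattern (names shortened; this is not the actual module class):

    class Connector(object):
        def __init__(self, module_id):
            self.id = module_id
            self._timers_manager_module = None

        def register_timers_manager(self, manager):
            self._timers_manager_module = manager

        def timer_started(self, data):
            # No-op until a manager has registered itself.
            if self._timers_manager_module is not None:
                self._timers_manager_module.timer_started(self.id, data)


    connector = Connector("clockify")
    connector.timer_started({"task": "compositing"})  # safely ignored, no manager yet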
@@ -237,36 +286,3 @@ class ClockifyModule(
         self.clockapi.start_time_entry(
             description, project_id, tag_ids=tag_ids
         )
-
-    def on_message_widget_close(self):
-        self.message_widget = None
-
-    # Definition of Tray menu
-    def tray_menu(self, parent_menu):
-        # Menu for Tray App
-        from Qt import QtWidgets
-        menu = QtWidgets.QMenu("Clockify", parent_menu)
-        menu.setProperty("submenu", "on")
-
-        # Actions
-        action_show_settings = QtWidgets.QAction("Settings", menu)
-        action_stop_timer = QtWidgets.QAction("Stop timer", menu)
-
-        menu.addAction(action_show_settings)
-        menu.addAction(action_stop_timer)
-
-        action_show_settings.triggered.connect(self.show_settings)
-        action_stop_timer.triggered.connect(self.stop_timer)
-
-        self.action_stop_timer = action_stop_timer
-
-        self.set_menu_visibility()
-
-        parent_menu.addMenu(menu)
-
-    def show_settings(self):
-        self.widget_settings.input_api_key.setText(self.clockapi.get_api_key())
-        self.widget_settings.show()
-
-    def set_menu_visibility(self):
-        self.action_stop_timer.setVisible(self.bool_timer_run)
@@ -13,7 +13,7 @@ class MessageWidget(QtWidgets.QWidget):
         super(MessageWidget, self).__init__()

         # Icon
-        icon = QtGui.QIcon(resources.pype_icon_filepath())
+        icon = QtGui.QIcon(resources.get_openpype_icon_filepath())
         self.setWindowIcon(icon)

         self.setWindowFlags(
@@ -90,7 +90,7 @@ class ClockifySettings(QtWidgets.QWidget):
         self.validated = False

         # Icon
-        icon = QtGui.QIcon(resources.pype_icon_filepath())
+        icon = QtGui.QIcon(resources.get_openpype_icon_filepath())
         self.setWindowIcon(icon)

         self.setWindowTitle("Clockify settings")
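Both widget hunks swap `resources.pype_icon_filepath()` for `resources.get_openpype_icon_filepath()`. If out-of-tree code still calls the old name, a deprecation alias along these lines would bridge it; this shim is only a sketch and is not part of this diff:

    import warnings


    def get_openpype_icon_filepath():
        # Placeholder path; the real function resolves a bundled resource file.
        return "/path/to/openpype_icon.png"


    def pype_icon_filepath():
        warnings.warn(
            "'pype_icon_filepath' is deprecated, use"
            " 'get_openpype_icon_filepath' instead",
            DeprecationWarning,
            stacklevel=2,
        )
        return get_openpype_icon_filepath()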
@@ -26,9 +26,6 @@ class DeadlineModule(OpenPypeModule, IPluginPaths):
                 "not specified. Disabling module."))
             return

-    def connect_with_modules(self, *_a, **_kw):
-        return
-
     def get_plugin_paths(self):
         """Deadline plugin paths."""
         current_dir = os.path.dirname(os.path.abspath(__file__))
@@ -11,7 +11,7 @@ import pyblish.api
 class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
     """Collect Deadline Webservice URL from instance."""

-    order = pyblish.api.CollectorOrder
+    order = pyblish.api.CollectorOrder + 0.02
     label = "Deadline Webservice from the Instance"
     families = ["rendering"]
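Pyblish executes plugins sorted by their float `order`, so the added `+ 0.02` delays this collector until plain `CollectorOrder` collectors — for example whichever one fills `context.data["defaultDeadline"]`, which is my reading of the change, not stated in the diff — have run. Tiny sketch of the ordering rule:

    import pyblish.api


    class CollectDefaults(pyblish.api.ContextPlugin):
        order = pyblish.api.CollectorOrder          # 0.0 - runs first


    class CollectFromInstance(pyblish.api.InstancePlugin):
        order = pyblish.api.CollectorOrder + 0.02   # 0.02 - runs after


    plugins = sorted([CollectFromInstance, CollectDefaults], key=lambda p: p.order)
    print([p.__name__ for p in plugins])  # ['CollectDefaults', 'CollectFromInstance']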
@@ -46,24 +46,25 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
             ["deadline"]
         )

-        try:
-            default_servers = deadline_settings["deadline_urls"]
-            project_servers = (
-                render_instance.context.data
-                ["project_settings"]
-                ["deadline"]
-                ["deadline_servers"]
-            )
-            deadline_servers = {
-                k: default_servers[k]
-                for k in project_servers
-                if k in default_servers
-            }
-
-        except AttributeError:
-            # Handle situation were we had only one url for deadline.
-            return render_instance.context.data["defaultDeadline"]
+        default_server = render_instance.context.data["defaultDeadline"]
+        instance_server = render_instance.data.get("deadlineServers")
+        if not instance_server:
+            return default_server
+
+        default_servers = deadline_settings["deadline_urls"]
+        project_servers = (
+            render_instance.context.data
+            ["project_settings"]
+            ["deadline"]
+            ["deadline_servers"]
+        )
+        deadline_servers = {
+            k: default_servers[k]
+            for k in project_servers
+            if k in default_servers
+        }
+        # This is Maya specific and may not reflect real selection of deadline
+        # url as dictionary keys in Python 2 are not ordered
         return deadline_servers[
             list(deadline_servers.keys())[
                 int(render_instance.data.get("deadlineServers"))
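The new inline comment names the real hazard: in Python 2 the dict comprehension does not preserve `project_servers` order, so the integer index stored on the instance can select different servers between runs. A `collections.OrderedDict` would keep the index stable; the sketch below uses made-up server names and URLs:

    from collections import OrderedDict

    default_servers = {
        "primary": "http://deadline-primary:8082",
        "backup": "http://deadline-backup:8082",
    }
    project_servers = ["primary", "backup"]

    # Same filtering as the hunk above, but iteration order follows
    # project_servers on every Python version.
    deadline_servers = OrderedDict(
        (key, default_servers[key])
        for key in project_servers
        if key in default_servers
    )

    index = 1  # what render_instance.data.get("deadlineServers") might hold
    print(list(deadline_servers.values())[index])  # http://deadline-backup:8082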
@@ -351,6 +351,11 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
                 f.replace(orig_scene, new_scene)
             )
             instance.data["expectedFiles"] = [new_exp]
+
+            if instance.data.get("publishRenderMetadataFolder"):
+                instance.data["publishRenderMetadataFolder"] = \
+                    instance.data["publishRenderMetadataFolder"].replace(
+                        orig_scene, new_scene)
             self.log.info("Scene name was switched {} -> {}".format(
                 orig_scene, new_scene
             ))
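The added block applies the same `orig_scene` -> `new_scene` substitution to the render-metadata folder that `expectedFiles` already receives, keeping both consistent after the scene is renamed for submission. Stand-alone illustration with invented paths:

    orig_scene = "shot010_v001"
    new_scene = "shot010_v001_publish"
    instance_data = {
        "publishRenderMetadataFolder": "/renders/shot010_v001/metadata",
    }

    if instance_data.get("publishRenderMetadataFolder"):
        instance_data["publishRenderMetadataFolder"] = (
            instance_data["publishRenderMetadataFolder"].replace(
                orig_scene, new_scene))

    print(instance_data["publishRenderMetadataFolder"])
    # /renders/shot010_v001_publish/metadata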
@@ -385,6 +385,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         """
         task = os.environ["AVALON_TASK"]
         subset = instance_data["subset"]
+        cameras = instance_data.get("cameras", [])
         instances = []
         # go through aovs in expected files
         for aov, files in exp_files[0].items():
@@ -410,7 +411,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             task[0].upper(), task[1:],
             subset[0].upper(), subset[1:])

-        subset_name = '{}_{}'.format(group_name, aov)
+        cam = [c for c in cameras if c in col.head]
+        if cam:
+            subset_name = '{}_{}_{}'.format(group_name, cam, aov)
+        else:
+            subset_name = '{}_{}'.format(group_name, aov)

         if isinstance(col, (list, tuple)):
             staging = os.path.dirname(col[0])
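The new branch injects a camera token into the subset name whenever one of the collected cameras appears in the sequence head. Mirroring the hunk faithfully with invented values shows a quirk worth knowing: `cam` is a list, so it formats with brackets:

    cameras = ["camMain", "camTop"]
    group_name = "renderCompositingDefault"
    aov = "beauty"


    class Col(object):
        head = "shot010_camTop_beauty."  # head of the file-sequence collection


    col = Col()
    cam = [c for c in cameras if c in col.head]
    if cam:
        # `cam` is a list here, so the formatted name includes brackets.
        subset_name = '{}_{}_{}'.format(group_name, cam, aov)
    else:
        subset_name = '{}_{}'.format(group_name, aov)

    print(subset_name)  # renderCompositingDefault_['camTop']_beauty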
@@ -3,6 +3,7 @@ import json
 from avalon.api import AvalonMongoDB
 from openpype.api import ProjectSettings
 from openpype.lib import create_project
+from openpype.settings import SaveWarningExc

 from openpype_modules.ftrack.lib import (
     ServerAction,
@@ -312,7 +313,6 @@ class PrepareProjectServer(ServerAction):
         if not in_data:
             return

-
         root_values = {}
         root_key = "__root__"
         for key in tuple(in_data.keys()):
@@ -392,7 +392,12 @@ class PrepareProjectServer(ServerAction):
             else:
                 attributes_entity[key] = value

-        project_settings.save()
+        try:
+            project_settings.save()
+        except SaveWarningExc as exc:
+            self.log.info("Few warnings happened during settings save:")
+            for warning in exc.warnings:
+                self.log.info(str(warning))

         # Change custom attributes on project
         if custom_attribute_values:
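`SaveWarningExc` lets `save()` report several non-fatal problems at once, and the hunk downgrades them to log lines instead of failing the action. A self-contained sketch of the pattern; the real class lives in `openpype.settings`, and the re-declaration below exists only to make the example runnable:

    import logging

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger("prepare_project")


    class SaveWarningExc(Exception):
        """Carries a list of non-fatal warnings raised while saving settings."""

        def __init__(self, warnings):
            self.warnings = warnings
            super(SaveWarningExc, self).__init__(
                "; ".join(str(w) for w in warnings))


    def save_settings(project_settings):
        try:
            project_settings.save()
        except SaveWarningExc as exc:
            log.info("Few warnings happened during settings save:")
            for warning in exc.warnings:
                log.info(str(warning))


    class _FailingSettings(object):
        def save(self):
            raise SaveWarningExc(["anatomy attribute 'fps' was skipped"])


    save_settings(_FailingSettings())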
@@ -10,6 +10,7 @@ from openpype_modules.ftrack.lib import (
     CUST_ATTR_GROUP,
     CUST_ATTR_TOOLS,
     CUST_ATTR_APPLICATIONS,
+    CUST_ATTR_INTENT,

     default_custom_attributes_definition,
     app_definitions_from_app_manager,
@@ -431,7 +432,7 @@ class CustomAttributes(BaseAction):

         intent_custom_attr_data = {
             "label": "Intent",
-            "key": "intent",
+            "key": CUST_ATTR_INTENT,
             "type": "enumerator",
             "entity_type": "assetversion",
             "group": CUST_ATTR_GROUP,
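Replacing the literal `"intent"` with `CUST_ATTR_INTENT` keeps the attribute key defined once beside the other custom-attribute constants, so producers and consumers of the attribute cannot drift apart. Sketch; both constant values below are guesses based on the replaced literal:

    # In a shared constants module (values assumed):
    CUST_ATTR_GROUP = "openpype"
    CUST_ATTR_INTENT = "intent"

    # Every definition referencing the attribute now imports the constant:
    intent_custom_attr_data = {
        "label": "Intent",
        "key": CUST_ATTR_INTENT,
        "type": "enumerator",
        "entity_type": "assetversion",
        "group": CUST_ATTR_GROUP,
    }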
@@ -23,6 +23,8 @@ class DeleteOldVersions(BaseAction):
     )
     icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg")

+    settings_key = "delete_old_versions"
+
     dbcon = AvalonMongoDB()

     inteface_title = "Choose your preferences"
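The new `settings_key` gives the action a stable identifier for locating its own block in studio settings. A duck-typed lookup might read roughly like this; the settings layout (including the `"user_handlers"` key) is an assumption, not taken from this diff:

    def get_action_settings(ftrack_settings, action):
        """Return the settings block matching the action's declared settings_key."""
        key = getattr(action, "settings_key", None)
        if not key:
            return {}
        return ftrack_settings.get("user_handlers", {}).get(key, {})


    class DeleteOldVersionsStub(object):
        settings_key = "delete_old_versions"


    settings = {"user_handlers": {"delete_old_versions": {"enabled": True}}}
    print(get_action_settings(settings, DeleteOldVersionsStub))  # {'enabled': True}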
Some files were not shown because too many files have changed in this diff.