Merge branch 'develop' of github.com:pypeclub/OpenPype into feature/OP-2868_After-effects-creator-Subset-name-change-to-main-rather-than-default

This commit is contained in:
Petr Kalis 2022-04-22 22:30:06 +02:00
commit b46f1bc42d
336 changed files with 9769 additions and 4018 deletions

315
.all-contributorsrc Normal file
View file

@ -0,0 +1,315 @@
{
"projectName": "OpenPype",
"projectOwner": "pypeclub",
"repoType": "github",
"repoHost": "https://github.com",
"files": [
"README.md"
],
"imageSize": 100,
"commit": true,
"commitConvention": "none",
"contributors": [
{
"login": "mkolar",
"name": "Milan Kolar",
"avatar_url": "https://avatars.githubusercontent.com/u/3333008?v=4",
"profile": "http://pype.club/",
"contributions": [
"code",
"doc",
"infra",
"business",
"content",
"fundingFinding",
"maintenance",
"projectManagement",
"review",
"mentoring",
"question"
]
},
{
"login": "jakubjezek001",
"name": "Jakub Ježek",
"avatar_url": "https://avatars.githubusercontent.com/u/40640033?v=4",
"profile": "https://www.linkedin.com/in/jakubjezek79",
"contributions": [
"code",
"doc",
"infra",
"content",
"review",
"maintenance",
"mentoring",
"projectManagement",
"question"
]
},
{
"login": "antirotor",
"name": "Ondřej Samohel",
"avatar_url": "https://avatars.githubusercontent.com/u/33513211?v=4",
"profile": "https://github.com/antirotor",
"contributions": [
"code",
"doc",
"infra",
"content",
"review",
"maintenance",
"mentoring",
"projectManagement",
"question"
]
},
{
"login": "iLLiCiTiT",
"name": "Jakub Trllo",
"avatar_url": "https://avatars.githubusercontent.com/u/43494761?v=4",
"profile": "https://github.com/iLLiCiTiT",
"contributions": [
"code",
"doc",
"infra",
"review",
"maintenance",
"question"
]
},
{
"login": "kalisp",
"name": "Petr Kalis",
"avatar_url": "https://avatars.githubusercontent.com/u/4457962?v=4",
"profile": "https://github.com/kalisp",
"contributions": [
"code",
"doc",
"infra",
"review",
"maintenance",
"question"
]
},
{
"login": "64qam",
"name": "64qam",
"avatar_url": "https://avatars.githubusercontent.com/u/26925793?v=4",
"profile": "https://github.com/64qam",
"contributions": [
"code",
"review",
"doc",
"infra",
"projectManagement",
"maintenance",
"content",
"userTesting"
]
},
{
"login": "BigRoy",
"name": "Roy Nieterau",
"avatar_url": "https://avatars.githubusercontent.com/u/2439881?v=4",
"profile": "http://www.colorbleed.nl/",
"contributions": [
"code",
"doc",
"review",
"mentoring",
"question"
]
},
{
"login": "tokejepsen",
"name": "Toke Jepsen",
"avatar_url": "https://avatars.githubusercontent.com/u/1860085?v=4",
"profile": "https://github.com/tokejepsen",
"contributions": [
"code",
"doc",
"review",
"mentoring",
"question"
]
},
{
"login": "jrsndl",
"name": "Jiri Sindelar",
"avatar_url": "https://avatars.githubusercontent.com/u/45896205?v=4",
"profile": "https://github.com/jrsndl",
"contributions": [
"code",
"review",
"doc",
"content",
"tutorial",
"userTesting"
]
},
{
"login": "simonebarbieri",
"name": "Simone Barbieri",
"avatar_url": "https://avatars.githubusercontent.com/u/1087869?v=4",
"profile": "https://barbierisimone.com/",
"contributions": [
"code",
"doc"
]
},
{
"login": "karimmozilla",
"name": "karimmozilla",
"avatar_url": "https://avatars.githubusercontent.com/u/82811760?v=4",
"profile": "http://karimmozilla.xyz/",
"contributions": [
"code"
]
},
{
"login": "Allan-I",
"name": "Allan I. A.",
"avatar_url": "https://avatars.githubusercontent.com/u/76656700?v=4",
"profile": "https://github.com/Allan-I",
"contributions": [
"code"
]
},
{
"login": "m-u-r-p-h-y",
"name": "murphy",
"avatar_url": "https://avatars.githubusercontent.com/u/352795?v=4",
"profile": "https://www.linkedin.com/in/mmuurrpphhyy/",
"contributions": [
"code",
"review",
"userTesting",
"doc",
"projectManagement"
]
},
{
"login": "aardschok",
"name": "Wijnand Koreman",
"avatar_url": "https://avatars.githubusercontent.com/u/26920875?v=4",
"profile": "https://github.com/aardschok",
"contributions": [
"code"
]
},
{
"login": "zhoub",
"name": "Bo Zhou",
"avatar_url": "https://avatars.githubusercontent.com/u/1798206?v=4",
"profile": "http://jedimaster.cnblogs.com/",
"contributions": [
"code"
]
},
{
"login": "ClementHector",
"name": "Clément Hector",
"avatar_url": "https://avatars.githubusercontent.com/u/7068597?v=4",
"profile": "https://www.linkedin.com/in/clementhector/",
"contributions": [
"code",
"review"
]
},
{
"login": "davidlatwe",
"name": "David Lai",
"avatar_url": "https://avatars.githubusercontent.com/u/3357009?v=4",
"profile": "https://twitter.com/davidlatwe",
"contributions": [
"code",
"review"
]
},
{
"login": "2-REC",
"name": "Derek",
"avatar_url": "https://avatars.githubusercontent.com/u/42170307?v=4",
"profile": "https://github.com/2-REC",
"contributions": [
"code",
"doc"
]
},
{
"login": "gabormarinov",
"name": "Gábor Marinov",
"avatar_url": "https://avatars.githubusercontent.com/u/8620515?v=4",
"profile": "https://github.com/gabormarinov",
"contributions": [
"code",
"doc"
]
},
{
"login": "icyvapor",
"name": "icyvapor",
"avatar_url": "https://avatars.githubusercontent.com/u/1195278?v=4",
"profile": "https://github.com/icyvapor",
"contributions": [
"code",
"doc"
]
},
{
"login": "jlorrain",
"name": "Jérôme LORRAIN",
"avatar_url": "https://avatars.githubusercontent.com/u/7955673?v=4",
"profile": "https://github.com/jlorrain",
"contributions": [
"code"
]
},
{
"login": "dmo-j-cube",
"name": "David Morris-Oliveros",
"avatar_url": "https://avatars.githubusercontent.com/u/89823400?v=4",
"profile": "https://github.com/dmo-j-cube",
"contributions": [
"code"
]
},
{
"login": "BenoitConnan",
"name": "BenoitConnan",
"avatar_url": "https://avatars.githubusercontent.com/u/82808268?v=4",
"profile": "https://github.com/BenoitConnan",
"contributions": [
"code"
]
},
{
"login": "Malthaldar",
"name": "Malthaldar",
"avatar_url": "https://avatars.githubusercontent.com/u/33671694?v=4",
"profile": "https://github.com/Malthaldar",
"contributions": [
"code"
]
},
{
"login": "svenneve",
"name": "Sven Neve",
"avatar_url": "https://avatars.githubusercontent.com/u/2472863?v=4",
"profile": "http://www.svenneve.com/",
"contributions": [
"code"
]
},
{
"login": "zafrs",
"name": "zafrs",
"avatar_url": "https://avatars.githubusercontent.com/u/26890002?v=4",
"profile": "https://github.com/zafrs",
"contributions": [
"code"
]
}
],
"contributorsPerLine": 7
}

View file

@ -80,7 +80,7 @@ jobs:
git tag -a $tag_name -m "nightly build"
- name: Push to protected main branch
uses: CasperWA/push-protected@v2
uses: CasperWA/push-protected@v2.10.0
with:
token: ${{ secrets.ADMIN_TOKEN }}
branch: main

View file

@ -68,7 +68,7 @@ jobs:
- name: 🔏 Push to protected main branch
if: steps.version.outputs.release_tag != 'skip'
uses: CasperWA/push-protected@v2
uses: CasperWA/push-protected@v2.10.0
with:
token: ${{ secrets.ADMIN_TOKEN }}
branch: main

2
.gitignore vendored
View file

@ -70,6 +70,8 @@ coverage.xml
##################
node_modules
package-lock.json
package.json
yarn.lock
openpype/premiere/ppro/js/debug.log

View file

@ -1,118 +1,177 @@
# Changelog
## [3.9.2-nightly.1](https://github.com/pypeclub/OpenPype/tree/HEAD)
## [3.10.0-nightly.1](https://github.com/pypeclub/OpenPype/tree/HEAD)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.1...HEAD)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.4...HEAD)
### 📖 Documentation
- Nuke docs with videos [\#3052](https://github.com/pypeclub/OpenPype/pull/3052)
**🚀 Enhancements**
- CI: change the version bump logic [\#2919](https://github.com/pypeclub/OpenPype/pull/2919)
- Deadline: Add headless argument [\#2916](https://github.com/pypeclub/OpenPype/pull/2916)
- Ftrack: Fill workfile in custom attribute [\#2906](https://github.com/pypeclub/OpenPype/pull/2906)
- Settings UI: Add simple tooltips for settings entities [\#2901](https://github.com/pypeclub/OpenPype/pull/2901)
- Update collect\_render.py [\#3055](https://github.com/pypeclub/OpenPype/pull/3055)
**🐛 Bug fixes**
- Ftrack: Missing Ftrack id after editorial publish [\#2905](https://github.com/pypeclub/OpenPype/pull/2905)
- AfterEffects: Fix rendering for single frame in DL [\#2875](https://github.com/pypeclub/OpenPype/pull/2875)
- Nuke: Add aov matching even for remainder and prerender [\#3060](https://github.com/pypeclub/OpenPype/pull/3060)
**🔀 Refactored code**
- General: Move formatting and workfile functions [\#2914](https://github.com/pypeclub/OpenPype/pull/2914)
- General: Move host install [\#3009](https://github.com/pypeclub/OpenPype/pull/3009)
## [3.9.4](https://github.com/pypeclub/OpenPype/tree/3.9.4) (2022-04-15)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.4-nightly.2...3.9.4)
### 📖 Documentation
- Documentation: more info about Tasks [\#3062](https://github.com/pypeclub/OpenPype/pull/3062)
- Documentation: Python requirements to 3.7.9 [\#3035](https://github.com/pypeclub/OpenPype/pull/3035)
- Website Docs: Remove unused pages [\#2974](https://github.com/pypeclub/OpenPype/pull/2974)
**🆕 New features**
- General: Local overrides for environment variables [\#3045](https://github.com/pypeclub/OpenPype/pull/3045)
**🚀 Enhancements**
- TVPaint: Added init file for worker to trigger the missing sound file dialog [\#3053](https://github.com/pypeclub/OpenPype/pull/3053)
- Ftrack: Custom attributes can be filled in slate values [\#3036](https://github.com/pypeclub/OpenPype/pull/3036)
- Resolve environment variable in google drive credential path [\#3008](https://github.com/pypeclub/OpenPype/pull/3008)
**🐛 Bug fixes**
- GitHub: Updated push-protected action in github workflow [\#3064](https://github.com/pypeclub/OpenPype/pull/3064)
- Nuke: Typos in imports from Nuke implementation [\#3061](https://github.com/pypeclub/OpenPype/pull/3061)
- Hotfix: fixing deadline job publishing [\#3059](https://github.com/pypeclub/OpenPype/pull/3059)
- General: Extract Review handle invalid characters for ffmpeg [\#3050](https://github.com/pypeclub/OpenPype/pull/3050)
- Slate Review: Support to keep format on slate concatenation [\#3049](https://github.com/pypeclub/OpenPype/pull/3049)
- Webpublisher: fix processing of workfile [\#3048](https://github.com/pypeclub/OpenPype/pull/3048)
- Ftrack: Integrate ftrack api fix [\#3044](https://github.com/pypeclub/OpenPype/pull/3044)
- Webpublisher - removed wrong hardcoded family [\#3043](https://github.com/pypeclub/OpenPype/pull/3043)
- LibraryLoader: Use current project for asset query in families filter [\#3042](https://github.com/pypeclub/OpenPype/pull/3042)
- SiteSync: Providers ignore that site is disabled [\#3041](https://github.com/pypeclub/OpenPype/pull/3041)
- Unreal: Creator import fixes [\#3040](https://github.com/pypeclub/OpenPype/pull/3040)
- SiteSync: fix transitive alternate sites, fix dropdown in Local Settings [\#3018](https://github.com/pypeclub/OpenPype/pull/3018)
**Merged pull requests:**
- Deadline: reworked pools assignment [\#3051](https://github.com/pypeclub/OpenPype/pull/3051)
- Houdini: Avoid ImportError on `hdefereval` when Houdini runs without UI [\#2987](https://github.com/pypeclub/OpenPype/pull/2987)
## [3.9.3](https://github.com/pypeclub/OpenPype/tree/3.9.3) (2022-04-07)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.3-nightly.2...3.9.3)
### 📖 Documentation
- Website Docs: Manager Ftrack fix broken links [\#2979](https://github.com/pypeclub/OpenPype/pull/2979)
**🆕 New features**
- Ftrack: Add description integrator [\#3027](https://github.com/pypeclub/OpenPype/pull/3027)
- Publishing textures for Unreal [\#2988](https://github.com/pypeclub/OpenPype/pull/2988)
- Maya to Unreal: Static and Skeletal Meshes [\#2978](https://github.com/pypeclub/OpenPype/pull/2978)
**🚀 Enhancements**
- Ftrack: Add more options for note text of integrate ftrack note [\#3025](https://github.com/pypeclub/OpenPype/pull/3025)
- Console Interpreter: Changed how console splitter size are reused on show [\#3016](https://github.com/pypeclub/OpenPype/pull/3016)
- Deadline: Use more suitable name for sequence review logic [\#3015](https://github.com/pypeclub/OpenPype/pull/3015)
- General: default workfile subset name for workfile [\#3011](https://github.com/pypeclub/OpenPype/pull/3011)
- Nuke: add concurrency attr to deadline job [\#3005](https://github.com/pypeclub/OpenPype/pull/3005)
- Deadline: priority configurable in Maya jobs [\#2995](https://github.com/pypeclub/OpenPype/pull/2995)
- Workfiles tool: Save as published workfiles [\#2937](https://github.com/pypeclub/OpenPype/pull/2937)
**🐛 Bug fixes**
- Deadline: Fixed default value of use sequence for review [\#3033](https://github.com/pypeclub/OpenPype/pull/3033)
- Settings UI: Version column can be extended so version are visible [\#3032](https://github.com/pypeclub/OpenPype/pull/3032)
- General: Fix validate asset docs plug-in filename and class name [\#3029](https://github.com/pypeclub/OpenPype/pull/3029)
- General: Fix import after movements [\#3028](https://github.com/pypeclub/OpenPype/pull/3028)
- Harmony: Added creating subset name for workfile from template [\#3024](https://github.com/pypeclub/OpenPype/pull/3024)
- AfterEffects: Added creating subset name for workfile from template [\#3023](https://github.com/pypeclub/OpenPype/pull/3023)
- General: Add example addons to ignored [\#3022](https://github.com/pypeclub/OpenPype/pull/3022)
- Maya: Remove missing import [\#3017](https://github.com/pypeclub/OpenPype/pull/3017)
- Ftrack: multiple reviewable components [\#3012](https://github.com/pypeclub/OpenPype/pull/3012)
- Tray publisher: Fixes after code movement [\#3010](https://github.com/pypeclub/OpenPype/pull/3010)
- Nuke: fixing unicode type detection in effect loaders [\#3002](https://github.com/pypeclub/OpenPype/pull/3002)
- Nuke: removing redundant Ftrack asset when farm publishing [\#2996](https://github.com/pypeclub/OpenPype/pull/2996)
**🔀 Refactored code**
- General: Move plugins register and discover [\#2935](https://github.com/pypeclub/OpenPype/pull/2935)
**Merged pull requests:**
- Maya: Allow to select invalid camera contents if no cameras found [\#3030](https://github.com/pypeclub/OpenPype/pull/3030)
- General: adding limitations for pyright [\#2994](https://github.com/pypeclub/OpenPype/pull/2994)
## [3.9.2](https://github.com/pypeclub/OpenPype/tree/3.9.2) (2022-04-04)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.2-nightly.4...3.9.2)
### 📖 Documentation
- Documentation: Added mention of adding My Drive as a root [\#2999](https://github.com/pypeclub/OpenPype/pull/2999)
- Docs: Added MongoDB requirements [\#2951](https://github.com/pypeclub/OpenPype/pull/2951)
**🆕 New features**
- nuke: bypass baking [\#2992](https://github.com/pypeclub/OpenPype/pull/2992)
**🚀 Enhancements**
- Photoshop: create image without instance [\#3001](https://github.com/pypeclub/OpenPype/pull/3001)
- TVPaint: Render scene family [\#3000](https://github.com/pypeclub/OpenPype/pull/3000)
- Nuke: ReviewDataMov Read RAW attribute [\#2985](https://github.com/pypeclub/OpenPype/pull/2985)
- SiteSync: Added compute\_resource\_sync\_sites to sync\_server\_module [\#2983](https://github.com/pypeclub/OpenPype/pull/2983)
- General: `METADATA\_KEYS` constant as `frozenset` for optimal immutable lookup [\#2980](https://github.com/pypeclub/OpenPype/pull/2980)
- General: Tools with host filters [\#2975](https://github.com/pypeclub/OpenPype/pull/2975)
- Hero versions: Use custom templates [\#2967](https://github.com/pypeclub/OpenPype/pull/2967)
- Slack: Added configurable maximum file size of review upload to Slack [\#2945](https://github.com/pypeclub/OpenPype/pull/2945)
- NewPublisher: Prepared implementation of optional pyblish plugin [\#2943](https://github.com/pypeclub/OpenPype/pull/2943)
- TVPaint: Extractor to convert PNG into EXR [\#2942](https://github.com/pypeclub/OpenPype/pull/2942)
**🐛 Bug fixes**
- Hosts: Remove path existence checks in 'add\_implementation\_envs' [\#3004](https://github.com/pypeclub/OpenPype/pull/3004)
- Fix - remove doubled dot in workfile created from template [\#2998](https://github.com/pypeclub/OpenPype/pull/2998)
- PS: fix renaming subset incorrectly in PS [\#2991](https://github.com/pypeclub/OpenPype/pull/2991)
- Fix: Disable setuptools auto discovery [\#2990](https://github.com/pypeclub/OpenPype/pull/2990)
- AEL: fix opening existing workfile if no scene opened [\#2989](https://github.com/pypeclub/OpenPype/pull/2989)
- Maya: Don't do hardlinks on windows for look publishing [\#2986](https://github.com/pypeclub/OpenPype/pull/2986)
- Settings UI: Fix version completer on linux [\#2981](https://github.com/pypeclub/OpenPype/pull/2981)
- Photoshop: Fix creation of subset names in PS review and workfile [\#2969](https://github.com/pypeclub/OpenPype/pull/2969)
- Slack: Added default for review\_upload\_limit for Slack [\#2965](https://github.com/pypeclub/OpenPype/pull/2965)
- General: OIIO conversion for ffmpeg can handle sequences [\#2958](https://github.com/pypeclub/OpenPype/pull/2958)
- Settings: Conditional dictionary avoid invalid logs [\#2956](https://github.com/pypeclub/OpenPype/pull/2956)
- General: Smaller fixes and typos [\#2950](https://github.com/pypeclub/OpenPype/pull/2950)
- LogViewer: Don't refresh on initialization [\#2949](https://github.com/pypeclub/OpenPype/pull/2949)
- nuke: python3 compatibility issue with `iteritems` [\#2948](https://github.com/pypeclub/OpenPype/pull/2948)
- General: anatomy data with correct task short key [\#2947](https://github.com/pypeclub/OpenPype/pull/2947)
- SceneInventory: Fix imports in UI [\#2944](https://github.com/pypeclub/OpenPype/pull/2944)
- Slack: add generic exception [\#2941](https://github.com/pypeclub/OpenPype/pull/2941)
- General: Python specific vendor paths on env injection [\#2939](https://github.com/pypeclub/OpenPype/pull/2939)
- General: More fail safe delete old versions [\#2936](https://github.com/pypeclub/OpenPype/pull/2936)
- Settings UI: Collapsed of collapsible wrapper works as expected [\#2934](https://github.com/pypeclub/OpenPype/pull/2934)
**Merged pull requests:**
- Bump paramiko from 2.9.2 to 2.10.1 [\#2973](https://github.com/pypeclub/OpenPype/pull/2973)
- Bump minimist from 1.2.5 to 1.2.6 in /website [\#2954](https://github.com/pypeclub/OpenPype/pull/2954)
- Bump node-forge from 1.2.1 to 1.3.0 in /website [\#2953](https://github.com/pypeclub/OpenPype/pull/2953)
- Maya - added transparency into review creator [\#2952](https://github.com/pypeclub/OpenPype/pull/2952)
## [3.9.1](https://github.com/pypeclub/OpenPype/tree/3.9.1) (2022-03-18)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.1-nightly.3...3.9.1)
**🚀 Enhancements**
- General: Change how OPENPYPE\_DEBUG value is handled [\#2907](https://github.com/pypeclub/OpenPype/pull/2907)
- nuke: imageio adding ocio config version 1.2 [\#2897](https://github.com/pypeclub/OpenPype/pull/2897)
- Flame: support for comment with xml attribute overrides [\#2892](https://github.com/pypeclub/OpenPype/pull/2892)
- Nuke: ExtractReviewSlate can handle more codes and profiles [\#2879](https://github.com/pypeclub/OpenPype/pull/2879)
- Flame: sequence used for reference video [\#2869](https://github.com/pypeclub/OpenPype/pull/2869)
**🐛 Bug fixes**
- General: Fix use of Anatomy roots [\#2904](https://github.com/pypeclub/OpenPype/pull/2904)
- Fixing gap detection in extract review [\#2902](https://github.com/pypeclub/OpenPype/pull/2902)
- Pyblish Pype - ensure current state is correct when entering new group order [\#2899](https://github.com/pypeclub/OpenPype/pull/2899)
- SceneInventory: Fix import of load function [\#2894](https://github.com/pypeclub/OpenPype/pull/2894)
- Harmony - fixed creator issue [\#2891](https://github.com/pypeclub/OpenPype/pull/2891)
- General: Remove forgotten use of avalon Creator [\#2885](https://github.com/pypeclub/OpenPype/pull/2885)
- General: Avoid circular import [\#2884](https://github.com/pypeclub/OpenPype/pull/2884)
- Fixes for attaching loaded containers \(\#2837\) [\#2874](https://github.com/pypeclub/OpenPype/pull/2874)
**🔀 Refactored code**
- General: Reduce style usage to OpenPype repository [\#2889](https://github.com/pypeclub/OpenPype/pull/2889)
- General: Move loader logic from avalon to openpype [\#2886](https://github.com/pypeclub/OpenPype/pull/2886)
## [3.9.0](https://github.com/pypeclub/OpenPype/tree/3.9.0) (2022-03-14)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.0-nightly.9...3.9.0)
**Deprecated:**
- AssetCreator: Remove the tool [\#2845](https://github.com/pypeclub/OpenPype/pull/2845)
### 📖 Documentation
- Documentation: Change Photoshop & AfterEffects plugin path [\#2878](https://github.com/pypeclub/OpenPype/pull/2878)
**🚀 Enhancements**
- General: Subset name filtering in ExtractReview outputs [\#2872](https://github.com/pypeclub/OpenPype/pull/2872)
- NewPublisher: Descriptions and Icons in creator dialog [\#2867](https://github.com/pypeclub/OpenPype/pull/2867)
- NewPublisher: Changing task on publishing instance [\#2863](https://github.com/pypeclub/OpenPype/pull/2863)
- TrayPublisher: Choose project widget is more clear [\#2859](https://github.com/pypeclub/OpenPype/pull/2859)
- New: Validation exceptions [\#2841](https://github.com/pypeclub/OpenPype/pull/2841)
- Maya: add loaded containers to published instance [\#2837](https://github.com/pypeclub/OpenPype/pull/2837)
- Ftrack: Can sync fps as string [\#2836](https://github.com/pypeclub/OpenPype/pull/2836)
- General: Custom function for find executable [\#2822](https://github.com/pypeclub/OpenPype/pull/2822)
- General: Color dialog UI fixes [\#2817](https://github.com/pypeclub/OpenPype/pull/2817)
- global: letter box calculated on output as last process [\#2812](https://github.com/pypeclub/OpenPype/pull/2812)
- Nuke: adding Reformat to baking mov plugin [\#2811](https://github.com/pypeclub/OpenPype/pull/2811)
- Manager: Update all to latest button [\#2805](https://github.com/pypeclub/OpenPype/pull/2805)
**🐛 Bug fixes**
- General: Missing time function [\#2877](https://github.com/pypeclub/OpenPype/pull/2877)
- Deadline: Fix plugin name for tile assemble [\#2868](https://github.com/pypeclub/OpenPype/pull/2868)
- Nuke: gizmo precollect fix [\#2866](https://github.com/pypeclub/OpenPype/pull/2866)
- General: Fix hardlink for windows [\#2864](https://github.com/pypeclub/OpenPype/pull/2864)
- General: ffmpeg was crashing on slate merge [\#2860](https://github.com/pypeclub/OpenPype/pull/2860)
- WebPublisher: Video file was published with one too many frame [\#2858](https://github.com/pypeclub/OpenPype/pull/2858)
- New Publisher: Error dialog got right styles [\#2857](https://github.com/pypeclub/OpenPype/pull/2857)
- General: Fix getattr callback on dynamic modules [\#2855](https://github.com/pypeclub/OpenPype/pull/2855)
- Nuke: slate resolution to input video resolution [\#2853](https://github.com/pypeclub/OpenPype/pull/2853)
- WebPublisher: Fix username stored in DB [\#2852](https://github.com/pypeclub/OpenPype/pull/2852)
- WebPublisher: Fix wrong number of frames for video file [\#2851](https://github.com/pypeclub/OpenPype/pull/2851)
- Nuke: Fix family test in validate\_write\_legacy to work with stillImage [\#2847](https://github.com/pypeclub/OpenPype/pull/2847)
- Nuke: fix multiple baking profile farm publishing [\#2842](https://github.com/pypeclub/OpenPype/pull/2842)
- Blender: Fixed parameters for FBX export of the camera [\#2840](https://github.com/pypeclub/OpenPype/pull/2840)
- Maya: Stop creation of reviews for Cryptomattes [\#2832](https://github.com/pypeclub/OpenPype/pull/2832)
- Deadline: Remove recreated event [\#2828](https://github.com/pypeclub/OpenPype/pull/2828)
- Deadline: Added missing events folder [\#2827](https://github.com/pypeclub/OpenPype/pull/2827)
- Maya: Deformer node ids validation plugin [\#2826](https://github.com/pypeclub/OpenPype/pull/2826)
- Settings: Missing document with OP versions may break start of OpenPype [\#2825](https://github.com/pypeclub/OpenPype/pull/2825)
- Deadline: more detailed temp file name for environment json [\#2824](https://github.com/pypeclub/OpenPype/pull/2824)
- General: Host name was formed from obsolete code [\#2821](https://github.com/pypeclub/OpenPype/pull/2821)
- Settings UI: Fix "Apply from" action [\#2820](https://github.com/pypeclub/OpenPype/pull/2820)
- Ftrack: Job killer with missing user [\#2819](https://github.com/pypeclub/OpenPype/pull/2819)
- Nuke: Use AVALON\_APP to get value for "app" key [\#2818](https://github.com/pypeclub/OpenPype/pull/2818)
- StandalonePublisher: use dynamic groups in subset names [\#2816](https://github.com/pypeclub/OpenPype/pull/2816)
**🔀 Refactored code**
- Refactor: move webserver tool to openpype [\#2876](https://github.com/pypeclub/OpenPype/pull/2876)
- General: Move create logic from avalon to OpenPype [\#2854](https://github.com/pypeclub/OpenPype/pull/2854)
- General: Add vendors from avalon [\#2848](https://github.com/pypeclub/OpenPype/pull/2848)
- General: Basic event system [\#2846](https://github.com/pypeclub/OpenPype/pull/2846)
- General: Move change context functions [\#2839](https://github.com/pypeclub/OpenPype/pull/2839)
- Tools: Don't use avalon tools code [\#2829](https://github.com/pypeclub/OpenPype/pull/2829)
- Move Unreal Implementation to OpenPype [\#2823](https://github.com/pypeclub/OpenPype/pull/2823)
## [3.8.2](https://github.com/pypeclub/OpenPype/tree/3.8.2) (2022-02-07)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.8.2-nightly.3...3.8.2)

View file

@ -1,4 +1,7 @@
<!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section -->
[![All Contributors](https://img.shields.io/badge/all_contributors-26-orange.svg?style=flat-square)](#contributors-)
<!-- ALL-CONTRIBUTORS-BADGE:END -->
OpenPype
====
@ -283,3 +286,54 @@ Running tests
To run tests, execute `.\tools\run_tests(.ps1|.sh)`.
**Note that it needs existing virtual environment.**
## Contributors ✨
Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)):
<!-- ALL-CONTRIBUTORS-LIST:START - Do not remove or modify this section -->
<!-- prettier-ignore-start -->
<!-- markdownlint-disable -->
<table>
<tr>
<td align="center"><a href="http://pype.club/"><img src="https://avatars.githubusercontent.com/u/3333008?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Milan Kolar</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=mkolar" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=mkolar" title="Documentation">📖</a> <a href="#infra-mkolar" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#business-mkolar" title="Business development">💼</a> <a href="#content-mkolar" title="Content">🖋</a> <a href="#fundingFinding-mkolar" title="Funding Finding">🔍</a> <a href="#maintenance-mkolar" title="Maintenance">🚧</a> <a href="#projectManagement-mkolar" title="Project Management">📆</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Amkolar" title="Reviewed Pull Requests">👀</a> <a href="#mentoring-mkolar" title="Mentoring">🧑‍🏫</a> <a href="#question-mkolar" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://www.linkedin.com/in/jakubjezek79"><img src="https://avatars.githubusercontent.com/u/40640033?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Jakub Ježek</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=jakubjezek001" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=jakubjezek001" title="Documentation">📖</a> <a href="#infra-jakubjezek001" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#content-jakubjezek001" title="Content">🖋</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Ajakubjezek001" title="Reviewed Pull Requests">👀</a> <a href="#maintenance-jakubjezek001" title="Maintenance">🚧</a> <a href="#mentoring-jakubjezek001" title="Mentoring">🧑‍🏫</a> <a href="#projectManagement-jakubjezek001" title="Project Management">📆</a> <a href="#question-jakubjezek001" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://github.com/antirotor"><img src="https://avatars.githubusercontent.com/u/33513211?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Ondřej Samohel</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=antirotor" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=antirotor" title="Documentation">📖</a> <a href="#infra-antirotor" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#content-antirotor" title="Content">🖋</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Aantirotor" title="Reviewed Pull Requests">👀</a> <a href="#maintenance-antirotor" title="Maintenance">🚧</a> <a href="#mentoring-antirotor" title="Mentoring">🧑‍🏫</a> <a href="#projectManagement-antirotor" title="Project Management">📆</a> <a href="#question-antirotor" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://github.com/iLLiCiTiT"><img src="https://avatars.githubusercontent.com/u/43494761?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Jakub Trllo</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=iLLiCiTiT" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=iLLiCiTiT" title="Documentation">📖</a> <a href="#infra-iLLiCiTiT" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3AiLLiCiTiT" title="Reviewed Pull Requests">👀</a> <a href="#maintenance-iLLiCiTiT" title="Maintenance">🚧</a> <a href="#question-iLLiCiTiT" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://github.com/kalisp"><img src="https://avatars.githubusercontent.com/u/4457962?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Petr Kalis</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=kalisp" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=kalisp" title="Documentation">📖</a> <a href="#infra-kalisp" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Akalisp" title="Reviewed Pull Requests">👀</a> <a href="#maintenance-kalisp" title="Maintenance">🚧</a> <a href="#question-kalisp" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://github.com/64qam"><img src="https://avatars.githubusercontent.com/u/26925793?v=4?s=100" width="100px;" alt=""/><br /><sub><b>64qam</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=64qam" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3A64qam" title="Reviewed Pull Requests">👀</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=64qam" title="Documentation">📖</a> <a href="#infra-64qam" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#projectManagement-64qam" title="Project Management">📆</a> <a href="#maintenance-64qam" title="Maintenance">🚧</a> <a href="#content-64qam" title="Content">🖋</a> <a href="#userTesting-64qam" title="User Testing">📓</a></td>
<td align="center"><a href="http://www.colorbleed.nl/"><img src="https://avatars.githubusercontent.com/u/2439881?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Roy Nieterau</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=BigRoy" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=BigRoy" title="Documentation">📖</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3ABigRoy" title="Reviewed Pull Requests">👀</a> <a href="#mentoring-BigRoy" title="Mentoring">🧑‍🏫</a> <a href="#question-BigRoy" title="Answering Questions">💬</a></td>
</tr>
<tr>
<td align="center"><a href="https://github.com/tokejepsen"><img src="https://avatars.githubusercontent.com/u/1860085?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Toke Jepsen</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=tokejepsen" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=tokejepsen" title="Documentation">📖</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Atokejepsen" title="Reviewed Pull Requests">👀</a> <a href="#mentoring-tokejepsen" title="Mentoring">🧑‍🏫</a> <a href="#question-tokejepsen" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://github.com/jrsndl"><img src="https://avatars.githubusercontent.com/u/45896205?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Jiri Sindelar</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=jrsndl" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Ajrsndl" title="Reviewed Pull Requests">👀</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=jrsndl" title="Documentation">📖</a> <a href="#content-jrsndl" title="Content">🖋</a> <a href="#tutorial-jrsndl" title="Tutorials"></a> <a href="#userTesting-jrsndl" title="User Testing">📓</a></td>
<td align="center"><a href="https://barbierisimone.com/"><img src="https://avatars.githubusercontent.com/u/1087869?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Simone Barbieri</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=simonebarbieri" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=simonebarbieri" title="Documentation">📖</a></td>
<td align="center"><a href="http://karimmozilla.xyz/"><img src="https://avatars.githubusercontent.com/u/82811760?v=4?s=100" width="100px;" alt=""/><br /><sub><b>karimmozilla</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=karimmozilla" title="Code">💻</a></td>
<td align="center"><a href="https://github.com/Allan-I"><img src="https://avatars.githubusercontent.com/u/76656700?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Allan I. A.</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=Allan-I" title="Code">💻</a></td>
<td align="center"><a href="https://www.linkedin.com/in/mmuurrpphhyy/"><img src="https://avatars.githubusercontent.com/u/352795?v=4?s=100" width="100px;" alt=""/><br /><sub><b>murphy</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=m-u-r-p-h-y" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Am-u-r-p-h-y" title="Reviewed Pull Requests">👀</a> <a href="#userTesting-m-u-r-p-h-y" title="User Testing">📓</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=m-u-r-p-h-y" title="Documentation">📖</a> <a href="#projectManagement-m-u-r-p-h-y" title="Project Management">📆</a></td>
<td align="center"><a href="https://github.com/aardschok"><img src="https://avatars.githubusercontent.com/u/26920875?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Wijnand Koreman</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=aardschok" title="Code">💻</a></td>
</tr>
<tr>
<td align="center"><a href="http://jedimaster.cnblogs.com/"><img src="https://avatars.githubusercontent.com/u/1798206?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Bo Zhou</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=zhoub" title="Code">💻</a></td>
<td align="center"><a href="https://www.linkedin.com/in/clementhector/"><img src="https://avatars.githubusercontent.com/u/7068597?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Clément Hector</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=ClementHector" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3AClementHector" title="Reviewed Pull Requests">👀</a></td>
<td align="center"><a href="https://twitter.com/davidlatwe"><img src="https://avatars.githubusercontent.com/u/3357009?v=4?s=100" width="100px;" alt=""/><br /><sub><b>David Lai</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=davidlatwe" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Adavidlatwe" title="Reviewed Pull Requests">👀</a></td>
<td align="center"><a href="https://github.com/2-REC"><img src="https://avatars.githubusercontent.com/u/42170307?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Derek </b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=2-REC" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=2-REC" title="Documentation">📖</a></td>
<td align="center"><a href="https://github.com/gabormarinov"><img src="https://avatars.githubusercontent.com/u/8620515?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Gábor Marinov</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=gabormarinov" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=gabormarinov" title="Documentation">📖</a></td>
<td align="center"><a href="https://github.com/icyvapor"><img src="https://avatars.githubusercontent.com/u/1195278?v=4?s=100" width="100px;" alt=""/><br /><sub><b>icyvapor</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=icyvapor" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=icyvapor" title="Documentation">📖</a></td>
<td align="center"><a href="https://github.com/jlorrain"><img src="https://avatars.githubusercontent.com/u/7955673?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Jérôme LORRAIN</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=jlorrain" title="Code">💻</a></td>
</tr>
<tr>
<td align="center"><a href="https://github.com/dmo-j-cube"><img src="https://avatars.githubusercontent.com/u/89823400?v=4?s=100" width="100px;" alt=""/><br /><sub><b>David Morris-Oliveros</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=dmo-j-cube" title="Code">💻</a></td>
<td align="center"><a href="https://github.com/BenoitConnan"><img src="https://avatars.githubusercontent.com/u/82808268?v=4?s=100" width="100px;" alt=""/><br /><sub><b>BenoitConnan</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=BenoitConnan" title="Code">💻</a></td>
<td align="center"><a href="https://github.com/Malthaldar"><img src="https://avatars.githubusercontent.com/u/33671694?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Malthaldar</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=Malthaldar" title="Code">💻</a></td>
<td align="center"><a href="http://www.svenneve.com/"><img src="https://avatars.githubusercontent.com/u/2472863?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Sven Neve</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=svenneve" title="Code">💻</a></td>
<td align="center"><a href="https://github.com/zafrs"><img src="https://avatars.githubusercontent.com/u/26890002?v=4?s=100" width="100px;" alt=""/><br /><sub><b>zafrs</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=zafrs" title="Code">💻</a></td>
</tr>
</table>
<!-- markdownlint-restore -->
<!-- prettier-ignore-end -->
<!-- ALL-CONTRIBUTORS-LIST:END -->
This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome!

View file

@ -1,155 +1,5 @@
# -*- coding: utf-8 -*-
"""Pype module."""
import os
import platform
import functools
import logging
from .settings import get_project_settings
from .lib import (
Anatomy,
filter_pyblish_plugins,
set_plugin_attributes_from_settings,
change_timer_to_current_context,
register_event_callback,
)
# Lazily-imported module handles; they stay ``None`` until the
# ``import_wrapper`` decorator populates them on first use so that merely
# importing this package has no avalon/pyblish side effects.
pyblish = avalon = _original_discover = None
log = logging.getLogger(__name__)
# Absolute path of this package and its bundled plugin directories.
PACKAGE_DIR = os.path.dirname(os.path.abspath(__file__))
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
# Global plugin paths
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
def import_wrapper(func):
    """Decorator deferring avalon/pyblish imports to first call.

    The first time any wrapped function runs, the heavy ``pyblish`` and
    ``avalon`` modules are imported into module globals and the untouched
    ``avalon.api.discover`` is captured so it can be monkey patched later
    without modifying upstream code.
    """

    @functools.wraps(func)
    def _lazy_call(*args, **kwargs):
        global pyblish, avalon, _original_discover

        if pyblish is None:
            # One-time setup: bind modules into globals and remember the
            # original discover so ``patched_discover`` can delegate to it.
            from pyblish import api as pyblish
            from avalon import api as avalon

            _original_discover = avalon.discover
        return func(*args, **kwargs)

    return _lazy_call
@import_wrapper
def patched_discover(superclass):
    """Patched replacement for ``avalon.api.discover``.

    Runs the original discovery, keeps only plugins that are real
    subclasses of *superclass*, and applies plugin attribute presets
    from settings before returning them.
    """
    discovered = _original_discover(superclass)

    valid_plugins = []
    for plugin in discovered:
        if issubclass(plugin, superclass):
            valid_plugins.append(plugin)

    set_plugin_attributes_from_settings(valid_plugins, superclass)
    return valid_plugins
@import_wrapper
def install():
    """Install Pype to Avalon.

    Registers global pyblish/loader plugin paths, optionally registers
    studio-specific project plugins (driven by project settings) and
    monkey patches avalon's plugin discovery so settings-based plugin
    presets are applied on every discover call.
    """
    from pyblish.lib import MessageHandler
    from openpype.modules import load_modules
    from openpype.pipeline import (
        LegacyCreator,
        register_loader_plugin_path,
        register_inventory_action,
    )
    from avalon import pipeline
    # Make sure modules are loaded
    load_modules()

    def modified_emit(obj, record):
        """Method replacing `emit` in Pyblish's MessageHandler."""
        # Resolve %-style args eagerly so stored records render correctly.
        record.msg = record.getMessage()
        obj.records.append(record)
    MessageHandler.emit = modified_emit
    log.info("Registering global plug-ins..")
    pyblish.register_plugin_path(PUBLISH_PATH)
    pyblish.register_discovery_filter(filter_pyblish_plugins)
    register_loader_plugin_path(LOAD_PATH)
    project_name = os.environ.get("AVALON_PROJECT")
    # Register studio specific plugins
    if project_name:
        anatomy = Anatomy(project_name)
        anatomy.set_root_environments()
        avalon.register_root(anatomy.roots)
        project_settings = get_project_settings(project_name)
        platform_name = platform.system().lower()
        # Plugin paths are configured per platform in project settings.
        project_plugins = (
            project_settings
            .get("global", {})
            .get("project_plugins", {})
            .get(platform_name)
        ) or []
        for path in project_plugins:
            # Paths may contain environment placeholders; keep the raw
            # value when a referenced variable is missing.
            try:
                path = str(path.format(**os.environ))
            except KeyError:
                pass
            # Skip empty or non-existent paths silently (best-effort).
            if not path or not os.path.exists(path):
                continue
            pyblish.register_plugin_path(path)
            register_loader_plugin_path(path)
            avalon.register_plugin_path(LegacyCreator, path)
            register_inventory_action(path)
    # apply monkey patched discover to original one
    log.info("Patching discovery")
    avalon.discover = patched_discover
    pipeline.discover = patched_discover
    register_event_callback("taskChanged", _on_task_change)
def _on_task_change():
    # Re-point the running timer at the newly selected task context.
    change_timer_to_current_context()
@import_wrapper
def uninstall():
    """Uninstall Pype from Avalon.

    Deregisters the global plugin paths registered by :func:`install`
    and restores the original (unpatched) avalon discover function.
    """
    from openpype.pipeline import deregister_loader_plugin_path
    log.info("Deregistering global plug-ins..")
    pyblish.deregister_plugin_path(PUBLISH_PATH)
    pyblish.deregister_discovery_filter(filter_pyblish_plugins)
    deregister_loader_plugin_path(LOAD_PATH)
    log.info("Global plug-ins unregistred")
    # restore original discover
    avalon.discover = _original_discover

View file

@ -1,35 +0,0 @@
import os
from openpype.lib import PreLaunchHook
class PrePython2Vendor(PreLaunchHook):
    """Prepend python 2 dependencies for py2 hosts."""
    order = 10

    def execute(self):
        # Only applications still running on Python 2 need the vendor dir.
        if not self.application.use_python_2:
            return

        self.log.info("adding global python 2 vendor")
        # Vendored Python 2 modules live under the OpenPype repository root.
        vendor_dir = os.path.join(
            os.getenv("OPENPYPE_REPOS_ROOT"),
            "openpype",
            "vendor",
            "python",
            "python_2"
        )

        # Put the vendor dir first, then whatever PYTHONPATH the launch
        # context already carries, so vendored modules win on import.
        paths = [vendor_dir]
        existing_path = self.launch_context.env.get("PYTHONPATH")
        if existing_path:
            paths.append(existing_path)

        # Set new PYTHONPATH to launch context environments
        self.launch_context.env["PYTHONPATH"] = os.pathsep.join(paths)

View file

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.22"
<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.23"
ExtensionBundleName="openpype" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ExtensionList>
<Extension Id="com.openpype.AE.panel" Version="1.0" />

View file

@ -417,7 +417,9 @@ function getRenderInfo(){
var file_url = item.file.toString();
return JSON.stringify({
"file_name": file_url
"file_name": file_url,
"width": render_item.comp.width,
"height": render_item.comp.height
})
}

View file

@ -6,6 +6,7 @@ import logging
from Qt import QtWidgets
from openpype.pipeline import install_host
from openpype.lib.remote_publish import headless_publish
from openpype.tools.utils import host_tools
@ -22,10 +23,9 @@ def safe_excepthook(*args):
def main(*subprocess_args):
sys.excepthook = safe_excepthook
import avalon.api
from openpype.hosts.aftereffects import api
avalon.api.install(api)
install_host(api)
os.environ["OPENPYPE_LOG_NO_COLORS"] = "False"
app = QtWidgets.QApplication([])

View file

@ -4,17 +4,17 @@ import sys
from Qt import QtWidgets
import pyblish.api
import avalon.api
from avalon import io
from openpype import lib
from openpype.api import Logger
from openpype.pipeline import (
LegacyCreator,
BaseCreator,
register_loader_plugin_path,
register_creator_plugin_path,
deregister_loader_plugin_path,
deregister_creator_plugin_path,
AVALON_CONTAINER_ID,
registered_host,
)
import openpype.hosts.aftereffects
from openpype.lib import register_event_callback
@ -40,8 +40,7 @@ def install():
pyblish.api.register_plugin_path(PUBLISH_PATH)
register_loader_plugin_path(LOAD_PATH)
avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH)
avalon.api.register_plugin_path(BaseCreator, CREATE_PATH)
register_creator_plugin_path(CREATE_PATH)
log.info(PUBLISH_PATH)
pyblish.api.register_callback(
@ -54,7 +53,7 @@ def install():
def uninstall():
pyblish.api.deregister_plugin_path(PUBLISH_PATH)
deregister_loader_plugin_path(LOAD_PATH)
avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH)
deregister_creator_plugin_path(CREATE_PATH)
def application_launch():

View file

@ -29,6 +29,8 @@ class AEItem(object):
frameRate = attr.ib(default=None)
file_name = attr.ib(default=None)
instance_id = attr.ib(default=None) # New Publisher
width = attr.ib(default=None)
height = attr.ib(default=None)
class AfterEffectsServerStub():
@ -609,7 +611,9 @@ class AfterEffectsServerStub():
d.get('workAreaDuration'),
d.get('frameRate'),
d.get('file_name'),
d.get("instance_id"))
d.get("instance_id"),
d.get("width"),
d.get("height"))
ret.append(item)
return ret

View file

@ -16,7 +16,7 @@ class RenderCreator(Creator):
family = "render"
description = "Render creator"
create_allow_context_change = False
create_allow_context_change = True
def __init__(
self, create_context, system_settings, project_settings, headless=False

View file

@ -2,7 +2,6 @@ import os
import re
import tempfile
import attr
from copy import deepcopy
import pyblish.api
@ -23,11 +22,12 @@ class AERenderInstance(RenderInstance):
stagingDir = attr.ib(default=None)
app_version = attr.ib(default=None)
publish_attributes = attr.ib(default=None)
file_name = attr.ib(default=None)
class CollectAERender(abstract_collect_render.AbstractCollectRender):
order = pyblish.api.CollectorOrder + 0.498
order = pyblish.api.CollectorOrder + 0.405
label = "Collect After Effects Render Layers"
hosts = ["aftereffects"]
@ -64,8 +64,6 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
if family not in ["render", "renderLocal"]: # legacy
continue
asset_entity = inst.data["assetEntity"]
item_id = inst.data["members"][0]
work_area_info = CollectAERender.get_stub().get_work_area(
@ -84,8 +82,11 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
fps = work_area_info.frameRate
# TODO add resolution when supported by extension
task_name = (inst.data.get("task") or
list(asset_entity["data"]["tasks"].keys())[0]) # lega
task_name = inst.data.get("task") # legacy
render_q = CollectAERender.get_stub().get_render_info()
if not render_q:
raise ValueError("No file extension set in Render Queue")
subset_name = inst.data["subset"]
instance = AERenderInstance(
@ -103,12 +104,8 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
publish=True,
renderer='aerender',
name=subset_name,
resolutionWidth=asset_entity["data"].get(
"resolutionWidth",
project_entity["data"]["resolutionWidth"]),
resolutionHeight=asset_entity["data"].get(
"resolutionHeight",
project_entity["data"]["resolutionHeight"]),
resolutionWidth=render_q.width,
resolutionHeight=render_q.height,
pixelAspect=1,
tileRendering=False,
tilesX=0,
@ -119,8 +116,8 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
toBeRenderedOn='deadline',
fps=fps,
app_version=app_version,
anatomyData=deepcopy(inst.data["anatomyData"]),
publish_attributes=inst.data.get("publish_attributes")
publish_attributes=inst.data.get("publish_attributes"),
file_name=render_q.file_name
)
comp = compositions_by_id.get(int(item_id))
@ -165,15 +162,11 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
start = render_instance.frameStart
end = render_instance.frameEnd
# pull file name from Render Queue Output module
render_q = CollectAERender.get_stub().get_render_info()
if not render_q:
raise ValueError("No file extension set in Render Queue")
_, ext = os.path.splitext(os.path.basename(render_q.file_name))
_, ext = os.path.splitext(os.path.basename(render_instance.file_name))
base_dir = self._get_output_dir(render_instance)
expected_files = []
if "#" not in render_q.file_name: # single frame (mov)W
if "#" not in render_instance.file_name: # single frame (mov)W
path = os.path.join(base_dir, "{}_{}_{}.{}".format(
render_instance.asset,
render_instance.subset,
@ -216,8 +209,6 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
def _update_for_local(self, instance, project_entity):
"""Update old saved instances to current publishing format"""
instance.anatomyData["version"] = instance.version
instance.anatomyData["subset"] = instance.subset
instance.stagingDir = tempfile.mkdtemp()
instance.projectEntity = project_entity
fam = "render.local"

View file

@ -1,6 +1,7 @@
import os
from avalon import api
import pyblish.api
from openpype.lib import get_subset_name_with_asset_doc
class CollectWorkfile(pyblish.api.ContextPlugin):
@ -33,19 +34,20 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
"stagingDir": staging_dir,
}
if not instance.data.get("representations"):
instance.data["representations"] = []
instance.data["representations"].append(representation)
instance.data["publish"] = instance.data["active"] # for DL
def _get_new_instance(self, context, scene_file):
task = api.Session["AVALON_TASK"]
version = context.data["version"]
asset_entity = context.data["assetEntity"]
project_entity = context.data["projectEntity"]
# workfile instance
family = "workfile"
subset = family + task.capitalize() # TOOD use method
instance_data = {
"active": True,
"asset": asset_entity["name"],
"task": task,
"frameStart": asset_entity["data"]["frameStart"],
@ -61,16 +63,31 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
project_entity["data"]["resolutionHeight"]),
"pixelAspect": 1,
"step": 1,
"version": version,
"version": version
}
# workfile instance
family = "workfile"
subset = get_subset_name_with_asset_doc(
family,
"",
context.data["anatomyData"]["task"]["name"],
context.data["assetEntity"],
context.data["anatomyData"]["project"]["name"],
host_name=context.data["hostName"]
)
# Create instance
instance = context.create_instance(subset)
# creating instance data
instance.data.update({
"subset": subset,
"label": scene_file,
"family": family,
"families": [family],
"representations": list()
}
})
# Create instance
instance = context.create_instance(subset)
instance.data.update(instance_data)
return instance

View file

@ -12,6 +12,8 @@ One of the settings in a scene doesn't match to asset settings in database.
### How to repair?
Change values for {invalid_keys_str} in the scene OR change them in the asset database if they are wrong there.
In the scene it is right mouse click on published composition > `Composition Settings`.
</description>
<detail>
### __Detailed Info__ (optional)

View file

@ -29,12 +29,12 @@ def add_implementation_envs(env, _app):
env.get("OPENPYPE_BLENDER_USER_SCRIPTS") or ""
)
for path in openpype_blender_user_scripts.split(os.pathsep):
if path and os.path.exists(path):
if path:
previous_user_scripts.add(os.path.normpath(path))
blender_user_scripts = env.get("BLENDER_USER_SCRIPTS") or ""
for path in blender_user_scripts.split(os.pathsep):
if path and os.path.exists(path):
if path:
previous_user_scripts.add(os.path.normpath(path))
# Remove implementation path from user script paths as is set to

View file

@ -14,10 +14,12 @@ import avalon.api
from avalon import io, schema
from openpype.pipeline import (
LegacyCreator,
register_loader_plugin_path,
register_creator_plugin_path,
deregister_loader_plugin_path,
deregister_creator_plugin_path,
AVALON_CONTAINER_ID,
uninstall_host,
)
from openpype.api import Logger
from openpype.lib import (
@ -54,7 +56,7 @@ def install():
pyblish.api.register_plugin_path(str(PUBLISH_PATH))
register_loader_plugin_path(str(LOAD_PATH))
avalon.api.register_plugin_path(LegacyCreator, str(CREATE_PATH))
register_creator_plugin_path(str(CREATE_PATH))
lib.append_user_scripts()
@ -76,7 +78,7 @@ def uninstall():
pyblish.api.deregister_plugin_path(str(PUBLISH_PATH))
deregister_loader_plugin_path(str(LOAD_PATH))
avalon.api.deregister_plugin_path(LegacyCreator, str(CREATE_PATH))
deregister_creator_plugin_path(str(CREATE_PATH))
if not IS_HEADLESS:
ops.unregister()
@ -208,11 +210,10 @@ def reload_pipeline(*args):
"""
avalon.api.uninstall()
uninstall_host()
for module in (
"avalon.io",
"avalon.lib",
"avalon.pipeline",
"avalon.api",
):

View file

@ -1,4 +1,4 @@
from avalon import pipeline
from openpype.pipeline import install_host
from openpype.hosts.blender import api
pipeline.install(api)
install_host(api)

View file

@ -3,8 +3,6 @@ import sys
import copy
import argparse
from avalon import io
import pyblish.api
import pyblish.util
@ -13,6 +11,8 @@ import openpype
import openpype.hosts.celaction
from openpype.hosts.celaction import api as celaction
from openpype.tools.utils import host_tools
from openpype.pipeline import install_openpype_plugins
log = Logger().get_logger("Celaction_cli_publisher")
@ -21,9 +21,6 @@ publish_host = "celaction"
HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.celaction.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
def cli():
@ -74,7 +71,7 @@ def main():
_prepare_publish_environments()
# Registers pype's Global pyblish plugins
openpype.install()
install_openpype_plugins()
if os.path.exists(PUBLISH_PATH):
log.info(f"Registering path: {PUBLISH_PATH}")

View file

@ -11,10 +11,8 @@ from .constants import (
from .lib import (
CTX,
FlameAppFramework,
get_project_manager,
get_current_project,
get_current_sequence,
create_bin,
create_segment_data_marker,
get_segment_data_marker,
set_segment_data_marker,
@ -29,7 +27,10 @@ from .lib import (
get_frame_from_filename,
get_padding_from_filename,
maintained_object_duplication,
get_clip_segment
maintained_temp_file_path,
get_clip_segment,
get_batch_group_from_desktop,
MediaInfoFile
)
from .utils import (
setup,
@ -56,7 +57,6 @@ from .plugin import (
PublishableClip,
ClipLoader,
OpenClipSolver
)
from .workio import (
open_file,
@ -71,6 +71,10 @@ from .render_utils import (
get_preset_path_by_xml_name,
modify_preset_file
)
from .batch_utils import (
create_batch_group,
create_batch_group_conent
)
__all__ = [
# constants
@ -83,10 +87,8 @@ __all__ = [
# lib
"CTX",
"FlameAppFramework",
"get_project_manager",
"get_current_project",
"get_current_sequence",
"create_bin",
"create_segment_data_marker",
"get_segment_data_marker",
"set_segment_data_marker",
@ -101,7 +103,10 @@ __all__ = [
"get_frame_from_filename",
"get_padding_from_filename",
"maintained_object_duplication",
"maintained_temp_file_path",
"get_clip_segment",
"get_batch_group_from_desktop",
"MediaInfoFile",
# pipeline
"install",
@ -142,5 +147,9 @@ __all__ = [
# render utils
"export_clip",
"get_preset_path_by_xml_name",
"modify_preset_file"
"modify_preset_file",
# batch utils
"create_batch_group",
"create_batch_group_conent"
]

View file

@ -0,0 +1,151 @@
import flame
def create_batch_group(
    name,
    frame_start,
    frame_duration,
    update_batch_group=None,
    **kwargs
):
    """Create or update a Batch Group in active project's Desktop.

    Args:
        name (str): name of batch group to be created
        frame_start (int): start frame of batch (handles are subtracted)
        frame_duration (int): duration of batch in frames (handles added)
        update_batch_group (PyBatch)[optional]: batch group to update
            instead of creating a new one

    Keyword Args:
        schematic_reels (list of str): schematic reel names; the historic
            misspelled key ``shematic_reels`` is accepted as a fallback
        shelf_reels (list of str): shelf reel names
        handleStart (int): frames prepended before ``frame_start``
        handleEnd (int): frames appended after the duration
        switch_batch_tab (bool): switch UI to the batch tab when done

    Return:
        PyBatch: active flame batch group
    """
    # make sure some batch obj is present
    batch_group = update_batch_group or flame.batch
    # Prefer the correctly spelled kwarg but keep reading the old
    # misspelled key so existing callers are not broken.
    schematic_reels = (
        kwargs.get("schematic_reels")
        or kwargs.get("shematic_reels")
        or ['LoadedReel1']
    )
    shelf_reels = kwargs.get("shelf_reels") or ['ShelfReel1']
    handle_start = kwargs.get("handleStart") or 0
    handle_end = kwargs.get("handleEnd") or 0

    # Extend the frame range by the handles on both sides.
    frame_start -= handle_start
    frame_duration += handle_start + handle_end

    if not update_batch_group:
        # Create batch group with name, start_frame value, duration value,
        # set of schematic reel names, set of shelf reel names
        batch_group = batch_group.create_batch_group(
            name,
            start_frame=frame_start,
            duration=frame_duration,
            reels=schematic_reels,
            shelf_reels=shelf_reels
        )
    else:
        batch_group.name = name
        batch_group.start_frame = frame_start
        batch_group.duration = frame_duration

        # add reels to batch group
        _add_reels_to_batch_group(
            batch_group, schematic_reels, shelf_reels)

        # TODO: also update write node if there is any
        # TODO: also update loaders to start from correct frameStart

    if kwargs.get("switch_batch_tab"):
        # use this command to switch to the batch tab
        batch_group.go_to()

    return batch_group
def _add_reels_to_batch_group(batch_group, reels, shelf_reels):
# update or create defined reels
# helper variables
reel_names = [
r.name.get_value()
for r in batch_group.reels
]
shelf_reel_names = [
r.name.get_value()
for r in batch_group.shelf_reels
]
# add schematic reels
for _r in reels:
if _r in reel_names:
continue
batch_group.create_reel(_r)
# add shelf reels
for _sr in shelf_reels:
if _sr in shelf_reel_names:
continue
batch_group.create_shelf_reel(_sr)
def create_batch_group_conent(batch_nodes, batch_links, batch_group=None):
    """Creating batch group with links

    Args:
        batch_nodes (list of dict): each dict is node definition
        batch_links (list of dict): each dict is link definition
        batch_group (PyBatch, optional): batch group. Defaults to None.

    Return:
        dict: all batch nodes {name or id: PyNode}
    """
    # make sure some batch obj is present
    batch_group = batch_group or flame.batch

    # index already existing nodes by their current name
    node_registry = {
        existing.name.get_value(): existing
        for existing in batch_group.nodes
    }

    for node_def in batch_nodes:
        # NOTE: properties should ideally be an OrderedDict
        node_id = node_def["id"]
        node_type = node_def["type"]
        node_props = node_def["properties"]

        # node name for the existence check; falls back to the id
        node_name = node_props.pop("name", None) or node_id

        batch_node = node_registry.get(node_name)
        if not batch_node:
            # no existing node found - create and name a fresh one
            batch_node = batch_group.create_node(node_type)
            batch_node.name.set_value(node_name)

        # apply only the properties the node actually exposes
        for prop_name, prop_value in node_props.items():
            if hasattr(batch_node, prop_name):
                setattr(batch_node, prop_name, prop_value)

        # register under the id so links can resolve it
        node_registry[node_id] = batch_node

    for link in batch_links:
        source, target = link["from_node"], link["to_node"]
        source_node = node_registry.get(source["id"])
        target_node = node_registry.get(target["id"])

        # skip links whose endpoints are not both available
        if not (source_node and target_node):
            continue

        # link nodes in defined link
        batch_group.connect_nodes(
            source_node, source["connector"],
            target_node, target["connector"]
        )

    # sort batch nodes
    batch_group.organize()

    return node_registry

View file

@ -3,7 +3,12 @@ import os
import re
import json
import pickle
import tempfile
import itertools
import contextlib
import xml.etree.cElementTree as cET
from copy import deepcopy
from xml.etree import ElementTree as ET
from pprint import pformat
from .constants import (
MARKER_COLOR,
@ -12,12 +17,14 @@ from .constants import (
COLOR_MAP,
MARKER_PUBLISH_DEFAULT
)
from openpype.api import Logger
log = Logger.get_logger(__name__)
import openpype.api as openpype
log = openpype.Logger.get_logger(__name__)
FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")
class CTX:
# singleton used for passing data between api modules
app_framework = None
@ -226,16 +233,6 @@ class FlameAppFramework(object):
return True
def get_project_manager():
# TODO: get_project_manager
return
def get_media_storage():
# TODO: get_media_storage
return
def get_current_project():
import flame
return flame.project.current_project
@ -265,11 +262,6 @@ def get_current_sequence(selection):
return process_timeline
def create_bin(name, root=None):
# TODO: create_bin
return
def rescan_hooks():
import flame
try:
@ -279,6 +271,7 @@ def rescan_hooks():
def get_metadata(project_name, _log=None):
# TODO: can be replaced by MediaInfoFile class method
from adsk.libwiretapPythonClientAPI import (
WireTapClient,
WireTapServerHandle,
@ -538,9 +531,17 @@ def get_segment_attributes(segment):
# head and tail with forward compatibility
if segment.head:
clip_data["segment_head"] = int(segment.head)
# `infinite` can be also returned
if isinstance(segment.head, str):
clip_data["segment_head"] = 0
else:
clip_data["segment_head"] = int(segment.head)
if segment.tail:
clip_data["segment_tail"] = int(segment.tail)
# `infinite` can be also returned
if isinstance(segment.tail, str):
clip_data["segment_tail"] = 0
else:
clip_data["segment_tail"] = int(segment.tail)
# add all available shot tokens
shot_tokens = _get_shot_tokens_values(segment, [
@ -695,6 +696,25 @@ def maintained_object_duplication(item):
flame.delete(duplicate)
@contextlib.contextmanager
def maintained_temp_file_path(suffix=None):
    """Yield a unique temp file path and remove the file on exit.

    The path (forward-slashed) is yielded for the consumer to write to;
    ``tempfile.mktemp`` only reserves a name and does NOT create the file,
    so removal must be guarded - previously a consumer that never created
    the file triggered ``FileNotFoundError`` in the ``finally`` block.

    Args:
        suffix (str, optional): file suffix, e.g. ".clip"

    Yields:
        str: temporary file path with forward slashes

    Raises:
        IOError: when temp file handling fails
    """
    _suffix = suffix or ""
    temporary_file = None
    try:
        # Store dumped json to temporary file
        temporary_file = tempfile.mktemp(
            suffix=_suffix, prefix="flame_maintained_")
        yield temporary_file.replace("\\", "/")
    except IOError as _error:
        raise IOError(
            "Not able to create temp json file: {}".format(_error))
    finally:
        # Remove the temporary json only when it was actually created
        if temporary_file and os.path.exists(temporary_file):
            os.remove(temporary_file)
def get_clip_segment(flame_clip):
name = flame_clip.name.get_value()
version = flame_clip.versions[0]
@ -708,3 +728,213 @@ def get_clip_segment(flame_clip):
raise ValueError("Clip `{}` has too many segments!".format(name))
return segments[0]
def get_batch_group_from_desktop(name):
    """Find a batch group on the current desktop by name.

    Note the membership test: a batch group matches when its name is a
    substring of `name`. Returns None when nothing matches.

    Args:
        name (str): name to match batch group names against

    Returns:
        PyBatch or None: first matching batch group
    """
    desktop = get_current_project().current_workspace.desktop

    for batch_group in desktop.batch_groups:
        if batch_group.name.get_value() in name:
            return batch_group
class MediaInfoFile(object):
    """Class to get media info file clip data

    Runs Flame's `dl_get_media_info` binary over the directory of the
    given media path, reduces the produced XML to the single matching
    <clip> element and exposes its timecode related values.

    Raises:
        IOError: MEDIA_SCRIPT_PATH path doesn't exists
        TypeError: Not able to generate clip xml data file
        ET.ParseError: Missing clip in xml clip data
        IOError: Not able to save xml clip data to file

    Attributes:
        str: `MEDIA_SCRIPT_PATH` path to flame binary
        logging.Logger: `log` logger

    TODO: add method for getting metadata to dict
    """
    MEDIA_SCRIPT_PATH = "/opt/Autodesk/mio/current/dl_get_media_info"
    log = log

    # backing values for the properties below
    _clip_data = None
    _start_frame = None
    _fps = None
    _drop_mode = None

    def __init__(self, path, **kwargs):
        """
        Args:
            path (str): path to the media file to inspect

        Keyword Args:
            logger (logging.Logger): custom logger replacing the default
        """
        # replace log if any
        if kwargs.get("logger"):
            self.log = kwargs["logger"]

        # test if `dl_get_media_info` path exists
        self._validate_media_script_path()

        # derivate other feed variables
        self.feed_basename = os.path.basename(path)
        self.feed_dir = os.path.dirname(path)
        self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower()

        with maintained_temp_file_path(".clip") as tmp_path:
            self.log.info("Temp File: {}".format(tmp_path))
            self._generate_media_info_file(tmp_path)

            # get clip data and make them single if there is multiple
            # clips data
            xml_data = self._make_single_clip_media_info(tmp_path)
            self.log.debug("xml_data: {}".format(xml_data))
            self.log.debug("type: {}".format(type(xml_data)))

            # get all time related data and assign them
            self._get_time_info_from_origin(xml_data)
            self.log.debug("start_frame: {}".format(self.start_frame))
            self.log.debug("fps: {}".format(self.fps))
            self.log.debug("drop frame: {}".format(self.drop_mode))
            self.clip_data = xml_data

    @property
    def clip_data(self):
        """Clip's xml clip data

        Returns:
            xml.etree.ElementTree: xml data
        """
        return self._clip_data

    @clip_data.setter
    def clip_data(self, data):
        self._clip_data = data

    @property
    def start_frame(self):
        """ Clip's starting frame found in timecode

        Returns:
            int: number of frames
        """
        return self._start_frame

    @start_frame.setter
    def start_frame(self, number):
        self._start_frame = int(number)

    @property
    def fps(self):
        """ Clip's frame rate

        Returns:
            float: frame rate
        """
        return self._fps

    @fps.setter
    def fps(self, fl_number):
        self._fps = float(fl_number)

    @property
    def drop_mode(self):
        """ Clip's drop frame mode

        Returns:
            str: drop frame flag
        """
        return self._drop_mode

    @drop_mode.setter
    def drop_mode(self, text):
        self._drop_mode = str(text)

    def _validate_media_script_path(self):
        """Raise IOError when the `dl_get_media_info` binary is missing."""
        if not os.path.isfile(self.MEDIA_SCRIPT_PATH):
            raise IOError("Media Scirpt does not exist: `{}`".format(
                self.MEDIA_SCRIPT_PATH))

    def _generate_media_info_file(self, fpath):
        """Run `dl_get_media_info` and write its XML output to `fpath`.

        Args:
            fpath (str): temp file path the XML output is written to

        Raises:
            TypeError: when the subprocess invocation fails
        """
        # Create cmd arguments for getting xml file info file
        cmd_args = [
            self.MEDIA_SCRIPT_PATH,
            "-e", self.feed_ext,
            "-o", fpath,
            self.feed_dir
        ]

        try:
            # execute creation of clip xml template data
            openpype.run_subprocess(cmd_args)
        except TypeError as error:
            raise TypeError(
                "Error creating `{}` due: {}".format(fpath, error))

    def _make_single_clip_media_info(self, fpath):
        """Reduce the generated XML to the single <clip> matching the feed.

        Args:
            fpath (str): path to the XML file produced by the media script

        Returns:
            xml.etree.ElementTree.Element: the matching <clip> element

        Raises:
            ET.ParseError: when no clip name matches `feed_basename`
        """
        with open(fpath) as f:
            lines = f.readlines()
            # wrap content in a synthetic <root> so several <clip>
            # elements parse as one document; the first line is dropped -
            # presumably the XML declaration, TODO confirm
            _added_root = itertools.chain(
                "<root>", deepcopy(lines)[1:], "</root>")
            new_root = ET.fromstringlist(_added_root)

        # find the clip which is matching to my input name
        xml_clips = new_root.findall("clip")
        matching_clip = None
        for xml_clip in xml_clips:
            if xml_clip.find("name").text in self.feed_basename:
                matching_clip = xml_clip

        if matching_clip is None:
            # return warning there is missing clip
            raise ET.ParseError(
                "Missing clip in `{}`. Available clips {}".format(
                    self.feed_basename, [
                        xml_clip.find("name").text
                        for xml_clip in xml_clips
                    ]
                ))

        return matching_clip

    def _get_time_info_from_origin(self, xml_data):
        """Assign start_frame, fps and drop_mode from the first feed found.

        Any failure while reading the elements is only logged as warning,
        leaving the attributes unset.

        Args:
            xml_data (xml.etree.ElementTree.Element): clip xml element
        """
        try:
            for out_track in xml_data.iter('track'):
                for out_feed in out_track.iter('feed'):
                    # start frame
                    out_feed_nb_ticks_obj = out_feed.find(
                        'startTimecode/nbTicks')
                    self.start_frame = out_feed_nb_ticks_obj.text

                    # fps
                    out_feed_fps_obj = out_feed.find(
                        'startTimecode/rate')
                    self.fps = out_feed_fps_obj.text

                    # drop frame mode
                    out_feed_drop_mode_obj = out_feed.find(
                        'startTimecode/dropMode')
                    self.drop_mode = out_feed_drop_mode_obj.text
                    break
                else:
                    continue
        except Exception as msg:
            self.log.warning(msg)

    @staticmethod
    def write_clip_data_to_file(fpath, xml_element_data):
        """ Write xml element of clip data to file

        Args:
            fpath (string): file path
            xml_element_data (xml.etree.ElementTree.Element): xml data

        Raises:
            IOError: If data could not be written to file
        """
        try:
            # save it as new file
            tree = cET.ElementTree(xml_element_data)
            tree.write(
                fpath, xml_declaration=True,
                method='xml', encoding='UTF-8'
            )
        except IOError as error:
            raise IOError(
                "Not able to write data to file: {}".format(error))

View file

@ -3,14 +3,14 @@ Basic avalon integration
"""
import os
import contextlib
from avalon import api as avalon
from pyblish import api as pyblish
from openpype.api import Logger
from openpype.pipeline import (
LegacyCreator,
register_loader_plugin_path,
register_creator_plugin_path,
deregister_loader_plugin_path,
deregister_creator_plugin_path,
AVALON_CONTAINER_ID,
)
from .lib import (
@ -37,7 +37,7 @@ def install():
pyblish.register_host("flame")
pyblish.register_plugin_path(PUBLISH_PATH)
register_loader_plugin_path(LOAD_PATH)
avalon.register_plugin_path(LegacyCreator, CREATE_PATH)
register_creator_plugin_path(CREATE_PATH)
log.info("OpenPype Flame plug-ins registred ...")
# register callback for switching publishable
@ -52,7 +52,7 @@ def uninstall():
log.info("Deregistering Flame plug-ins..")
pyblish.deregister_plugin_path(PUBLISH_PATH)
deregister_loader_plugin_path(LOAD_PATH)
avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH)
deregister_creator_plugin_path(CREATE_PATH)
# register callback for switching publishable
pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)

View file

@ -1,24 +1,19 @@
import os
import re
import shutil
import sys
from xml.etree import ElementTree as ET
import six
import qargparse
from Qt import QtWidgets, QtCore
import openpype.api as openpype
from openpype.pipeline import (
LegacyCreator,
LoaderPlugin,
)
from openpype import style
from . import (
lib as flib,
pipeline as fpipeline,
constants
)
from copy import deepcopy
from xml.etree import ElementTree as ET
from Qt import QtCore, QtWidgets
import openpype.api as openpype
import qargparse
from openpype import style
from openpype.pipeline import LegacyCreator, LoaderPlugin
from . import constants
from . import lib as flib
from . import pipeline as fpipeline
log = openpype.Logger.get_logger(__name__)
@ -660,8 +655,8 @@ class PublishableClip:
# Publishing plugin functions
# Loader plugin functions
# Loader plugin functions
class ClipLoader(LoaderPlugin):
"""A basic clip loader for Flame
@ -681,50 +676,52 @@ class ClipLoader(LoaderPlugin):
]
class OpenClipSolver:
media_script_path = "/opt/Autodesk/mio/current/dl_get_media_info"
tmp_name = "_tmp.clip"
tmp_file = None
class OpenClipSolver(flib.MediaInfoFile):
create_new_clip = False
out_feed_nb_ticks = None
out_feed_fps = None
out_feed_drop_mode = None
log = log
def __init__(self, openclip_file_path, feed_data):
# test if media script path exists
self._validate_media_script_path()
self.out_file = openclip_file_path
# new feed variables:
feed_path = feed_data["path"]
feed_path = feed_data.pop("path")
# initialize parent class
super(OpenClipSolver, self).__init__(
feed_path,
**feed_data
)
# get other metadata
self.feed_version_name = feed_data["version"]
self.feed_colorspace = feed_data.get("colorspace")
if feed_data.get("logger"):
self.log = feed_data["logger"]
self.log.debug("feed_version_name: {}".format(self.feed_version_name))
# derivate other feed variables
self.feed_basename = os.path.basename(feed_path)
self.feed_dir = os.path.dirname(feed_path)
self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower()
if not os.path.isfile(openclip_file_path):
# openclip does not exist yet and will be created
self.tmp_file = self.out_file = openclip_file_path
self.log.debug("feed_ext: {}".format(self.feed_ext))
self.log.debug("out_file: {}".format(self.out_file))
if not self._is_valid_tmp_file(self.out_file):
self.create_new_clip = True
else:
# output a temp file
self.out_file = openclip_file_path
self.tmp_file = os.path.join(self.feed_dir, self.tmp_name)
self._clear_tmp_file()
def _is_valid_tmp_file(self, file):
# check if file exists
if os.path.isfile(file):
# test also if file is not empty
with open(file) as f:
lines = f.readlines()
self.log.info("Temp File: {}".format(self.tmp_file))
if len(lines) > 2:
return True
# file is probably corrupted
os.remove(file)
return False
def make(self):
self._generate_media_info_file()
if self.create_new_clip:
# New openClip
@ -732,42 +729,17 @@ class OpenClipSolver:
else:
self._update_open_clip()
def _validate_media_script_path(self):
if not os.path.isfile(self.media_script_path):
raise IOError("Media Scirpt does not exist: `{}`".format(
self.media_script_path))
def _generate_media_info_file(self):
# Create cmd arguments for getting xml file info file
cmd_args = [
self.media_script_path,
"-e", self.feed_ext,
"-o", self.tmp_file,
self.feed_dir
]
# execute creation of clip xml template data
try:
openpype.run_subprocess(cmd_args)
except TypeError:
self.log.error("Error creating self.tmp_file")
six.reraise(*sys.exc_info())
def _clear_tmp_file(self):
if os.path.isfile(self.tmp_file):
os.remove(self.tmp_file)
def _clear_handler(self, xml_object):
for handler in xml_object.findall("./handler"):
self.log.debug("Handler found")
self.log.info("Handler found")
xml_object.remove(handler)
def _create_new_open_clip(self):
self.log.info("Building new openClip")
self.log.debug(">> self.clip_data: {}".format(self.clip_data))
tmp_xml = ET.parse(self.tmp_file)
tmp_xml_feeds = tmp_xml.find('tracks/track/feeds')
# clip data comming from MediaInfoFile
tmp_xml_feeds = self.clip_data.find('tracks/track/feeds')
tmp_xml_feeds.set('currentVersion', self.feed_version_name)
for tmp_feed in tmp_xml_feeds:
tmp_feed.set('vuid', self.feed_version_name)
@ -778,46 +750,48 @@ class OpenClipSolver:
self._clear_handler(tmp_feed)
tmp_xml_versions_obj = tmp_xml.find('versions')
tmp_xml_versions_obj = self.clip_data.find('versions')
tmp_xml_versions_obj.set('currentVersion', self.feed_version_name)
for xml_new_version in tmp_xml_versions_obj:
xml_new_version.set('uid', self.feed_version_name)
xml_new_version.set('type', 'version')
xml_data = self._fix_xml_data(tmp_xml)
self._clear_handler(self.clip_data)
self.log.info("Adding feed version: {}".format(self.feed_basename))
self._write_result_xml_to_file(xml_data)
self.log.info("openClip Updated: {}".format(self.tmp_file))
self.write_clip_data_to_file(self.out_file, self.clip_data)
def _update_open_clip(self):
self.log.info("Updating openClip ..")
out_xml = ET.parse(self.out_file)
tmp_xml = ET.parse(self.tmp_file)
out_xml = out_xml.getroot()
self.log.debug(">> out_xml: {}".format(out_xml))
self.log.debug(">> tmp_xml: {}".format(tmp_xml))
self.log.debug(">> self.clip_data: {}".format(self.clip_data))
# Get new feed from tmp file
tmp_xml_feed = tmp_xml.find('tracks/track/feeds/feed')
tmp_xml_feed = self.clip_data.find('tracks/track/feeds/feed')
self._clear_handler(tmp_xml_feed)
self._get_time_info_from_origin(out_xml)
if self.out_feed_fps:
# update fps from MediaInfoFile class
if self.fps:
tmp_feed_fps_obj = tmp_xml_feed.find(
"startTimecode/rate")
tmp_feed_fps_obj.text = self.out_feed_fps
if self.out_feed_nb_ticks:
tmp_feed_fps_obj.text = str(self.fps)
# update start_frame from MediaInfoFile class
if self.start_frame:
tmp_feed_nb_ticks_obj = tmp_xml_feed.find(
"startTimecode/nbTicks")
tmp_feed_nb_ticks_obj.text = self.out_feed_nb_ticks
if self.out_feed_drop_mode:
tmp_feed_nb_ticks_obj.text = str(self.start_frame)
# update drop_mode from MediaInfoFile class
if self.drop_mode:
tmp_feed_drop_mode_obj = tmp_xml_feed.find(
"startTimecode/dropMode")
tmp_feed_drop_mode_obj.text = self.out_feed_drop_mode
tmp_feed_drop_mode_obj.text = str(self.drop_mode)
new_path_obj = tmp_xml_feed.find(
"spans/span/path")
@ -850,7 +824,7 @@ class OpenClipSolver:
"version", {"type": "version", "uid": self.feed_version_name})
out_xml_versions_obj.insert(0, new_version_obj)
xml_data = self._fix_xml_data(out_xml)
self._clear_handler(out_xml)
# fist create backup
self._create_openclip_backup_file(self.out_file)
@ -858,30 +832,9 @@ class OpenClipSolver:
self.log.info("Adding feed version: {}".format(
self.feed_version_name))
self._write_result_xml_to_file(xml_data)
self.write_clip_data_to_file(self.out_file, out_xml)
self.log.info("openClip Updated: {}".format(self.out_file))
self._clear_tmp_file()
def _get_time_info_from_origin(self, xml_data):
try:
for out_track in xml_data.iter('track'):
for out_feed in out_track.iter('feed'):
out_feed_nb_ticks_obj = out_feed.find(
'startTimecode/nbTicks')
self.out_feed_nb_ticks = out_feed_nb_ticks_obj.text
out_feed_fps_obj = out_feed.find(
'startTimecode/rate')
self.out_feed_fps = out_feed_fps_obj.text
out_feed_drop_mode_obj = out_feed.find(
'startTimecode/dropMode')
self.out_feed_drop_mode = out_feed_drop_mode_obj.text
break
else:
continue
except Exception as msg:
self.log.warning(msg)
self.log.debug("OpenClip Updated: {}".format(self.out_file))
def _feed_exists(self, xml_data, path):
# loop all available feed paths and check if
@ -892,15 +845,6 @@ class OpenClipSolver:
"Not appending file as it already is in .clip file")
return True
def _fix_xml_data(self, xml_data):
xml_root = xml_data.getroot()
self._clear_handler(xml_root)
return ET.tostring(xml_root).decode('utf-8')
def _write_result_xml_to_file(self, xml_data):
with open(self.out_file, "w") as f:
f.write(xml_data)
def _create_openclip_backup_file(self, file):
bck_file = "{}.bak".format(file)
# if backup does not exist

View file

@ -185,7 +185,9 @@ class WireTapCom(object):
exit_code = subprocess.call(
project_create_cmd,
cwd=os.path.expanduser('~'))
cwd=os.path.expanduser('~'),
preexec_fn=_subprocess_preexec_fn
)
if exit_code != 0:
RuntimeError("Cannot create project in flame db")
@ -254,7 +256,7 @@ class WireTapCom(object):
filtered_users = [user for user in used_names if user_name in user]
if filtered_users:
# todo: need to find lastly created following regex pattern for
# TODO: need to find lastly created following regex pattern for
# date used in name
return filtered_users.pop()
@ -422,7 +424,13 @@ class WireTapCom(object):
color_policy = color_policy or "Legacy"
# check if the colour policy in custom dir
if not os.path.exists(color_policy):
if "/" in color_policy:
# if unlikelly full path was used make it redundant
color_policy = color_policy.replace("/syncolor/policies/", "")
# expecting input is `Shared/NameOfPolicy`
color_policy = "/syncolor/policies/{}".format(
color_policy)
else:
color_policy = "/syncolor/policies/Autodesk/{}".format(
color_policy)
@ -442,7 +450,9 @@ class WireTapCom(object):
exit_code = subprocess.call(
project_colorspace_cmd,
cwd=os.path.expanduser('~'))
cwd=os.path.expanduser('~'),
preexec_fn=_subprocess_preexec_fn
)
if exit_code != 0:
RuntimeError("Cannot set colorspace {} on project {}".format(
@ -450,6 +460,15 @@ class WireTapCom(object):
))
def _subprocess_preexec_fn():
    """Pre-exec hook for children spawned via ``subprocess``.

    Runs in the child just before exec: detaches it into its own process
    group and clears the umask (0o000) so files the child creates get the
    widest permissions the filesystem allows.
    """
    os.setpgrp()
    os.umask(0o000)
if __name__ == "__main__":
# get json exchange data
json_path = sys.argv[-1]

View file

@ -11,8 +11,6 @@ from . import utils
import flame
from pprint import pformat
reload(utils) # noqa
log = logging.getLogger(__name__)
@ -260,24 +258,15 @@ def create_otio_markers(otio_item, item):
otio_item.markers.append(otio_marker)
def create_otio_reference(clip_data):
def create_otio_reference(clip_data, fps=None):
metadata = _get_metadata(clip_data)
# get file info for path and start frame
frame_start = 0
fps = CTX.get_fps()
fps = fps or CTX.get_fps()
path = clip_data["fpath"]
reel_clip = None
match_reel_clip = [
clip for clip in CTX.clips
if clip["fpath"] == path
]
if match_reel_clip:
reel_clip = match_reel_clip.pop()
fps = reel_clip["fps"]
file_name = os.path.basename(path)
file_head, extension = os.path.splitext(file_name)
@ -339,13 +328,22 @@ def create_otio_reference(clip_data):
def create_otio_clip(clip_data):
from openpype.hosts.flame.api import MediaInfoFile
segment = clip_data["PySegment"]
# create media reference
media_reference = create_otio_reference(clip_data)
# calculate source in
first_frame = utils.get_frame_from_filename(clip_data["fpath"]) or 0
media_info = MediaInfoFile(clip_data["fpath"])
media_timecode_start = media_info.start_frame
media_fps = media_info.fps
# create media reference
media_reference = create_otio_reference(clip_data, media_fps)
# define first frame
first_frame = media_timecode_start or utils.get_frame_from_filename(
clip_data["fpath"]) or 0
source_in = int(clip_data["source_in"]) - int(first_frame)
# creatae source range
@ -378,38 +376,6 @@ def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):
)
def get_clips_in_reels(project):
    """Collect clip data from all reels on the project's desktop.

    Args:
        project (PyProject): flame project to inspect

    Returns:
        list[dict]: one dict per clip with its attributes plus the
            segment attributes of the last track of its last version
    """
    collected_clips = []
    desktop = project.current_workspace.desktop

    clip_attr_names = (
        "name", "width", "height",
        "ratio", "sample_rate", "bit_depth"
    )

    for reel_group in desktop.reel_groups:
        for reel in reel_group.reels:
            for clip in reel.clips:
                # frame_rate prints like "24.0 fps" - strip the suffix
                clip_data = {
                    "PyClip": clip,
                    "fps": float(str(clip.frame_rate)[:-4]),
                }

                for attr_name in clip_attr_names:
                    clip_data[attr_name] = getattr(clip, attr_name)

                # only the last track of the last version is inspected
                last_track = clip.versions[-1].tracks[-1]
                for segment in last_track.segments:
                    clip_data.update(_get_segment_attributes(segment))

                collected_clips.append(clip_data)

    return collected_clips
def _get_colourspace_policy():
output = {}
@ -493,9 +459,6 @@ def _get_shot_tokens_values(clip, tokens):
old_value = None
output = {}
if not clip.shot_name:
return output
old_value = clip.shot_name.get_value()
for token in tokens:
@ -513,15 +476,21 @@ def _get_shot_tokens_values(clip, tokens):
def _get_segment_attributes(segment):
# log.debug(dir(segment))
if str(segment.name)[1:-1] == "":
log.debug("Segment name|hidden: {}|{}".format(
segment.name.get_value(), segment.hidden
))
if (
segment.name.get_value() == ""
or segment.hidden.get_value()
):
return None
# Add timeline segment to tree
clip_data = {
"segment_name": segment.name.get_value(),
"segment_comment": segment.comment.get_value(),
"shot_name": segment.shot_name.get_value(),
"tape_name": segment.tape_name,
"source_name": segment.source_name,
"fpath": segment.file_path,
@ -529,9 +498,10 @@ def _get_segment_attributes(segment):
}
# add all available shot tokens
shot_tokens = _get_shot_tokens_values(segment, [
"<colour space>", "<width>", "<height>", "<depth>",
])
shot_tokens = _get_shot_tokens_values(
segment,
["<colour space>", "<width>", "<height>", "<depth>"]
)
clip_data.update(shot_tokens)
# populate shot source metadata
@ -561,11 +531,6 @@ def create_otio_timeline(sequence):
log.info(sequence.attributes)
CTX.project = get_current_flame_project()
CTX.clips = get_clips_in_reels(CTX.project)
log.debug(pformat(
CTX.clips
))
# get current timeline
CTX.set_fps(
@ -583,8 +548,13 @@ def create_otio_timeline(sequence):
# create otio tracks and clips
for ver in sequence.versions:
for track in ver.tracks:
if len(track.segments) == 0 and track.hidden:
return None
# avoid all empty tracks
# or hidden tracks
if (
len(track.segments) == 0
or track.hidden.get_value()
):
continue
# convert track to otio
otio_track = create_otio_track(
@ -597,11 +567,7 @@ def create_otio_timeline(sequence):
continue
all_segments.append(clip_data)
segments_ordered = {
itemindex: clip_data
for itemindex, clip_data in enumerate(
all_segments)
}
segments_ordered = dict(enumerate(all_segments))
log.debug("_ segments_ordered: {}".format(
pformat(segments_ordered)
))
@ -612,15 +578,11 @@ def create_otio_timeline(sequence):
log.debug("_ itemindex: {}".format(itemindex))
# Add Gap if needed
if itemindex == 0:
# if it is first track item at track then add
# it to previous item
prev_item = segment_data
else:
# get previous item
prev_item = segments_ordered[itemindex - 1]
prev_item = (
segment_data
if itemindex == 0
else segments_ordered[itemindex - 1]
)
log.debug("_ segment_data: {}".format(segment_data))
# calculate clip frame range difference from each other

View file

@ -22,7 +22,7 @@ class LoadClip(opfapi.ClipLoader):
# settings
reel_group_name = "OpenPype_Reels"
reel_name = "Loaded"
clip_name_template = "{asset}_{subset}_{representation}"
clip_name_template = "{asset}_{subset}_{output}"
def load(self, context, name, namespace, options):
@ -39,7 +39,7 @@ class LoadClip(opfapi.ClipLoader):
clip_name = self.clip_name_template.format(
**context["representation"]["context"])
# todo: settings in imageio
# TODO: settings in imageio
# convert colorspace with ocio to flame mapping
# in imageio flame section
colorspace = colorspace

View file

@ -0,0 +1,139 @@
import os
import flame
from pprint import pformat
import openpype.hosts.flame.api as opfapi
class LoadClipBatch(opfapi.ClipLoader):
    """Load a subset to timeline as clip

    Place clip to timeline on its asset origin timings collected
    during conforming to project
    """

    families = ["render2d", "source", "plate", "render", "review"]
    representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264"]

    label = "Load as clip to current batch"
    order = -10
    icon = "code-fork"
    color = "orange"

    # settings
    reel_name = "OP_LoadedReel"
    clip_name_template = "{asset}_{subset}_{output}"

    def load(self, context, name, namespace, options):
        """Create/update an openclip and import it into the current batch.

        Args:
            context (dict): full representation context
            name (str): subset name
            namespace (str): container namespace
            options (dict): may hold ``batch`` (PyBatch) and
                ``workdir`` (str) overrides

        Returns:
            PyClip: the imported (or already present) clip
        """
        # get flame objects
        self.batch = options.get("batch") or flame.batch

        # load clip to timeline and get main variables
        version = context['version']
        version_data = version.get("data", {})
        version_name = version.get("name", None)
        colorspace = version_data.get("colorspace", None)

        # in case output is not in context replace key to representation
        clip_name_template = self.clip_name_template
        if not context["representation"]["context"].get("output"):
            # NOTE: `str.replace` returns a new string, so the result must
            # be assigned - the previous code discarded it, leaving the
            # template (and its `{output}` key) unchanged
            clip_name_template = clip_name_template.replace(
                "output", "representation")

        clip_name = clip_name_template.format(
            **context["representation"]["context"])

        # TODO: settings in imageio
        # convert colorspace with ocio to flame mapping
        # in imageio flame section
        colorspace = colorspace

        # create workfile path
        workfile_dir = options.get("workdir") or os.environ["AVALON_WORKDIR"]
        openclip_dir = os.path.join(
            workfile_dir, clip_name
        )
        openclip_path = os.path.join(
            openclip_dir, clip_name + ".clip"
        )
        if not os.path.exists(openclip_dir):
            os.makedirs(openclip_dir)

        # prepare clip data from context and send it to openClipLoader
        loading_context = {
            "path": self.fname.replace("\\", "/"),
            "colorspace": colorspace,
            "version": "v{:0>3}".format(version_name),
            "logger": self.log
        }
        self.log.debug(pformat(
            loading_context
        ))
        self.log.debug(openclip_path)

        # make openpype clip file
        opfapi.OpenClipSolver(openclip_path, loading_context).make()

        # prepare Reel group in actual desktop
        opc = self._get_clip(
            clip_name,
            openclip_path
        )

        # add additional metadata from the version to imprint Avalon knob
        add_keys = [
            "frameStart", "frameEnd", "source", "author",
            "fps", "handleStart", "handleEnd"
        ]

        # move all version data keys to tag data
        data_imprint = {
            key: version_data.get(key, str(None))
            for key in add_keys
        }
        # add variables related to version context
        data_imprint.update({
            "version": version_name,
            "colorspace": colorspace,
            "objectName": clip_name
        })

        # TODO: finish the containerisation
        # opc_segment = opfapi.get_clip_segment(opc)

        # return opfapi.containerise(
        #     opc_segment,
        #     name, namespace, context,
        #     self.__class__.__name__,
        #     data_imprint)

        return opc

    def _get_clip(self, name, clip_path):
        """Return clip named `name` from the loader reel; import if missing.

        Args:
            name (str): clip name to look for
            clip_path (str): `.clip` file to import when not found

        Returns:
            PyClip: existing or freshly imported clip
        """
        reel = self._get_reel()

        # with maintained openclip as opc
        matching_clip = None
        for cl in reel.clips:
            if cl.name.get_value() != name:
                continue
            matching_clip = cl

        if not matching_clip:
            created_clips = flame.import_clips(str(clip_path), reel)
            return created_clips.pop()

        return matching_clip

    def _get_reel(self):
        """Return the loader reel on the current batch, creating if needed.

        Returns:
            PyReel: reel named `reel_name`
        """
        matching_reel = [
            rg for rg in self.batch.reels
            if rg.name.get_value() == self.reel_name
        ]

        return (
            matching_reel.pop()
            if matching_reel
            else self.batch.create_reel(str(self.reel_name))
        )

View file

@ -21,132 +21,135 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
audio_track_items = []
# TODO: add to settings
# settings
xml_preset_attrs_from_comments = {
"width": "number",
"height": "number",
"pixelRatio": "float",
"resizeType": "string",
"resizeFilter": "string"
}
xml_preset_attrs_from_comments = []
add_tasks = []
def process(self, context):
project = context.data["flameProject"]
sequence = context.data["flameSequence"]
selected_segments = context.data["flameSelectedSegments"]
self.log.debug("__ selected_segments: {}".format(selected_segments))
self.otio_timeline = context.data["otioTimeline"]
self.clips_in_reels = opfapi.get_clips_in_reels(project)
self.fps = context.data["fps"]
# process all selected
with opfapi.maintained_segment_selection(sequence) as segments:
for segment in segments:
comment_attributes = self._get_comment_attributes(segment)
self.log.debug("_ comment_attributes: {}".format(
pformat(comment_attributes)))
for segment in selected_segments:
# get openpype tag data
marker_data = opfapi.get_segment_data_marker(segment)
self.log.debug("__ marker_data: {}".format(
pformat(marker_data)))
clip_data = opfapi.get_segment_attributes(segment)
clip_name = clip_data["segment_name"]
self.log.debug("clip_name: {}".format(clip_name))
if not marker_data:
continue
# get openpype tag data
marker_data = opfapi.get_segment_data_marker(segment)
self.log.debug("__ marker_data: {}".format(
pformat(marker_data)))
if marker_data.get("id") != "pyblish.avalon.instance":
continue
if not marker_data:
continue
self.log.debug("__ segment.name: {}".format(
segment.name
))
if marker_data.get("id") != "pyblish.avalon.instance":
continue
comment_attributes = self._get_comment_attributes(segment)
# get file path
file_path = clip_data["fpath"]
self.log.debug("_ comment_attributes: {}".format(
pformat(comment_attributes)))
# get source clip
source_clip = self._get_reel_clip(file_path)
clip_data = opfapi.get_segment_attributes(segment)
clip_name = clip_data["segment_name"]
self.log.debug("clip_name: {}".format(clip_name))
first_frame = opfapi.get_frame_from_filename(file_path) or 0
# get file path
file_path = clip_data["fpath"]
head, tail = self._get_head_tail(clip_data, first_frame)
# get source clip
source_clip = self._get_reel_clip(file_path)
# solve handles length
marker_data["handleStart"] = min(
marker_data["handleStart"], head)
marker_data["handleEnd"] = min(
marker_data["handleEnd"], tail)
first_frame = opfapi.get_frame_from_filename(file_path) or 0
with_audio = bool(marker_data.pop("audio"))
head, tail = self._get_head_tail(clip_data, first_frame)
# add marker data to instance data
inst_data = dict(marker_data.items())
# solve handles length
marker_data["handleStart"] = min(
marker_data["handleStart"], abs(head))
marker_data["handleEnd"] = min(
marker_data["handleEnd"], abs(tail))
asset = marker_data["asset"]
subset = marker_data["subset"]
with_audio = bool(marker_data.pop("audio"))
# insert family into families
family = marker_data["family"]
families = [str(f) for f in marker_data["families"]]
families.insert(0, str(family))
# add marker data to instance data
inst_data = dict(marker_data.items())
# form label
label = asset
if asset != clip_name:
label += " ({})".format(clip_name)
label += " {}".format(subset)
label += " {}".format("[" + ", ".join(families) + "]")
asset = marker_data["asset"]
subset = marker_data["subset"]
inst_data.update({
"name": "{}_{}".format(asset, subset),
"label": label,
"asset": asset,
"item": segment,
"families": families,
"publish": marker_data["publish"],
"fps": self.fps,
"flameSourceClip": source_clip,
"sourceFirstFrame": int(first_frame),
"path": file_path
})
# insert family into families
family = marker_data["family"]
families = [str(f) for f in marker_data["families"]]
families.insert(0, str(family))
# get otio clip data
otio_data = self._get_otio_clip_instance_data(clip_data) or {}
self.log.debug("__ otio_data: {}".format(pformat(otio_data)))
# form label
label = asset
if asset != clip_name:
label += " ({})".format(clip_name)
label += " {} [{}]".format(subset, ", ".join(families))
# add to instance data
inst_data.update(otio_data)
self.log.debug("__ inst_data: {}".format(pformat(inst_data)))
inst_data.update({
"name": "{}_{}".format(asset, subset),
"label": label,
"asset": asset,
"item": segment,
"families": families,
"publish": marker_data["publish"],
"fps": self.fps,
"flameSourceClip": source_clip,
"sourceFirstFrame": int(first_frame),
"path": file_path,
"flameAddTasks": self.add_tasks,
"tasks": {
task["name"]: {"type": task["type"]}
for task in self.add_tasks}
})
# add resolution
self._get_resolution_to_data(inst_data, context)
# get otio clip data
otio_data = self._get_otio_clip_instance_data(clip_data) or {}
self.log.debug("__ otio_data: {}".format(pformat(otio_data)))
# add comment attributes if any
inst_data.update(comment_attributes)
# add to instance data
inst_data.update(otio_data)
self.log.debug("__ inst_data: {}".format(pformat(inst_data)))
# create instance
instance = context.create_instance(**inst_data)
# add resolution
self._get_resolution_to_data(inst_data, context)
# add colorspace data
instance.data.update({
"versionData": {
"colorspace": clip_data["colour_space"],
}
})
# add comment attributes if any
inst_data.update(comment_attributes)
# create shot instance for shot attributes create/update
self._create_shot_instance(context, clip_name, **inst_data)
# create instance
instance = context.create_instance(**inst_data)
self.log.info("Creating instance: {}".format(instance))
self.log.info(
"_ instance.data: {}".format(pformat(instance.data)))
# add colorspace data
instance.data.update({
"versionData": {
"colorspace": clip_data["colour_space"],
}
})
if not with_audio:
continue
# create shot instance for shot attributes create/update
self._create_shot_instance(context, clip_name, **inst_data)
# add audioReview attribute to plate instance data
# if reviewTrack is on
if marker_data.get("reviewTrack") is not None:
instance.data["reviewAudio"] = True
self.log.info("Creating instance: {}".format(instance))
self.log.info(
"_ instance.data: {}".format(pformat(instance.data)))
if not with_audio:
continue
# add audioReview attribute to plate instance data
# if reviewTrack is on
if marker_data.get("reviewTrack") is not None:
instance.data["reviewAudio"] = True
def _get_comment_attributes(self, segment):
comment = segment.comment.get_value()
@ -181,14 +184,17 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
# split to key and value
key, value = split.split(":")
for a_name, a_type in self.xml_preset_attrs_from_comments.items():
for attr_data in self.xml_preset_attrs_from_comments:
a_name = attr_data["name"]
a_type = attr_data["type"]
# exclude all not related attributes
if a_name.lower() not in key.lower():
continue
# get pattern defined by type
pattern = TXT_PATERN
if a_type in ("number" , "float"):
if a_type in ("number", "float"):
pattern = NUM_PATERN
res_goup = pattern.findall(value)
@ -241,6 +247,7 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
head = clip_data.get("segment_head")
tail = clip_data.get("segment_tail")
# HACK: it is here to serve for versions below 2021.1
if not head:
head = int(clip_data["source_in"]) - int(first_frame)
if not tail:

View file

@ -31,27 +31,28 @@ class CollecTimelineOTIO(pyblish.api.ContextPlugin):
)
# adding otio timeline to context
with opfapi.maintained_segment_selection(sequence):
with opfapi.maintained_segment_selection(sequence) as selected_seg:
otio_timeline = flame_export.create_otio_timeline(sequence)
instance_data = {
"name": subset_name,
"asset": asset_doc["name"],
"subset": subset_name,
"family": "workfile"
}
instance_data = {
"name": subset_name,
"asset": asset_doc["name"],
"subset": subset_name,
"family": "workfile"
}
# create instance with workfile
instance = context.create_instance(**instance_data)
self.log.info("Creating instance: {}".format(instance))
# create instance with workfile
instance = context.create_instance(**instance_data)
self.log.info("Creating instance: {}".format(instance))
# update context with main project attributes
context.data.update({
"flameProject": project,
"flameSequence": sequence,
"otioTimeline": otio_timeline,
"currentFile": "Flame/{}/{}".format(
project.name, sequence.name
),
"fps": float(str(sequence.frame_rate)[:-4])
})
# update context with main project attributes
context.data.update({
"flameProject": project,
"flameSequence": sequence,
"otioTimeline": otio_timeline,
"currentFile": "Flame/{}/{}".format(
project.name, sequence.name
),
"flameSelectedSegments": selected_seg,
"fps": float(str(sequence.frame_rate)[:-4])
})

View file

@ -61,9 +61,13 @@ class ExtractSubsetResources(openpype.api.Extractor):
# flame objects
segment = instance.data["item"]
segment_name = segment.name.get_value()
sequence_clip = instance.context.data["flameSequence"]
clip_data = instance.data["flameSourceClip"]
clip = clip_data["PyClip"]
reel_clip = None
if clip_data:
reel_clip = clip_data["PyClip"]
# segment's parent track name
s_track_name = segment.parent.name.get_value()
@ -108,6 +112,16 @@ class ExtractSubsetResources(openpype.api.Extractor):
ignore_comment_attrs = preset_config["ignore_comment_attrs"]
color_out = preset_config["colorspace_out"]
# get attribures related loading in integrate_batch_group
load_to_batch_group = preset_config.get(
"load_to_batch_group")
batch_group_loader_name = preset_config.get(
"batch_group_loader_name")
# convert to None if empty string
if batch_group_loader_name == "":
batch_group_loader_name = None
# get frame range with handles for representation range
frame_start_handle = frame_start - handle_start
source_duration_handles = (
@ -117,8 +131,20 @@ class ExtractSubsetResources(openpype.api.Extractor):
in_mark = (source_start_handles - source_first_frame) + 1
out_mark = in_mark + source_duration_handles
# make test for type of preset and available reel_clip
if (
not reel_clip
and export_type != "Sequence Publish"
):
self.log.warning((
"Skipping preset {}. Not available "
"reel clip for {}").format(
preset_file, segment_name
))
continue
# by default export source clips
exporting_clip = clip
exporting_clip = reel_clip
if export_type == "Sequence Publish":
# change export clip to sequence
@ -150,7 +176,7 @@ class ExtractSubsetResources(openpype.api.Extractor):
if export_type == "Sequence Publish":
# only keep visible layer where instance segment is child
self.hide_other_tracks(duplclip, s_track_name)
self.hide_others(duplclip, segment_name, s_track_name)
# validate xml preset file is filled
if preset_file == "":
@ -211,7 +237,9 @@ class ExtractSubsetResources(openpype.api.Extractor):
"tags": repre_tags,
"data": {
"colorspace": color_out
}
},
"load_to_batch_group": load_to_batch_group,
"batch_group_loader_name": batch_group_loader_name
}
# collect all available content of export dir
@ -322,18 +350,26 @@ class ExtractSubsetResources(openpype.api.Extractor):
return new_stage_dir, new_files_list
def hide_other_tracks(self, sequence_clip, track_name):
def hide_others(self, sequence_clip, segment_name, track_name):
"""Helper method used only if sequence clip is used
Args:
sequence_clip (flame.Clip): sequence clip
segment_name (str): segment name
track_name (str): track name
"""
# create otio tracks and clips
for ver in sequence_clip.versions:
for track in ver.tracks:
if len(track.segments) == 0 and track.hidden:
if len(track.segments) == 0 and track.hidden.get_value():
continue
# hide tracks which are not parent track
if track.name.get_value() != track_name:
track.hidden = True
continue
# hide all other segments
for segment in track.segments:
if segment.name.get_value() != segment_name:
segment.hidden = True

View file

@ -0,0 +1,328 @@
import os
import copy
from collections import OrderedDict
from pprint import pformat

# NOTE: import the submodule explicitly - ``import pyblish`` alone does not
# guarantee ``pyblish.api`` is importable as an attribute; sibling plugins in
# this codebase consistently use ``import pyblish.api``
import pyblish.api

from openpype.lib import get_workdir
import openpype.hosts.flame.api as opfapi
import openpype.pipeline as op_pipeline
class IntegrateBatchGroup(pyblish.api.InstancePlugin):
    """Integrate published shot to batch group.

    For each task configured in the instance's ``flameAddTasks`` data with
    ``create_batch_group`` enabled, creates (or updates) a Flame batch group
    named ``<asset>_<task>``, fills it with a default node graph
    (comp -> Write File) and loads the published clip representations into
    it via the configured loader plugins.
    """

    order = pyblish.api.IntegratorOrder + 0.45
    label = "Integrate Batch Groups"
    hosts = ["flame"]
    families = ["clip"]

    # settings
    # fallback loader name used when a representation does not define its
    # own "batch_group_loader_name"
    default_loader = "LoadClip"

    def process(self, instance):
        """Plugin entry point.

        Args:
            instance (pyblish.api.Instance): publishing instance carrying
                "flameAddTasks" and "published_representations" data
        """
        add_tasks = instance.data["flameAddTasks"]

        # iterate all tasks from settings
        for task_data in add_tasks:
            # exclude batch group
            if not task_data["create_batch_group"]:
                continue

            # create or get already created batch group
            bgroup = self._get_batch_group(instance, task_data)

            # add batch group content
            # NOTE: this call also sets ``self.task_workdir`` (side effect
            # inside ``_get_write_prefs``) which ``_load_clip_to_context``
            # below relies on - keep this call order
            all_batch_nodes = self._add_nodes_to_batch_with_links(
                instance, task_data, bgroup)

            for name, node in all_batch_nodes.items():
                self.log.debug("name: {}, dir: {}".format(
                    name, dir(node)
                ))
                self.log.debug("__ node.attributes: {}".format(
                    node.attributes
                ))

            # load plate to batch group
            self.log.info("Loading subset `{}` into batch `{}`".format(
                instance.data["subset"], bgroup.name.get_value()
            ))
            self._load_clip_to_context(instance, bgroup)

    def _add_nodes_to_batch_with_links(self, instance, task_data, batch_group):
        """Create the default node graph inside the batch group.

        Builds a comp node wired into a Write File node (``Result`` ->
        ``Front``) and hands both plus their link to the Flame API helper.

        Returns:
            dict: created batch nodes keyed by name, as returned by
                ``opfapi.create_batch_group_conent``
        """
        # get write file node properties > OrderedDict because order matters
        write_pref_data = self._get_write_prefs(instance, task_data)

        batch_nodes = [
            {
                "type": "comp",
                "properties": {},
                "id": "comp_node01"
            },
            {
                "type": "Write File",
                "properties": write_pref_data,
                "id": "write_file_node01"
            }
        ]
        batch_links = [
            {
                "from_node": {
                    "id": "comp_node01",
                    "connector": "Result"
                },
                "to_node": {
                    "id": "write_file_node01",
                    "connector": "Front"
                }
            }
        ]

        # add nodes into batch group
        return opfapi.create_batch_group_conent(
            batch_nodes, batch_links, batch_group)

    def _load_clip_to_context(self, instance, bgroup):
        """Load published representations into the batch group.

        Only representations flagged with "load_to_batch_group" are loaded;
        each is resolved to its database id and loaded with its configured
        loader name, falling back to ``self.default_loader``.

        NOTE(review): depends on ``self.task_workdir`` having been set
        beforehand by ``_get_write_prefs`` - confirm call order if reused.
        """
        # get all loaders for host
        loaders_by_name = {
            loader.__name__: loader
            for loader in op_pipeline.discover_loader_plugins()
        }

        # get all published representations
        published_representations = instance.data["published_representations"]
        repres_db_id_by_name = {
            repre_info["representation"]["name"]: repre_id
            for repre_id, repre_info in published_representations.items()
        }

        # get all loadable representations
        repres_by_name = {
            repre["name"]: repre for repre in instance.data["representations"]
        }

        # get repre_id for the loadable representations
        loader_name_by_repre_id = {
            repres_db_id_by_name[repr_name]: {
                "loader": repr_data["batch_group_loader_name"],
                # add repre data for exception logging
                "_repre_data": repr_data
            }
            for repr_name, repr_data in repres_by_name.items()
            if repr_data.get("load_to_batch_group")
        }
        self.log.debug("__ loader_name_by_repre_id: {}".format(pformat(
            loader_name_by_repre_id)))

        # get representation context from the repre_id
        repre_contexts = op_pipeline.load.get_repres_contexts(
            loader_name_by_repre_id.keys())
        self.log.debug("__ repre_contexts: {}".format(pformat(
            repre_contexts)))

        # loop all returned repres from repre_context dict
        for repre_id, repre_context in repre_contexts.items():
            self.log.debug("__ repre_id: {}".format(repre_id))

            # get loader name by representation id
            loader_name = (
                loader_name_by_repre_id[repre_id]["loader"]
                # if nothing was added to settings fallback to default
                or self.default_loader
            )

            # get loader plugin
            loader_plugin = loaders_by_name.get(loader_name)
            if loader_plugin:
                # load to flame by representation context
                try:
                    op_pipeline.load.load_with_repre_context(
                        loader_plugin, repre_context, **{
                            "data": {
                                "workdir": self.task_workdir,
                                "batch": bgroup
                            }
                        })
                except op_pipeline.load.IncompatibleLoaderError as msg:
                    self.log.error(
                        "Check allowed representations for Loader `{}` "
                        "in settings > error: {}".format(
                            loader_plugin.__name__, msg))
                    self.log.error(
                        "Representaton context >>{}<< is not compatible "
                        "with loader `{}`".format(
                            pformat(repre_context), loader_plugin.__name__
                        )
                    )
            else:
                self.log.warning(
                    "Something got wrong and there is not Loader found for "
                    "following data: {}".format(
                        pformat(loader_name_by_repre_id))
                )

    def _get_batch_group(self, instance, task_data):
        """Create a new batch group or update an already existing one.

        The group is named ``<asset>_<task>`` and sized by the instance
        frame range; handles are passed along separately in ``batch_data``.

        Returns:
            flame batch group object
        """
        frame_start = instance.data["frameStart"]
        frame_end = instance.data["frameEnd"]
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]
        frame_duration = (frame_end - frame_start) + 1
        asset_name = instance.data["asset"]

        task_name = task_data["name"]
        batchgroup_name = "{}_{}".format(asset_name, task_name)

        batch_data = {
            "shematic_reels": [
                "OP_LoadedReel"
            ],
            "handleStart": handle_start,
            "handleEnd": handle_end
        }
        self.log.debug(
            "__ batch_data: {}".format(pformat(batch_data)))

        # check if the batch group already exists
        bgroup = opfapi.get_batch_group_from_desktop(batchgroup_name)

        if not bgroup:
            self.log.info(
                "Creating new batch group: {}".format(batchgroup_name))
            # create batch with utils
            bgroup = opfapi.create_batch_group(
                batchgroup_name,
                frame_start,
                frame_duration,
                **batch_data
            )
        else:
            self.log.info(
                "Updating batch group: {}".format(batchgroup_name))
            # update already created batch group
            bgroup = opfapi.create_batch_group(
                batchgroup_name,
                frame_start,
                frame_duration,
                update_batch_group=bgroup,
                **batch_data
            )

        return bgroup

    def _get_anamoty_data_with_current_task(self, instance, task_data):
        """Return a deep copy of anatomy data with the task context swapped.

        NOTE: "anamoty" is a typo for "anatomy"; kept as-is because the
        name is referenced within this plugin.
        """
        anatomy_data = copy.deepcopy(instance.data["anatomyData"])
        task_name = task_data["name"]
        task_type = task_data["type"]
        anatomy_obj = instance.context.data["anatomy"]

        # update task data in anatomy data
        project_task_types = anatomy_obj["tasks"]
        task_code = project_task_types.get(task_type, {}).get("short_name")
        anatomy_data.update({
            "task": {
                "name": task_name,
                "type": task_type,
                "short": task_code
            }
        })
        return anatomy_data

    def _get_write_prefs(self, instance, task_data):
        """Build the ordered Write File node attributes for the task.

        Side effect: stores the resolved task work directory on
        ``self.task_workdir`` (used later by ``_load_clip_to_context``)
        and creates the render output directory on disk.

        Returns:
            collections.OrderedDict: attribute name -> value; insertion
                order matters because some attributes must be applied after
                others (e.g. ``format_extension`` after ``file_type``)
        """
        # update task in anatomy data
        anatomy_data = self._get_anamoty_data_with_current_task(
            instance, task_data)

        self.task_workdir = self._get_shot_task_dir_path(
            instance, task_data)
        self.log.debug("__ task_workdir: {}".format(
            self.task_workdir))

        # TODO: this might be done with template in settings
        render_dir_path = os.path.join(
            self.task_workdir, "render", "flame")

        if not os.path.exists(render_dir_path):
            os.makedirs(render_dir_path, mode=0o777)

        # TODO: add most of these to `imageio/flame/batch/write_node`
        name = "{project[code]}_{asset}_{task[name]}".format(
            **anatomy_data
        )

        # The path attribute where the rendered clip is exported
        # /path/to/file.[0001-0010].exr
        media_path = render_dir_path
        # name of file represented by tokens
        media_path_pattern = (
            "<name>_v<iteration###>/<name>_v<iteration###>.<frame><ext>")
        # The Create Open Clip attribute of the Write File node. \
        # Determines if an Open Clip is created by the Write File node.
        create_clip = True
        # The Include Setup attribute of the Write File node.
        # Determines if a Batch Setup file is created by the Write File node.
        include_setup = True
        # The path attribute where the Open Clip file is exported by
        # the Write File node.
        create_clip_path = "<name>"
        # The path attribute where the Batch setup file
        # is exported by the Write File node.
        include_setup_path = "./<name>_v<iteration###>"
        # The file type for the files written by the Write File node.
        # Setting this attribute also overwrites format_extension,
        # bit_depth and compress_mode to match the defaults for
        # this file type.
        file_type = "OpenEXR"
        # The file extension for the files written by the Write File node.
        # This attribute resets to match file_type whenever file_type
        # is set. If you require a specific extension, you must
        # set format_extension after setting file_type.
        format_extension = "exr"
        # The bit depth for the files written by the Write File node.
        # This attribute resets to match file_type whenever file_type is set.
        bit_depth = "16"
        # The compressing attribute for the files exported by the Write
        # File node. Only relevant when file_type in 'OpenEXR', 'Sgi', 'Tiff'
        compress = True
        # The compression format attribute for the specific File Types
        # export by the Write File node. You must set compress_mode
        # after setting file_type.
        compress_mode = "DWAB"
        # The frame index mode attribute of the Write File node.
        # Value range: `Use Timecode` or `Use Start Frame`
        frame_index_mode = "Use Start Frame"
        frame_padding = 6
        # The versioning mode of the Open Clip exported by the Write File node.
        # Only available if create_clip = True.
        version_mode = "Follow Iteration"
        version_name = "v<version>"
        version_padding = 3

        # need to make sure the order of keys is correct
        return OrderedDict((
            ("name", name),
            ("media_path", media_path),
            ("media_path_pattern", media_path_pattern),
            ("create_clip", create_clip),
            ("include_setup", include_setup),
            ("create_clip_path", create_clip_path),
            ("include_setup_path", include_setup_path),
            ("file_type", file_type),
            ("format_extension", format_extension),
            ("bit_depth", bit_depth),
            ("compress", compress),
            ("compress_mode", compress_mode),
            ("frame_index_mode", frame_index_mode),
            ("frame_padding", frame_padding),
            ("version_mode", version_mode),
            ("version_name", version_name),
            ("version_padding", version_padding)
        ))

    def _get_shot_task_dir_path(self, instance, task_data):
        """Return the work directory path for the task on this shot.

        Resolved via OpenPype's ``get_workdir`` for the "flame" host.
        """
        project_doc = instance.data["projectEntity"]
        asset_entity = instance.data["assetEntity"]

        return get_workdir(
            project_doc, asset_entity, task_data["name"], "flame")

View file

@ -9,6 +9,8 @@ class ValidateSourceClip(pyblish.api.InstancePlugin):
label = "Validate Source Clip"
hosts = ["flame"]
families = ["clip"]
optional = True
active = False
def process(self, instance):
flame_source_clip = instance.data["flameSourceClip"]

View file

@ -3,18 +3,19 @@ import sys
from Qt import QtWidgets
from pprint import pformat
import atexit
import openpype
import avalon
import openpype.hosts.flame.api as opfapi
from openpype.pipeline import (
install_host,
registered_host,
)
def openpype_install():
"""Registering OpenPype in context
"""
openpype.install()
avalon.api.install(opfapi)
print("Avalon registered hosts: {}".format(
avalon.api.registered_host()))
install_host(opfapi)
print("Registered host: {}".format(registered_host()))
# Exception handler

View file

@ -7,14 +7,14 @@ import logging
import contextlib
import pyblish.api
import avalon.api
from openpype.api import Logger
from openpype.pipeline import (
LegacyCreator,
register_loader_plugin_path,
deregister_loader_plugin_path,
register_creator_plugin_path,
register_inventory_action_path,
deregister_loader_plugin_path,
deregister_creator_plugin_path,
deregister_inventory_action_path,
AVALON_CONTAINER_ID,
)
@ -70,7 +70,7 @@ def install():
log.info("Registering Fusion plug-ins..")
register_loader_plugin_path(LOAD_PATH)
avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH)
register_creator_plugin_path(CREATE_PATH)
register_inventory_action_path(INVENTORY_PATH)
pyblish.api.register_callback(
@ -94,7 +94,7 @@ def uninstall():
log.info("Deregistering Fusion plug-ins..")
deregister_loader_plugin_path(LOAD_PATH)
avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH)
deregister_creator_plugin_path(CREATE_PATH)
deregister_inventory_action_path(INVENTORY_PATH)
pyblish.api.deregister_callback(

View file

@ -1,13 +1,13 @@
import os
from openpype.pipeline import create
from openpype.pipeline import LegacyCreator
from openpype.hosts.fusion.api import (
get_current_comp,
comp_lock_and_undo_chunk
)
class CreateOpenEXRSaver(create.LegacyCreator):
class CreateOpenEXRSaver(LegacyCreator):
name = "openexrDefault"
label = "Create OpenEXR Saver"

View file

@ -7,6 +7,10 @@ import logging
import avalon.api
from avalon import io
from openpype.pipeline import (
install_host,
registered_host,
)
from openpype.lib import version_up
from openpype.hosts.fusion import api
from openpype.hosts.fusion.api import lib
@ -218,7 +222,7 @@ def switch(asset_name, filepath=None, new=True):
assert current_comp is not None, (
"Fusion could not load '{}'").format(filepath)
host = avalon.api.registered_host()
host = registered_host()
containers = list(host.ls())
assert containers, "Nothing to update"
@ -279,7 +283,7 @@ if __name__ == '__main__':
args, unknown = parser.parse_args()
avalon.api.install(api)
install_host(api)
switch(args.asset_name, args.file_path)
sys.exit(0)

View file

@ -1,24 +1,23 @@
import os
import sys
import openpype
from openpype.api import Logger
from openpype.pipeline import (
install_host,
registered_host,
)
log = Logger().get_logger(__name__)
def main(env):
import avalon.api
from openpype.hosts.fusion import api
from openpype.hosts.fusion.api import menu
# Registers pype's Global pyblish plugins
openpype.install()
# activate resolve from pype
avalon.api.install(api)
install_host(api)
log.info(f"Avalon registered hosts: {avalon.api.registered_host()}")
log.info(f"Registered host: {registered_host()}")
menu.launch_openpype_menu()

View file

@ -1,14 +1,15 @@
import os
import sys
import glob
import logging
from Qt import QtWidgets, QtCore
import avalon.api
from avalon import io
import qtawesome as qta
from openpype import style
from openpype.pipeline import install_host
from openpype.hosts.fusion import api
from openpype.lib.avalon_context import get_workdir_from_session
@ -181,8 +182,7 @@ class App(QtWidgets.QWidget):
if __name__ == '__main__':
import sys
avalon.api.install(api)
install_host(api)
app = QtWidgets.QApplication(sys.argv)
window = App()

View file

@ -183,10 +183,10 @@ def launch(application_path, *args):
application_path (str): Path to Harmony.
"""
from avalon import api
from openpype.pipeline import install_host
from openpype.hosts.harmony import api as harmony
api.install(harmony)
install_host(harmony)
ProcessContext.port = random.randrange(49152, 65535)
os.environ["AVALON_HARMONY_PORT"] = str(ProcessContext.port)

View file

@ -6,14 +6,14 @@ from bson.objectid import ObjectId
import pyblish.api
from avalon import io
import avalon.api
from openpype import lib
from openpype.lib import register_event_callback
from openpype.pipeline import (
LegacyCreator,
register_loader_plugin_path,
register_creator_plugin_path,
deregister_loader_plugin_path,
deregister_creator_plugin_path,
AVALON_CONTAINER_ID,
)
import openpype.hosts.harmony
@ -108,9 +108,8 @@ def check_inventory():
if not lib.any_outdated():
return
host = avalon.api.registered_host()
outdated_containers = []
for container in host.ls():
for container in ls():
representation = container['representation']
representation_doc = io.find_one(
{
@ -186,7 +185,7 @@ def install():
pyblish.api.register_host("harmony")
pyblish.api.register_plugin_path(PUBLISH_PATH)
register_loader_plugin_path(LOAD_PATH)
avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH)
register_creator_plugin_path(CREATE_PATH)
log.info(PUBLISH_PATH)
# Register callbacks.
@ -200,7 +199,7 @@ def install():
def uninstall():
pyblish.api.deregister_plugin_path(PUBLISH_PATH)
deregister_loader_plugin_path(LOAD_PATH)
avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH)
deregister_creator_plugin_path(CREATE_PATH)
def on_pyblish_instance_toggled(instance, old_value, new_value):

View file

@ -3,6 +3,8 @@
import pyblish.api
import os
from openpype.lib import get_subset_name_with_asset_doc
class CollectWorkfile(pyblish.api.ContextPlugin):
"""Collect current script for publish."""
@ -14,10 +16,15 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
def process(self, context):
"""Plugin entry point."""
family = "workfile"
task = os.getenv("AVALON_TASK", None)
sanitized_task_name = task[0].upper() + task[1:]
basename = os.path.basename(context.data["currentFile"])
subset = "{}{}".format(family, sanitized_task_name)
subset = get_subset_name_with_asset_doc(
family,
"",
context.data["anatomyData"]["task"]["name"],
context.data["assetEntity"],
context.data["anatomyData"]["project"]["name"],
host_name=context.data["hostName"]
)
# Create instance
instance = context.create_instance(subset)

View file

@ -10,7 +10,7 @@ def add_implementation_envs(env, _app):
]
old_hiero_path = env.get("HIERO_PLUGIN_PATH") or ""
for path in old_hiero_path.split(os.pathsep):
if not path or not os.path.exists(path):
if not path:
continue
norm_path = os.path.normpath(path)

View file

@ -5,13 +5,13 @@ import os
import contextlib
from collections import OrderedDict
from avalon import api as avalon
from avalon import schema
from pyblish import api as pyblish
from openpype.api import Logger
from openpype.pipeline import (
LegacyCreator,
register_creator_plugin_path,
register_loader_plugin_path,
deregister_creator_plugin_path,
deregister_loader_plugin_path,
AVALON_CONTAINER_ID,
)
@ -34,14 +34,7 @@ AVALON_CONTAINERS = ":AVALON_CONTAINERS"
def install():
"""
Installing Hiero integration for avalon
Args:
config (obj): avalon config module `pype` in our case, it is not
used but required by avalon.api.install()
"""
"""Installing Hiero integration."""
# adding all events
events.register_events()
@ -50,7 +43,7 @@ def install():
pyblish.register_host("hiero")
pyblish.register_plugin_path(PUBLISH_PATH)
register_loader_plugin_path(LOAD_PATH)
avalon.register_plugin_path(LegacyCreator, CREATE_PATH)
register_creator_plugin_path(CREATE_PATH)
# register callback for switching publishable
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
@ -71,7 +64,7 @@ def uninstall():
pyblish.deregister_host("hiero")
pyblish.deregister_plugin_path(PUBLISH_PATH)
deregister_loader_plugin_path(LOAD_PATH)
avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH)
deregister_creator_plugin_path(CREATE_PATH)
# register callback for switching publishable
pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)

View file

@ -1,9 +1,9 @@
import traceback
# activate hiero from pype
import avalon.api
from openpype.pipeline import install_host
import openpype.hosts.hiero.api as phiero
avalon.api.install(phiero)
install_host(phiero)
try:
__import__("openpype.hosts.hiero.api")

View file

@ -15,7 +15,7 @@ def add_implementation_envs(env, _app):
old_houdini_menu_path = env.get("HOUDINI_MENU_PATH") or ""
for path in old_houdini_path.split(os.pathsep):
if not path or not os.path.exists(path):
if not path:
continue
norm_path = os.path.normpath(path)
@ -23,7 +23,7 @@ def add_implementation_envs(env, _app):
new_houdini_path.append(norm_path)
for path in old_houdini_menu_path.split(os.pathsep):
if not path or not os.path.exists(path):
if not path:
continue
norm_path = os.path.normpath(path)

View file

@ -4,14 +4,11 @@ import logging
import contextlib
import hou
import hdefereval
import pyblish.api
import avalon.api
from avalon.lib import find_submodule
from openpype.pipeline import (
LegacyCreator,
register_creator_plugin_path,
register_loader_plugin_path,
AVALON_CONTAINER_ID,
)
@ -54,7 +51,7 @@ def install():
pyblish.api.register_plugin_path(PUBLISH_PATH)
register_loader_plugin_path(LOAD_PATH)
avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH)
register_creator_plugin_path(CREATE_PATH)
log.info("Installing callbacks ... ")
# register_event_callback("init", on_init)
@ -215,24 +212,12 @@ def ls():
"pyblish.mindbender.container"):
containers += lib.lsattr("id", identifier)
has_metadata_collector = False
config_host = find_submodule(avalon.api.registered_config(), "houdini")
if hasattr(config_host, "collect_container_metadata"):
has_metadata_collector = True
for container in sorted(containers,
# Hou 19+ Python 3 hou.ObjNode are not
# sortable due to not supporting greater
# than comparisons
key=lambda node: node.path()):
data = parse_container(container)
# Collect custom data if attribute is present
if has_metadata_collector:
metadata = config_host.collect_container_metadata(container)
data.update(metadata)
yield data
yield parse_container(container)
def before_save():
@ -305,7 +290,13 @@ def on_new():
start = hou.playbar.playbackRange()[0]
hou.setFrame(start)
hdefereval.executeDeferred(_enforce_start_frame)
if hou.isUIAvailable():
import hdefereval
hdefereval.executeDeferred(_enforce_start_frame)
else:
# Run without execute deferred when no UI is available because
# without UI `hdefereval` is not available to import
_enforce_start_frame()
def _set_context_settings():

View file

@ -1,6 +1,7 @@
import avalon.api as api
import pyblish.api
from openpype.pipeline import registered_host
def collect_input_containers(nodes):
"""Collect containers that contain any of the node in `nodes`.
@ -18,7 +19,7 @@ def collect_input_containers(nodes):
lookup = frozenset(nodes)
containers = []
host = api.registered_host()
host = registered_host()
for container in host.ls():
node = container["node"]

View file

@ -1,8 +1,8 @@
import pyblish.api
import avalon.api
from openpype.api import version_up
from openpype.action import get_errored_plugins_from_data
from openpype.pipeline import registered_host
class IncrementCurrentFile(pyblish.api.InstancePlugin):
@ -41,7 +41,7 @@ class IncrementCurrentFile(pyblish.api.InstancePlugin):
)
# Filename must not have changed since collecting
host = avalon.api.registered_host()
host = registered_host()
current_file = host.current_file()
assert (
context.data["currentFile"] == current_file

View file

@ -1,5 +1,6 @@
import pyblish.api
import avalon.api
from openpype.pipeline import registered_host
class SaveCurrentScene(pyblish.api.ContextPlugin):
@ -12,7 +13,7 @@ class SaveCurrentScene(pyblish.api.ContextPlugin):
def process(self, context):
# Filename must not have changed since collecting
host = avalon.api.registered_host()
host = registered_host()
current_file = host.current_file()
assert context.data['currentFile'] == current_file, (
"Collected filename from current scene name."

View file

@ -1,10 +1,10 @@
import avalon.api
from openpype.pipeline import install_host
from openpype.hosts.houdini import api
def main():
print("Installing OpenPype ...")
avalon.api.install(api)
install_host(api)
main()

View file

@ -1,10 +1,10 @@
import avalon.api
from openpype.pipeline import install_host
from openpype.hosts.houdini import api
def main():
print("Installing OpenPype ...")
avalon.api.install(api)
install_host(api)
main()

View file

@ -134,6 +134,7 @@ class AvalonURIOutputProcessor(base.OutputProcessorBase):
"""
from avalon import api, io
from openpype.pipeline import registered_root
PROJECT = api.Session["AVALON_PROJECT"]
asset_doc = io.find_one({"name": asset,
@ -141,7 +142,7 @@ class AvalonURIOutputProcessor(base.OutputProcessorBase):
if not asset_doc:
raise RuntimeError("Invalid asset name: '%s'" % asset)
root = api.registered_root()
root = registered_root()
path = self._template.format(**{
"root": root,
"project": PROJECT,

View file

@ -9,7 +9,7 @@ def add_implementation_envs(env, _app):
]
old_python_path = env.get("PYTHONPATH") or ""
for path in old_python_path.split(os.pathsep):
if not path or not os.path.exists(path):
if not path:
continue
norm_path = os.path.normpath(path)

View file

@ -0,0 +1,202 @@
# -*- coding: utf-8 -*-
"""Tools to work with FBX."""
import logging
from pyblish.api import Instance
from maya import cmds # noqa
import maya.mel as mel # noqa
class FBXExtractor:
    """Extract FBX from Maya.

    This extracts reproducible FBX exports ignoring any of the settings
    set on the local machine in the FBX export options window.

    All export settings are applied with the `FBXExport*` commands prior
    to the `FBXExport` call itself. The options can be overridden with
    their nice names as seen in the "options" property on this class.

    For more information on FBX exports see:
    - https://knowledge.autodesk.com/support/maya/learn-explore/caas
    /CloudHelp/cloudhelp/2016/ENU/Maya/files/GUID-6CCE943A-2ED4-4CEE-96D4
    -9CB19C28F4E0-htm.html
    - http://forums.cgsociety.org/archive/index.php?t-1032853.html
    - https://groups.google.com/forum/#!msg/python_inside_maya/cLkaSo361oE
    /LKs9hakE28kJ

    """

    @property
    def options(self):
        """Overridable options for FBX Export.

        Given in the following format:
            - {NAME: EXPECTED TYPE}

        If the overridden option's type does not match,
        the option is not included and a warning is logged.

        Returns:
            dict: Mapping of FBX option name to its expected type.

        """
        return {
            "cameras": bool,
            "smoothingGroups": bool,
            "hardEdges": bool,
            "tangents": bool,
            "smoothMesh": bool,
            "instances": bool,
            # "referencedContainersContent": bool, # deprecated in Maya 2016+
            "bakeComplexAnimation": int,
            "bakeComplexStart": int,
            "bakeComplexEnd": int,
            "bakeComplexStep": int,
            "bakeResampleAnimation": bool,
            "animationOnly": bool,
            "useSceneName": bool,
            "quaternion": str,  # "euler"
            "shapes": bool,
            "skins": bool,
            "constraints": bool,
            "lights": bool,
            "embeddedTextures": bool,
            "inputConnections": bool,
            "upAxis": str,  # x, y or z,
            "triangulate": bool
        }

    @property
    def default_options(self):
        """The default options for FBX extraction.

        This includes shapes, skins, constraints, lights and incoming
        connections and exports with the Y-axis as up-axis.

        By default this uses the time sliders start and end time.

        Returns:
            dict: Option name to its default value.

        """
        # Frame range defaults come from the current scene's time slider.
        start_frame = int(cmds.playbackOptions(query=True,
                                               animationStartTime=True))
        end_frame = int(cmds.playbackOptions(query=True,
                                             animationEndTime=True))

        return {
            "cameras": False,
            "smoothingGroups": True,
            "hardEdges": False,
            "tangents": False,
            "smoothMesh": True,
            "instances": False,
            "bakeComplexAnimation": True,
            "bakeComplexStart": start_frame,
            "bakeComplexEnd": end_frame,
            "bakeComplexStep": 1,
            "bakeResampleAnimation": True,
            "animationOnly": False,
            "useSceneName": False,
            "quaternion": "euler",
            "shapes": True,
            "skins": True,
            "constraints": False,
            "lights": True,
            "embeddedTextures": False,
            "inputConnections": True,
            "upAxis": "y",
            "triangulate": False
        }

    def __init__(self, log=None):
        # type: (logging.Logger) -> None
        """Initialize the extractor.

        Args:
            log (logging.Logger, optional): Logger to report through. When
                omitted, a logger named after this class is created.

        """
        self.log = log or logging.getLogger(self.__class__.__name__)
        # Ensure FBX plug-in is loaded so the FBXExport* MEL commands exist.
        cmds.loadPlugin("fbxmaya", quiet=True)

    def parse_overrides(self, instance, options):
        """Inspect data of instance to determine overridden options.

        An instance may supply any of the overridable options
        as data, the option is then added to the extraction.

        Args:
            instance (Instance): Publish instance whose data may override
                options listed in the ``options`` property.
            options (dict): Options dict to update in place.

        Returns:
            dict: The (mutated) ``options`` argument.

        """
        for key in instance.data:
            if key not in self.options:
                continue

            # Ensure the data is of correct type; mismatches are skipped
            # with a warning instead of producing a broken MEL command.
            value = instance.data[key]
            if not isinstance(value, self.options[key]):
                self.log.warning(
                    "Overridden attribute {key} was of "
                    "the wrong type: {invalid_type} "
                    "- should have been {valid_type}".format(
                        key=key,
                        invalid_type=type(value).__name__,
                        valid_type=self.options[key].__name__))
                continue

            options[key] = value

        return options

    def set_options_from_instance(self, instance):
        # type: (Instance) -> None
        """Sets FBX export options from data in the instance.

        Applies the default options, then any instance overrides, and
        pushes every option to Maya through the ``FBXExport*`` MEL
        commands so the following ``FBXExport`` call is reproducible.

        Args:
            instance (Instance): Instance data.

        """
        # Parse export options
        options = self.default_options
        options = self.parse_overrides(instance, options)
        self.log.info("Export options: {0}".format(options))

        # Collect the start and end including handles; instance values
        # take precedence over the context values.
        start = instance.data.get("frameStartHandle") or \
            instance.context.data.get("frameStartHandle")
        end = instance.data.get("frameEndHandle") or \
            instance.context.data.get("frameEndHandle")

        options['bakeComplexStart'] = start
        options['bakeComplexEnd'] = end

        # First apply the default export settings to be fully consistent
        # each time for successive publishes
        mel.eval("FBXResetExport")

        # Apply the FBX overrides through MEL since the commands
        # only work correctly in MEL according to online
        # available discussions on the topic
        # Python 2/3 compatibility for dict iteration.
        _iteritems = getattr(options, "iteritems", options.items)
        for option, value in _iteritems():
            key = option[0].upper() + option[1:]  # uppercase first letter

            # Boolean must be passed as lower-case strings
            # as to MEL standards
            if isinstance(value, bool):
                value = str(value).lower()

            # FBXExportUpAxis is special-cased to take its value without
            # the -v flag.
            template = "FBXExport{0} {1}" if key == "UpAxis" else \
                "FBXExport{0} -v {1}"  # noqa
            cmd = template.format(key, value)
            self.log.info(cmd)
            mel.eval(cmd)

        # Never show the UI or generate a log
        mel.eval("FBXExportShowUI -v false")
        mel.eval("FBXExportGenerateLog -v false")

    @staticmethod
    def export(members, path):
        # type: (list, str) -> None
        """Export members as FBX with given path.

        Replaces the current selection (``r=True``) with ``members``
        before exporting the selection (``-s``).

        Args:
            members (list): List of members to export.
            path (str): Path to use for export.

        """
        cmds.select(members, r=True, noExpand=True)
        mel.eval('FBXExport -f "{}" -s'.format(path))

View file

@ -26,6 +26,7 @@ from openpype.pipeline import (
loaders_from_representation,
get_representation_path,
load_container,
registered_host,
)
from .commands import reset_frame_range
@ -1574,7 +1575,7 @@ def assign_look_by_version(nodes, version_id):
"name": "json"})
# See if representation is already loaded, if so reuse it.
host = api.registered_host()
host = registered_host()
representation_id = str(look_representation['_id'])
for container in host.ls():
if (container['loader'] == "LookLoader" and
@ -2612,7 +2613,7 @@ def get_attr_in_layer(attr, layer):
def fix_incompatible_containers():
"""Backwards compatibility: old containers to use new ReferenceLoader"""
host = api.registered_host()
host = registered_host()
for container in host.ls():
loader = container['loader']
@ -3138,11 +3139,20 @@ def set_colorspace():
@contextlib.contextmanager
def root_parent(nodes):
# type: (list) -> list
def parent_nodes(nodes, parent=None):
# type: (list, str) -> list
"""Context manager to un-parent provided nodes and return them back."""
import pymel.core as pm # noqa
parent_node = None
delete_parent = False
if parent:
if not cmds.objExists(parent):
parent_node = pm.createNode("transform", n=parent, ss=False)
delete_parent = True
else:
parent_node = pm.PyNode(parent)
node_parents = []
for node in nodes:
n = pm.PyNode(node)
@ -3153,9 +3163,14 @@ def root_parent(nodes):
node_parents.append((n, root))
try:
for node in node_parents:
node[0].setParent(world=True)
if not parent:
node[0].setParent(world=True)
else:
node[0].setParent(parent_node)
yield
finally:
for node in node_parents:
if node[1]:
node[0].setParent(node[1])
if delete_parent:
pm.delete(parent_node)

View file

@ -9,8 +9,6 @@ import maya.api.OpenMaya as om
import pyblish.api
import avalon.api
from avalon.lib import find_submodule
import openpype.hosts.maya
from openpype.tools.utils import host_tools
from openpype.lib import (
@ -20,11 +18,12 @@ from openpype.lib import (
)
from openpype.lib.path_tools import HostDirmap
from openpype.pipeline import (
LegacyCreator,
register_loader_plugin_path,
register_inventory_action_path,
register_creator_plugin_path,
deregister_loader_plugin_path,
deregister_inventory_action_path,
deregister_creator_plugin_path,
AVALON_CONTAINER_ID,
)
from openpype.hosts.maya.lib import copy_workspace_mel
@ -60,7 +59,7 @@ def install():
pyblish.api.register_host("maya")
register_loader_plugin_path(LOAD_PATH)
avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH)
register_creator_plugin_path(CREATE_PATH)
register_inventory_action_path(INVENTORY_PATH)
log.info(PUBLISH_PATH)
@ -189,7 +188,7 @@ def uninstall():
pyblish.api.deregister_host("maya")
deregister_loader_plugin_path(LOAD_PATH)
avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH)
deregister_creator_plugin_path(CREATE_PATH)
deregister_inventory_action_path(INVENTORY_PATH)
menu.uninstall()
@ -268,21 +267,8 @@ def ls():
"""
container_names = _ls()
has_metadata_collector = False
config_host = find_submodule(avalon.api.registered_config(), "maya")
if hasattr(config_host, "collect_container_metadata"):
has_metadata_collector = True
for container in sorted(container_names):
data = parse_container(container)
# Collect custom data if attribute is present
if has_metadata_collector:
metadata = config_host.collect_container_metadata(container)
data.update(metadata)
yield data
yield parse_container(container)
def containerise(name,

View file

@ -22,4 +22,6 @@ class CreateLook(plugin.Creator):
self.data["maketx"] = self.make_tx
# Enable users to force a copy.
# - on Windows is "forceCopy" always changed to `True` because of
# windows implementation of hardlinks
self.data["forceCopy"] = False

View file

@ -0,0 +1,51 @@
from openpype.hosts.maya.api import plugin, lib
class CreateMultiverseUsd(plugin.Creator):
    """Multiverse USD data"""

    name = "usdMain"
    label = "Multiverse USD"
    family = "usd"
    icon = "cubes"

    def __init__(self, *args, **kwargs):
        super(CreateMultiverseUsd, self).__init__(*args, **kwargs)

        # Add animation data first, since it maintains order.
        self.data.update(lib.collect_animation_data(True))

        # Default Multiverse USD write options, assigned one by one so
        # their order in the instance data is deterministic.
        for option_key, option_default in (
            ("stripNamespaces", False),
            ("mergeTransformAndShape", False),
            ("writeAncestors", True),
            ("flattenParentXforms", False),
            ("writeSparseOverrides", False),
            ("useMetaPrimPath", False),
            ("customRootPath", ''),
            ("customAttributes", ''),
            ("nodeTypesToIgnore", ''),
            ("writeMeshes", True),
            ("writeCurves", True),
            ("writeParticles", True),
            ("writeCameras", False),
            ("writeLights", False),
            ("writeJoints", False),
            ("writeCollections", False),
            ("writePositions", True),
            ("writeNormals", True),
            ("writeUVs", True),
            ("writeColorSets", False),
            ("writeTangents", False),
            ("writeRefPositions", False),
            ("writeBlendShapes", False),
            ("writeDisplayColor", False),
            ("writeSkinWeights", False),
            ("writeMaterialAssignment", False),
            ("writeHardwareShader", False),
            ("writeShadingNetworks", False),
            ("writeTransformMatrix", True),
            ("writeUsdAttributes", False),
            ("timeVaryingTopology", False),
            ("customMaterialNamespace", ''),
            ("numTimeSamples", 1),
            ("timeSamplesSpan", 0.0),
        ):
            self.data[option_key] = option_default

View file

@ -0,0 +1,23 @@
from openpype.hosts.maya.api import plugin, lib
class CreateMultiverseUsdComp(plugin.Creator):
    """Create Multiverse USD Composition"""

    name = "usdCompositionMain"
    label = "Multiverse USD Composition"
    family = "usdComposition"
    icon = "cubes"

    def __init__(self, *args, **kwargs):
        super(CreateMultiverseUsdComp, self).__init__(*args, **kwargs)

        # Add animation data first, since it maintains order.
        self.data.update(lib.collect_animation_data(True))

        # Default composition write options, assigned one by one so
        # their order in the instance data is deterministic.
        for option_key, option_default in (
            ("stripNamespaces", False),
            ("mergeTransformAndShape", False),
            ("flattenContent", False),
            ("writePendingOverrides", False),
            ("numTimeSamples", 1),
            ("timeSamplesSpan", 0.0),
        ):
            self.data[option_key] = option_default

View file

@ -0,0 +1,28 @@
from openpype.hosts.maya.api import plugin, lib
class CreateMultiverseUsdOver(plugin.Creator):
    """Multiverse USD data"""

    name = "usdOverrideMain"
    label = "Multiverse USD Override"
    family = "usdOverride"
    icon = "cubes"

    def __init__(self, *args, **kwargs):
        super(CreateMultiverseUsdOver, self).__init__(*args, **kwargs)

        # Add animation data first, since it maintains order.
        self.data.update(lib.collect_animation_data(True))

        # Default override write options, assigned one by one so
        # their order in the instance data is deterministic.
        for option_key, option_default in (
            ("writeAll", False),
            ("writeTransforms", True),
            ("writeVisibility", True),
            ("writeAttributes", True),
            ("writeMaterials", True),
            ("writeVariants", True),
            ("writeVariantsDefinition", True),
            ("writeActiveState", True),
            ("writeNamespaces", False),
            ("numTimeSamples", 1),
            ("timeSamplesSpan", 0.0),
        ):
            self.data[option_key] = option_default

View file

@ -4,8 +4,6 @@ import os
import json
import appdirs
import requests
import six
import sys
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
@ -14,6 +12,7 @@ from openpype.hosts.maya.api import (
lib,
plugin
)
from openpype.lib import requests_get
from openpype.api import (
get_system_settings,
get_project_settings,
@ -117,6 +116,8 @@ class CreateRender(plugin.Creator):
except KeyError:
self.aov_separator = "_"
manager = ModulesManager()
self.deadline_module = manager.modules_by_name["deadline"]
try:
default_servers = deadline_settings["deadline_urls"]
project_servers = (
@ -133,10 +134,8 @@ class CreateRender(plugin.Creator):
except AttributeError:
# Handle situation were we had only one url for deadline.
manager = ModulesManager()
deadline_module = manager.modules_by_name["deadline"]
# get default deadline webservice url from deadline module
self.deadline_servers = deadline_module.deadline_urls
self.deadline_servers = self.deadline_module.deadline_urls
def process(self):
"""Entry point."""
@ -205,53 +204,37 @@ class CreateRender(plugin.Creator):
def _deadline_webservice_changed(self):
"""Refresh Deadline server dependent options."""
# get selected server
from maya import cmds
webservice = self.deadline_servers[
self.server_aliases[
cmds.getAttr("{}.deadlineServers".format(self.instance))
]
]
pools = self._get_deadline_pools(webservice)
pools = self.deadline_module.get_deadline_pools(webservice, self.log)
cmds.deleteAttr("{}.primaryPool".format(self.instance))
cmds.deleteAttr("{}.secondaryPool".format(self.instance))
pool_setting = (self._project_settings["deadline"]
["publish"]
["CollectDeadlinePools"])
primary_pool = pool_setting["primary_pool"]
sorted_pools = self._set_default_pool(list(pools), primary_pool)
cmds.addAttr(self.instance, longName="primaryPool",
attributeType="enum",
enumName=":".join(pools))
cmds.addAttr(self.instance, longName="secondaryPool",
enumName=":".join(sorted_pools))
pools = ["-"] + pools
secondary_pool = pool_setting["secondary_pool"]
sorted_pools = self._set_default_pool(list(pools), secondary_pool)
cmds.addAttr("{}.secondaryPool".format(self.instance),
attributeType="enum",
enumName=":".join(["-"] + pools))
def _get_deadline_pools(self, webservice):
# type: (str) -> list
"""Get pools from Deadline.
Args:
webservice (str): Server url.
Returns:
list: Pools.
Throws:
RuntimeError: If deadline webservice is unreachable.
"""
argument = "{}/api/pools?NamesOnly=true".format(webservice)
try:
response = self._requests_get(argument)
except requests.exceptions.ConnectionError as exc:
msg = 'Cannot connect to deadline web service'
self.log.error(msg)
six.reraise(
RuntimeError,
RuntimeError('{} - {}'.format(msg, exc)),
sys.exc_info()[2])
if not response.ok:
self.log.warning("No pools retrieved")
return []
return response.json()
enumName=":".join(sorted_pools))
def _create_render_settings(self):
"""Create instance settings."""
# get pools
pool_names = []
default_priority = 50
self.server_aliases = list(self.deadline_servers.keys())
self.data["deadlineServers"] = self.server_aliases
@ -260,7 +243,8 @@ class CreateRender(plugin.Creator):
self.data["extendFrames"] = False
self.data["overrideExistingFrame"] = True
# self.data["useLegacyRenderLayers"] = True
self.data["priority"] = 50
self.data["priority"] = default_priority
self.data["tile_priority"] = default_priority
self.data["framesPerTask"] = 1
self.data["whitelist"] = False
self.data["machineList"] = ""
@ -293,7 +277,18 @@ class CreateRender(plugin.Creator):
# use first one for initial list of pools.
deadline_url = next(iter(self.deadline_servers.values()))
pool_names = self._get_deadline_pools(deadline_url)
pool_names = self.deadline_module.get_deadline_pools(deadline_url,
self.log)
maya_submit_dl = self._project_settings.get(
"deadline", {}).get(
"publish", {}).get(
"MayaSubmitDeadline", {})
priority = maya_submit_dl.get("priority", default_priority)
self.data["priority"] = priority
tile_priority = maya_submit_dl.get("tile_priority",
default_priority)
self.data["tile_priority"] = tile_priority
if muster_enabled:
self.log.info(">>> Loading Muster credentials ...")
@ -314,12 +309,27 @@ class CreateRender(plugin.Creator):
self.log.info(" - pool: {}".format(pool["name"]))
pool_names.append(pool["name"])
self.data["primaryPool"] = pool_names
pool_setting = (self._project_settings["deadline"]
["publish"]
["CollectDeadlinePools"])
primary_pool = pool_setting["primary_pool"]
self.data["primaryPool"] = self._set_default_pool(pool_names,
primary_pool)
# We add a string "-" to allow the user to not
# set any secondary pools
self.data["secondaryPool"] = ["-"] + pool_names
pool_names = ["-"] + pool_names
secondary_pool = pool_setting["secondary_pool"]
self.data["secondaryPool"] = self._set_default_pool(pool_names,
secondary_pool)
self.options = {"useSelection": False} # Force no content
def _set_default_pool(self, pool_names, pool_value):
"""Reorder pool names, default should come first"""
if pool_value and pool_value in pool_names:
pool_names.remove(pool_value)
pool_names = [pool_value] + pool_names
return pool_names
def _load_credentials(self):
"""Load Muster credentials.
@ -354,7 +364,7 @@ class CreateRender(plugin.Creator):
"""
params = {"authToken": self._token}
api_entry = "/api/pools/list"
response = self._requests_get(self.MUSTER_REST_URL + api_entry,
response = requests_get(self.MUSTER_REST_URL + api_entry,
params=params)
if response.status_code != 200:
if response.status_code == 401:
@ -380,45 +390,11 @@ class CreateRender(plugin.Creator):
api_url = "{}/muster/show_login".format(
os.environ["OPENPYPE_WEBSERVER_URL"])
self.log.debug(api_url)
login_response = self._requests_get(api_url, timeout=1)
login_response = requests_get(api_url, timeout=1)
if login_response.status_code != 200:
self.log.error("Cannot show login form to Muster")
raise Exception("Cannot show login form to Muster")
def _requests_post(self, *args, **kwargs):
"""Wrap request post method.
Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment
variable is found. This is useful when Deadline or Muster server are
running with self-signed certificates and their certificate is not
added to trusted certificates on client machines.
Warning:
Disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if "verify" not in kwargs:
kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True)
return requests.post(*args, **kwargs)
def _requests_get(self, *args, **kwargs):
"""Wrap request get method.
Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment
variable is found. This is useful when Deadline or Muster server are
running with self-signed certificates and their certificate is not
added to trusted certificates on client machines.
Warning:
Disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if "verify" not in kwargs:
kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True)
return requests.get(*args, **kwargs)
def _set_default_renderer_settings(self, renderer):
"""Set basic settings based on renderer.

View file

@ -15,6 +15,14 @@ class CreateReview(plugin.Creator):
keepImages = False
isolate = False
imagePlane = True
transparency = [
"preset",
"simple",
"object sorting",
"weighted average",
"depth peeling",
"alpha cut"
]
def __init__(self, *args, **kwargs):
super(CreateReview, self).__init__(*args, **kwargs)
@ -28,5 +36,6 @@ class CreateReview(plugin.Creator):
data["isolate"] = self.isolate
data["keepImages"] = self.keepImages
data["imagePlane"] = self.imagePlane
data["transparency"] = self.transparency
self.data = data

View file

@ -0,0 +1,50 @@
# -*- coding: utf-8 -*-
"""Creator for Unreal Skeletal Meshes."""
from openpype.hosts.maya.api import plugin, lib
from avalon.api import Session
from maya import cmds # noqa
class CreateUnrealSkeletalMesh(plugin.Creator):
    """Unreal Skeletal Mesh creator.

    Creates an instance set containing two child sets, ``geometry_SET``
    and ``joints_SET``, and sorts the selected nodes into them based on
    ``joint_hints``.
    """

    # NOTE(review): `name` reads "staticMeshMain" while label and family
    # are skeletal mesh — this looks like a copy-paste from
    # CreateUnrealStaticMesh; confirm whether it should be
    # "skeletalMeshMain" (changing it alters default subset names).
    name = "staticMeshMain"
    label = "Unreal - Skeletal Mesh"
    family = "skeletalMesh"
    icon = "thumbs-up"
    # Keys filled in by get_dynamic_data for subset name templating.
    dynamic_subset_keys = ["asset"]

    # Node names routed to joints_SET instead of geometry_SET in process().
    joint_hints = []

    def __init__(self, *args, **kwargs):
        """Constructor."""
        super(CreateUnrealSkeletalMesh, self).__init__(*args, **kwargs)

    @classmethod
    def get_dynamic_data(
        cls, variant, task_name, asset_id, project_name, host_name
    ):
        """Extend parent data with the current AVALON_ASSET name."""
        dynamic_data = super(CreateUnrealSkeletalMesh, cls).get_dynamic_data(
            variant, task_name, asset_id, project_name, host_name
        )
        dynamic_data["asset"] = Session.get("AVALON_ASSET")
        return dynamic_data

    def process(self):
        """Create the instance set and sort members into child sets."""
        self.name = "{}_{}".format(self.family, self.name)
        with lib.undo_chunk():
            instance = super(CreateUnrealSkeletalMesh, self).process()
            content = cmds.sets(instance, query=True)

            # empty set and process its former content
            cmds.sets(content, rm=instance)
            geometry_set = cmds.sets(name="geometry_SET", empty=True)
            joints_set = cmds.sets(name="joints_SET", empty=True)

            cmds.sets([geometry_set, joints_set], forceElement=instance)
            members = cmds.ls(content) or []

            for node in members:
                if node in self.joint_hints:
                    cmds.sets(node, forceElement=joints_set)
                else:
                    cmds.sets(node, forceElement=geometry_set)

View file

@ -10,7 +10,7 @@ class CreateUnrealStaticMesh(plugin.Creator):
"""Unreal Static Meshes with collisions."""
name = "staticMeshMain"
label = "Unreal - Static Mesh"
family = "unrealStaticMesh"
family = "staticMesh"
icon = "cube"
dynamic_subset_keys = ["asset"]
@ -28,10 +28,10 @@ class CreateUnrealStaticMesh(plugin.Creator):
variant, task_name, asset_id, project_name, host_name
)
dynamic_data["asset"] = Session.get("AVALON_ASSET")
return dynamic_data
def process(self):
self.name = "{}_{}".format(self.family, self.name)
with lib.undo_chunk():
instance = super(CreateUnrealStaticMesh, self).process()
content = cmds.sets(instance, query=True)

View file

@ -4,8 +4,6 @@ import os
import json
import appdirs
import requests
import six
import sys
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
@ -19,6 +17,7 @@ from openpype.api import (
get_project_settings
)
from openpype.lib import requests_get
from openpype.pipeline import CreatorError
from openpype.modules import ModulesManager
@ -40,6 +39,10 @@ class CreateVRayScene(plugin.Creator):
self._rs = renderSetup.instance()
self.data["exportOnFarm"] = False
deadline_settings = get_system_settings()["modules"]["deadline"]
manager = ModulesManager()
self.deadline_module = manager.modules_by_name["deadline"]
if not deadline_settings["enabled"]:
self.deadline_servers = {}
return
@ -62,10 +65,8 @@ class CreateVRayScene(plugin.Creator):
except AttributeError:
# Handle situation were we had only one url for deadline.
manager = ModulesManager()
deadline_module = manager.modules_by_name["deadline"]
# get default deadline webservice url from deadline module
self.deadline_servers = deadline_module.deadline_urls
self.deadline_servers = self.deadline_module.deadline_urls
def process(self):
"""Entry point."""
@ -128,7 +129,7 @@ class CreateVRayScene(plugin.Creator):
cmds.getAttr("{}.deadlineServers".format(self.instance))
]
]
pools = self._get_deadline_pools(webservice)
pools = self.deadline_module.get_deadline_pools(webservice)
cmds.deleteAttr("{}.primaryPool".format(self.instance))
cmds.deleteAttr("{}.secondaryPool".format(self.instance))
cmds.addAttr(self.instance, longName="primaryPool",
@ -138,33 +139,6 @@ class CreateVRayScene(plugin.Creator):
attributeType="enum",
enumName=":".join(["-"] + pools))
def _get_deadline_pools(self, webservice):
# type: (str) -> list
"""Get pools from Deadline.
Args:
webservice (str): Server url.
Returns:
list: Pools.
Throws:
RuntimeError: If deadline webservice is unreachable.
"""
argument = "{}/api/pools?NamesOnly=true".format(webservice)
try:
response = self._requests_get(argument)
except requests.exceptions.ConnectionError as exc:
msg = 'Cannot connect to deadline web service'
self.log.error(msg)
six.reraise(
CreatorError,
CreatorError('{} - {}'.format(msg, exc)),
sys.exc_info()[2])
if not response.ok:
self.log.warning("No pools retrieved")
return []
return response.json()
def _create_vray_instance_settings(self):
# get pools
pools = []
@ -195,7 +169,7 @@ class CreateVRayScene(plugin.Creator):
for k in self.deadline_servers.keys()
][0]
pool_names = self._get_deadline_pools(deadline_url)
pool_names = self.deadline_module.get_deadline_pools(deadline_url)
if muster_enabled:
self.log.info(">>> Loading Muster credentials ...")
@ -259,8 +233,8 @@ class CreateVRayScene(plugin.Creator):
"""
params = {"authToken": self._token}
api_entry = "/api/pools/list"
response = self._requests_get(self.MUSTER_REST_URL + api_entry,
params=params)
response = requests_get(self.MUSTER_REST_URL + api_entry,
params=params)
if response.status_code != 200:
if response.status_code == 401:
self.log.warning("Authentication token expired.")
@ -285,45 +259,7 @@ class CreateVRayScene(plugin.Creator):
api_url = "{}/muster/show_login".format(
os.environ["OPENPYPE_WEBSERVER_URL"])
self.log.debug(api_url)
login_response = self._requests_get(api_url, timeout=1)
login_response = requests_get(api_url, timeout=1)
if login_response.status_code != 200:
self.log.error("Cannot show login form to Muster")
raise CreatorError("Cannot show login form to Muster")
def _requests_post(self, *args, **kwargs):
"""Wrap request post method.
Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment
variable is found. This is useful when Deadline or Muster server are
running with self-signed certificates and their certificate is not
added to trusted certificates on client machines.
Warning:
Disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if "verify" not in kwargs:
kwargs["verify"] = (
False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True
) # noqa
return requests.post(*args, **kwargs)
def _requests_get(self, *args, **kwargs):
"""Wrap request get method.
Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment
variable is found. This is useful when Deadline or Muster server are
running with self-signed certificates and their certificate is not
added to trusted certificates on client machines.
Warning:
Disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if "verify" not in kwargs:
kwargs["verify"] = (
False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True
) # noqa
return requests.get(*args, **kwargs)

View file

@ -4,7 +4,6 @@ from bson.objectid import ObjectId
from openpype.pipeline import (
InventoryAction,
get_representation_context,
get_representation_path_from_context,
)
from openpype.hosts.maya.api.lib import (
maintained_selection,
@ -80,10 +79,10 @@ class ImportModelRender(InventoryAction):
})
context = get_representation_context(look_repr["_id"])
maya_file = get_representation_path_from_context(context)
maya_file = self.filepath_from_context(context)
context = get_representation_context(json_repr["_id"])
json_file = get_representation_path_from_context(context)
json_file = self.filepath_from_context(context)
# Import the look file
with maintained_selection():

View file

@ -0,0 +1,102 @@
# -*- coding: utf-8 -*-
import maya.cmds as cmds
from openpype.pipeline import (
load,
get_representation_path
)
from openpype.hosts.maya.api.lib import (
maintained_selection,
namespaced,
unique_namespace
)
from openpype.hosts.maya.api.pipeline import containerise
class MultiverseUsdLoader(load.LoaderPlugin):
    """Load the USD by Multiverse"""

    families = ["model", "usd", "usdComposition", "usdOverride",
                "pointcache", "animation"]
    representations = ["usd", "usda", "usdc", "usdz", "abc"]

    label = "Read USD by Multiverse"
    order = -10
    icon = "code-fork"
    color = "orange"

    def load(self, context, name=None, namespace=None, options=None):
        """Create an mvUsdCompoundShape reading the representation file.

        Creates the shape in a unique namespace, locks it against
        deletion and returns the Avalon container set holding the
        transform and shape.
        """
        asset = context['asset']['name']
        # Maya namespaces cannot start with a digit, hence the prefix.
        namespace = namespace or unique_namespace(
            asset + "_",
            prefix="_" if asset[0].isdigit() else "",
            suffix="_",
        )

        # Create the shape
        cmds.loadPlugin("MultiverseForMaya", quiet=True)

        shape = None
        transform = None
        with maintained_selection():
            cmds.namespace(addNamespace=namespace)
            with namespaced(namespace, new=False):
                # Deferred import: module is provided by the
                # MultiverseForMaya plug-in loaded above.
                import multiverse
                shape = multiverse.CreateUsdCompound(self.fname)
                transform = cmds.listRelatives(
                    shape, parent=True, fullPath=True)[0]

                # Lock the shape node so the user cannot delete it.
                cmds.lockNode(shape, lock=True)

        nodes = [transform, shape]
        self[:] = nodes

        return containerise(
            name=name,
            namespace=namespace,
            nodes=nodes,
            context=context,
            loader=self.__class__.__name__)

    def update(self, container, representation):
        # type: (dict, dict) -> None
        """Update container with specified representation."""
        node = container['objectName']
        assert cmds.objExists(node), "Missing container"

        members = cmds.sets(node, query=True) or []
        shapes = cmds.ls(members, type="mvUsdCompoundShape")
        assert shapes, "Cannot find mvUsdCompoundShape in container"

        path = get_representation_path(representation)

        import multiverse
        for shape in shapes:
            # Repoint every compound shape at the new representation file.
            multiverse.SetUsdCompoundAssetPaths(shape, [path])

        # Record the new representation id on the container set.
        cmds.setAttr("{}.representation".format(node),
                     str(representation["_id"]),
                     type="string")

    def switch(self, container, representation):
        """Switching is identical to updating for this loader."""
        self.update(container, representation)

    def remove(self, container):
        # type: (dict) -> None
        """Remove loaded container."""
        # Delete container and its contents
        if cmds.objExists(container['objectName']):
            members = cmds.sets(container['objectName'], query=True) or []
            cmds.delete([container['objectName']] + members)

        # Remove the namespace, if empty
        namespace = container['namespace']
        if cmds.namespace(exists=namespace):
            members = cmds.namespaceInfo(namespace, listNamespace=True)
            if not members:
                cmds.namespace(removeNamespace=namespace)
            else:
                self.log.warning("Namespace not deleted because it "
                                 "still has members: %s", namespace)

View file

@ -22,7 +22,8 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
"camera",
"rig",
"camerarig",
"xgen"]
"xgen",
"staticMesh"]
representations = ["ma", "abc", "fbx", "mb"]
label = "Reference"

View file

@ -1,31 +0,0 @@
# -*- coding: utf-8 -*-
"""Cleanup leftover nodes."""
from maya import cmds # noqa
import pyblish.api
class CleanNodesUp(pyblish.api.InstancePlugin):
    """Delete leftover helper nodes created during publishing.

    Nodes queued by earlier plugins under ``instance.data["cleanNodes"]``
    are deleted from the scene after integration has run.
    """
    # Run after integration so the nodes are no longer needed.
    order = pyblish.api.IntegratorOrder + 10
    label = "Clean Nodes"
    optional = True
    active = True
    def process(self, instance):
        """Delete every node listed in ``instance.data["cleanNodes"]``."""
        if not instance.data.get("cleanNodes"):
            self.log.info("Nothing to clean.")
            return
        # ``pop`` so a repeated run has nothing left to delete.
        nodes_to_clean = instance.data.pop("cleanNodes", [])
        self.log.info("Removing {} nodes".format(len(nodes_to_clean)))
        for node in nodes_to_clean:
            try:
                cmds.delete(node)
            except ValueError:
                # object might be already deleted, don't complain about it
                pass

View file

@ -194,11 +194,13 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
assert render_products, "no render products generated"
exp_files = []
multipart = False
render_cameras = []
for product in render_products:
if product.multipart:
multipart = True
product_name = product.productName
if product.camera and layer_render_products.has_camera_token():
render_cameras.append(product.camera)
product_name = "{}{}".format(
product.camera,
"_" + product_name if product_name else "")
@ -208,6 +210,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
product)
})
assert render_cameras, "No render cameras found."
self.log.info("multipart: {}".format(
multipart))
assert exp_files, "no file names were generated, this is bug"
@ -386,6 +390,12 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
overrides = self.parse_options(str(render_globals))
data.update(**overrides)
# get string values for pools
primary_pool = overrides["renderGlobals"]["Pool"]
secondary_pool = overrides["renderGlobals"].get("SecondaryPool")
data["primaryPool"] = primary_pool
data["secondaryPool"] = secondary_pool
# Define nice label
label = "{0} ({1})".format(expected_layer_name, data["asset"])
label += " [{0}-{1}]".format(

View file

@ -0,0 +1,39 @@
# -*- coding: utf-8 -*-
from maya import cmds # noqa
import pyblish.api
class CollectUnrealSkeletalMesh(pyblish.api.InstancePlugin):
    """Collect geometry and joint members for Unreal skeletal meshes.

    Pins the frame range to the current frame and gathers the contents
    of the ``geometry_set*`` and ``joints_set*`` object sets found among
    the instance members.
    """
    order = pyblish.api.CollectorOrder + 0.2
    label = "Collect Unreal Skeletal Meshes"
    families = ["skeletalMesh"]
    def process(self, instance):
        # A skeletal mesh is extracted as a single (current) frame.
        current_frame = cmds.currentTime(query=True)
        instance.data["frameStart"] = current_frame
        instance.data["frameEnd"] = current_frame

        members = instance[:]
        instance.data["geometry"] = self._set_contents(
            members, "geometry_set")
        instance.data["joints"] = self._set_contents(
            members, "joints_set")

    @staticmethod
    def _set_contents(members, prefix):
        """Return long names of everything inside sets named ``prefix*``."""
        collected = []
        for obj_set in members:
            if not obj_set.lower().startswith(prefix):
                continue
            content = cmds.ls(cmds.sets(obj_set, query=True), long=True)
            if content:
                collected += content
        return collected

View file

@ -1,38 +1,36 @@
# -*- coding: utf-8 -*-
from maya import cmds
from maya import cmds # noqa
import pyblish.api
from pprint import pformat
class CollectUnrealStaticMesh(pyblish.api.InstancePlugin):
"""Collect Unreal Static Mesh
Ensures always only a single frame is extracted (current frame). This
also sets correct FBX options for later extraction.
"""
"""Collect Unreal Static Mesh."""
order = pyblish.api.CollectorOrder + 0.2
label = "Collect Unreal Static Meshes"
families = ["unrealStaticMesh"]
families = ["staticMesh"]
def process(self, instance):
# add fbx family to trigger fbx extractor
instance.data["families"].append("fbx")
# take the name from instance (without the `S_` prefix)
instance.data["staticMeshCombinedName"] = instance.name[2:]
geometry_set = [i for i in instance if i == "geometry_SET"]
instance.data["membersToCombine"] = cmds.sets(
geometry_set = [
i for i in instance
if i.startswith("geometry_SET")
]
instance.data["geometryMembers"] = cmds.sets(
geometry_set, query=True)
collision_set = [i for i in instance if i == "collisions_SET"]
self.log.info("geometry: {}".format(
pformat(instance.data.get("geometryMembers"))))
collision_set = [
i for i in instance
if i.startswith("collisions_SET")
]
instance.data["collisionMembers"] = cmds.sets(
collision_set, query=True)
# set fbx overrides on instance
instance.data["smoothingGroups"] = True
instance.data["smoothMesh"] = True
instance.data["triangulate"] = True
self.log.info("collisions: {}".format(
pformat(instance.data.get("collisionMembers"))))
frame = cmds.currentTime(query=True)
instance.data["frameStart"] = frame

View file

@ -5,152 +5,29 @@ from maya import cmds # noqa
import maya.mel as mel # noqa
import pyblish.api
import openpype.api
from openpype.hosts.maya.api.lib import (
root_parent,
maintained_selection
)
from openpype.hosts.maya.api.lib import maintained_selection
from openpype.hosts.maya.api import fbx
class ExtractFBX(openpype.api.Extractor):
"""Extract FBX from Maya.
This extracts reproducible FBX exports ignoring any of the settings set
on the local machine in the FBX export options window.
All export settings are applied with the `FBXExport*` commands prior
to the `FBXExport` call itself. The options can be overridden with their
nice names as seen in the "options" property on this class.
For more information on FBX exports see:
- https://knowledge.autodesk.com/support/maya/learn-explore/caas
/CloudHelp/cloudhelp/2016/ENU/Maya/files/GUID-6CCE943A-2ED4-4CEE-96D4
-9CB19C28F4E0-htm.html
- http://forums.cgsociety.org/archive/index.php?t-1032853.html
- https://groups.google.com/forum/#!msg/python_inside_maya/cLkaSo361oE
/LKs9hakE28kJ
This extracts reproducible FBX exports ignoring any of the
settings set on the local machine in the FBX export options window.
"""
order = pyblish.api.ExtractorOrder
label = "Extract FBX"
families = ["fbx"]
@property
def options(self):
"""Overridable options for FBX Export
Given in the following format
- {NAME: EXPECTED TYPE}
If the overridden option's type does not match,
the option is not included and a warning is logged.
"""
return {
"cameras": bool,
"smoothingGroups": bool,
"hardEdges": bool,
"tangents": bool,
"smoothMesh": bool,
"instances": bool,
# "referencedContainersContent": bool, # deprecated in Maya 2016+
"bakeComplexAnimation": int,
"bakeComplexStart": int,
"bakeComplexEnd": int,
"bakeComplexStep": int,
"bakeResampleAnimation": bool,
"animationOnly": bool,
"useSceneName": bool,
"quaternion": str, # "euler"
"shapes": bool,
"skins": bool,
"constraints": bool,
"lights": bool,
"embeddedTextures": bool,
"inputConnections": bool,
"upAxis": str, # x, y or z,
"triangulate": bool
}
@property
def default_options(self):
"""The default options for FBX extraction.
This includes shapes, skins, constraints, lights and incoming
connections and exports with the Y-axis as up-axis.
By default this uses the time sliders start and end time.
"""
start_frame = int(cmds.playbackOptions(query=True,
animationStartTime=True))
end_frame = int(cmds.playbackOptions(query=True,
animationEndTime=True))
return {
"cameras": False,
"smoothingGroups": False,
"hardEdges": False,
"tangents": False,
"smoothMesh": False,
"instances": False,
"bakeComplexAnimation": True,
"bakeComplexStart": start_frame,
"bakeComplexEnd": end_frame,
"bakeComplexStep": 1,
"bakeResampleAnimation": True,
"animationOnly": False,
"useSceneName": False,
"quaternion": "euler",
"shapes": True,
"skins": True,
"constraints": False,
"lights": True,
"embeddedTextures": True,
"inputConnections": True,
"upAxis": "y",
"triangulate": False
}
def parse_overrides(self, instance, options):
"""Inspect data of instance to determine overridden options
An instance may supply any of the overridable options
as data, the option is then added to the extraction.
"""
for key in instance.data:
if key not in self.options:
continue
# Ensure the data is of correct type
value = instance.data[key]
if not isinstance(value, self.options[key]):
self.log.warning(
"Overridden attribute {key} was of "
"the wrong type: {invalid_type} "
"- should have been {valid_type}".format(
key=key,
invalid_type=type(value).__name__,
valid_type=self.options[key].__name__))
continue
options[key] = value
return options
def process(self, instance):
# Ensure FBX plug-in is loaded
cmds.loadPlugin("fbxmaya", quiet=True)
fbx_exporter = fbx.FBXExtractor(log=self.log)
# Define output path
stagingDir = self.staging_dir(instance)
staging_dir = self.staging_dir(instance)
filename = "{0}.fbx".format(instance.name)
path = os.path.join(stagingDir, filename)
path = os.path.join(staging_dir, filename)
# The export requires forward slashes because we need
# to format it into a string in a mel expression
@ -162,54 +39,13 @@ class ExtractFBX(openpype.api.Extractor):
self.log.info("Members: {0}".format(members))
self.log.info("Instance: {0}".format(instance[:]))
# Parse export options
options = self.default_options
options = self.parse_overrides(instance, options)
self.log.info("Export options: {0}".format(options))
# Collect the start and end including handles
start = instance.data["frameStartHandle"]
end = instance.data["frameEndHandle"]
options['bakeComplexStart'] = start
options['bakeComplexEnd'] = end
# First apply the default export settings to be fully consistent
# each time for successive publishes
mel.eval("FBXResetExport")
# Apply the FBX overrides through MEL since the commands
# only work correctly in MEL according to online
# available discussions on the topic
_iteritems = getattr(options, "iteritems", options.items)
for option, value in _iteritems():
key = option[0].upper() + option[1:] # uppercase first letter
# Boolean must be passed as lower-case strings
# as to MEL standards
if isinstance(value, bool):
value = str(value).lower()
template = "FBXExport{0} {1}" if key == "UpAxis" else "FBXExport{0} -v {1}" # noqa
cmd = template.format(key, value)
self.log.info(cmd)
mel.eval(cmd)
# Never show the UI or generate a log
mel.eval("FBXExportShowUI -v false")
mel.eval("FBXExportGenerateLog -v false")
fbx_exporter.set_options_from_instance(instance)
# Export
if "unrealStaticMesh" in instance.data["families"]:
with maintained_selection():
with root_parent(members):
self.log.info("Un-parenting: {}".format(members))
cmds.select(members, r=1, noExpand=True)
mel.eval('FBXExport -f "{}" -s'.format(path))
else:
with maintained_selection():
cmds.select(members, r=1, noExpand=True)
mel.eval('FBXExport -f "{}" -s'.format(path))
with maintained_selection():
fbx_exporter.export(members, path)
cmds.select(members, r=1, noExpand=True)
mel.eval('FBXExport -f "{}" -s'.format(path))
if "representations" not in instance.data:
instance.data["representations"] = []
@ -218,7 +54,7 @@ class ExtractFBX(openpype.api.Extractor):
'name': 'fbx',
'ext': 'fbx',
'files': filename,
"stagingDir": stagingDir,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)

View file

@ -4,6 +4,7 @@ import os
import sys
import json
import tempfile
import platform
import contextlib
import subprocess
from collections import OrderedDict
@ -334,7 +335,14 @@ class ExtractLook(openpype.api.Extractor):
transfers = []
hardlinks = []
hashes = {}
force_copy = instance.data.get("forceCopy", False)
# Temporary fix to NOT create hardlinks on windows machines
if platform.system().lower() == "windows":
self.log.info(
"Forcing copy instead of hardlink due to issues on Windows..."
)
force_copy = True
else:
force_copy = instance.data.get("forceCopy", False)
for filepath in files_metadata:

View file

@ -0,0 +1,210 @@
import os
import six
from maya import cmds
import openpype.api
from openpype.hosts.maya.api.lib import maintained_selection
class ExtractMultiverseUsd(openpype.api.Extractor):
    """Extract a USD asset file from Maya using the Multiverse plug-in.

    Collects mesh shapes from the instance set members and writes them
    out with ``multiverse.WriteAsset``, honouring per-instance option
    overrides declared in :attr:`options`.
    """
    label = "Extract Multiverse USD"
    hosts = ["maya"]
    families = ["usd"]
    @property
    def options(self):
        """Overridable options for Multiverse USD Export.

        Given in the following format
            - {NAME: EXPECTED TYPE}

        If the overridden option's type does not match,
        the option is not included and a warning is logged.
        """
        return {
            "stripNamespaces": bool,
            "mergeTransformAndShape": bool,
            "writeAncestors": bool,
            "flattenParentXforms": bool,
            "writeSparseOverrides": bool,
            "useMetaPrimPath": bool,
            "customRootPath": str,
            "customAttributes": str,
            "nodeTypesToIgnore": str,
            "writeMeshes": bool,
            "writeCurves": bool,
            "writeParticles": bool,
            "writeCameras": bool,
            "writeLights": bool,
            "writeJoints": bool,
            "writeCollections": bool,
            "writePositions": bool,
            "writeNormals": bool,
            "writeUVs": bool,
            "writeColorSets": bool,
            "writeTangents": bool,
            "writeRefPositions": bool,
            "writeBlendShapes": bool,
            "writeDisplayColor": bool,
            "writeSkinWeights": bool,
            "writeMaterialAssignment": bool,
            "writeHardwareShader": bool,
            "writeShadingNetworks": bool,
            "writeTransformMatrix": bool,
            "writeUsdAttributes": bool,
            "timeVaryingTopology": bool,
            "customMaterialNamespace": str,
            "numTimeSamples": int,
            "timeSamplesSpan": float
        }
    @property
    def default_options(self):
        """The default options for Multiverse USD extraction.

        Returns a fresh dict on every access, so callers may mutate the
        result without affecting later reads.
        """
        return {
            "stripNamespaces": False,
            "mergeTransformAndShape": False,
            "writeAncestors": True,
            "flattenParentXforms": False,
            "writeSparseOverrides": False,
            "useMetaPrimPath": False,
            "customRootPath": str(),
            "customAttributes": str(),
            "nodeTypesToIgnore": str(),
            "writeMeshes": True,
            "writeCurves": True,
            "writeParticles": True,
            "writeCameras": False,
            "writeLights": False,
            "writeJoints": False,
            "writeCollections": False,
            "writePositions": True,
            "writeNormals": True,
            "writeUVs": True,
            "writeColorSets": False,
            "writeTangents": False,
            "writeRefPositions": False,
            "writeBlendShapes": False,
            "writeDisplayColor": False,
            "writeSkinWeights": False,
            "writeMaterialAssignment": False,
            "writeHardwareShader": False,
            "writeShadingNetworks": False,
            "writeTransformMatrix": True,
            "writeUsdAttributes": False,
            "timeVaryingTopology": False,
            "customMaterialNamespace": str(),
            "numTimeSamples": 1,
            "timeSamplesSpan": 0.0
        }
    def parse_overrides(self, instance, options):
        """Inspect data of instance to determine overridden options.

        Copies into ``options`` every instance-data value whose key is
        declared in :attr:`options` and whose type matches; mismatching
        values are skipped with a warning. Returns the same ``options``
        dict, mutated in place.
        """
        for key in instance.data:
            if key not in self.options:
                continue
            # Ensure the data is of correct type
            value = instance.data[key]
            # Normalize py2 unicode strings so the ``str`` type check passes.
            if isinstance(value, six.text_type):
                value = str(value)
            if not isinstance(value, self.options[key]):
                self.log.warning(
                    "Overridden attribute {key} was of "
                    "the wrong type: {invalid_type} "
                    "- should have been {valid_type}".format(
                        key=key,
                        invalid_type=type(value).__name__,
                        valid_type=self.options[key].__name__))
                continue
            options[key] = value
        return options
    def process(self, instance):
        """Write the instance's mesh members to a ``.usd`` file.

        Registers the written file as a ``usd`` representation on the
        instance.
        """
        # Load plugin firstly
        cmds.loadPlugin("MultiverseForMaya", quiet=True)
        # Define output file path
        staging_dir = self.staging_dir(instance)
        file_name = "{}.usd".format(instance.name)
        file_path = os.path.join(staging_dir, file_name)
        # Multiverse expects forward slashes regardless of platform.
        file_path = file_path.replace('\\', '/')
        # Parse export options
        options = self.default_options
        options = self.parse_overrides(instance, options)
        self.log.info("Export options: {0}".format(options))
        # Perform extraction
        self.log.info("Performing extraction ...")
        with maintained_selection():
            # Expand set members down to non-intermediate mesh shapes.
            members = instance.data("setMembers")
            members = cmds.ls(members,
                              dag=True,
                              shapes=True,
                              type=("mesh"),
                              noIntermediate=True,
                              long=True)
            self.log.info('Collected object {}'.format(members))
            import multiverse
            # Only build time options for an actual frame range; a single
            # frame export passes ``None``.
            time_opts = None
            frame_start = instance.data['frameStart']
            frame_end = instance.data['frameEnd']
            handle_start = instance.data['handleStart']
            handle_end = instance.data['handleEnd']
            step = instance.data['step']
            fps = instance.data['fps']
            if frame_end != frame_start:
                time_opts = multiverse.TimeOptions()
                time_opts.writeTimeRange = True
                # Handles extend the range on both sides.
                time_opts.frameRange = (
                    frame_start - handle_start, frame_end + handle_end)
                time_opts.frameIncrement = step
                time_opts.numTimeSamples = instance.data["numTimeSamples"]
                time_opts.timeSamplesSpan = instance.data["timeSamplesSpan"]
                time_opts.framePerSecond = fps
            asset_write_opts = multiverse.AssetWriteOptions(time_opts)
            # Time-related keys are consumed above, not write options.
            options_discard_keys = {
                'numTimeSamples',
                'timeSamplesSpan',
                'frameStart',
                'frameEnd',
                'handleStart',
                'handleEnd',
                'step',
                'fps'
            }
            for key, value in options.items():
                if key in options_discard_keys:
                    continue
                setattr(asset_write_opts, key, value)
            multiverse.WriteAsset(file_path, members, asset_write_opts)
        if "representations" not in instance.data:
            instance.data["representations"] = []
        representation = {
            'name': 'usd',
            'ext': 'usd',
            'files': file_name,
            "stagingDir": staging_dir
        }
        instance.data["representations"].append(representation)
        self.log.info("Extracted instance {} to {}".format(
            instance.name, file_path))

View file

@ -0,0 +1,151 @@
import os
from maya import cmds
import openpype.api
from openpype.hosts.maya.api.lib import maintained_selection
class ExtractMultiverseUsdComposition(openpype.api.Extractor):
    """Extract a USD composition file from Maya via Multiverse.

    Writes ``mvUsdCompoundShape`` members of the instance out with
    ``multiverse.WriteComposition``.
    """
    label = "Extract Multiverse USD Composition"
    hosts = ["maya"]
    families = ["usdComposition"]
    @property
    def options(self):
        """Overridable options for Multiverse USD Export.

        Given in the following format
            - {NAME: EXPECTED TYPE}

        If the overridden option's type does not match,
        the option is not included and a warning is logged.
        """
        return {
            "stripNamespaces": bool,
            "mergeTransformAndShape": bool,
            "flattenContent": bool,
            "writePendingOverrides": bool,
            "numTimeSamples": int,
            "timeSamplesSpan": float
        }
    @property
    def default_options(self):
        """The default options for Multiverse USD extraction.

        Returns a fresh dict on every access, so callers may mutate the
        result without affecting later reads.
        """
        return {
            "stripNamespaces": True,
            "mergeTransformAndShape": False,
            "flattenContent": False,
            "writePendingOverrides": False,
            "numTimeSamples": 1,
            "timeSamplesSpan": 0.0
        }
    def parse_overrides(self, instance, options):
        """Inspect data of instance to determine overridden options.

        Copies into ``options`` every instance-data value whose key is
        declared in :attr:`options` and whose type matches; mismatching
        values are skipped with a warning. Returns the same ``options``
        dict, mutated in place.
        """
        for key in instance.data:
            if key not in self.options:
                continue
            # Ensure the data is of correct type
            value = instance.data[key]
            if not isinstance(value, self.options[key]):
                self.log.warning(
                    "Overridden attribute {key} was of "
                    "the wrong type: {invalid_type} "
                    "- should have been {valid_type}".format(
                        key=key,
                        invalid_type=type(value).__name__,
                        valid_type=self.options[key].__name__))
                continue
            options[key] = value
        return options
    def process(self, instance):
        """Write the instance's compound shapes to a ``.usd`` file.

        Registers the written file as a ``usd`` representation on the
        instance.
        """
        # Load plugin firstly
        cmds.loadPlugin("MultiverseForMaya", quiet=True)
        # Define output file path
        staging_dir = self.staging_dir(instance)
        file_name = "{}.usd".format(instance.name)
        file_path = os.path.join(staging_dir, file_name)
        # Multiverse expects forward slashes regardless of platform.
        file_path = file_path.replace('\\', '/')
        # Parse export options
        options = self.default_options
        options = self.parse_overrides(instance, options)
        self.log.info("Export options: {0}".format(options))
        # Perform extraction
        self.log.info("Performing extraction ...")
        with maintained_selection():
            members = instance.data("setMembers")
            members = cmds.ls(members,
                              dag=True,
                              shapes=True,
                              type="mvUsdCompoundShape",
                              noIntermediate=True,
                              long=True)
            self.log.info('Collected object {}'.format(members))
            import multiverse
            time_opts = None
            frame_start = instance.data['frameStart']
            frame_end = instance.data['frameEnd']
            handle_start = instance.data['handleStart']
            handle_end = instance.data['handleEnd']
            step = instance.data['step']
            fps = instance.data['fps']
            if frame_end != frame_start:
                time_opts = multiverse.TimeOptions()
                time_opts.writeTimeRange = True
                time_opts.frameRange = (
                    frame_start - handle_start, frame_end + handle_end)
                time_opts.frameIncrement = step
                time_opts.numTimeSamples = instance.data["numTimeSamples"]
                time_opts.timeSamplesSpan = instance.data["timeSamplesSpan"]
                time_opts.framePerSecond = fps
            # NOTE(review): ``time_opts`` is built above but not passed to
            # ``CompositionWriteOptions()`` (unlike the asset extractor,
            # which passes it to ``AssetWriteOptions``) — confirm whether
            # the composition export is meant to ignore the time range.
            comp_write_opts = multiverse.CompositionWriteOptions()
            # Time-related keys are consumed above, not write options.
            options_discard_keys = {
                'numTimeSamples',
                'timeSamplesSpan',
                'frameStart',
                'frameEnd',
                'handleStart',
                'handleEnd',
                'step',
                'fps'
            }
            for key, value in options.items():
                if key in options_discard_keys:
                    continue
                setattr(comp_write_opts, key, value)
            multiverse.WriteComposition(file_path, members, comp_write_opts)
        if "representations" not in instance.data:
            instance.data["representations"] = []
        representation = {
            'name': 'usd',
            'ext': 'usd',
            'files': file_name,
            "stagingDir": staging_dir
        }
        instance.data["representations"].append(representation)
        self.log.info("Extracted instance {} to {}".format(
            instance.name, file_path))

View file

@ -0,0 +1,139 @@
import os
import openpype.api
from openpype.hosts.maya.api.lib import maintained_selection
from maya import cmds
class ExtractMultiverseUsdOverride(openpype.api.Extractor):
    """Extract a USD override file from Maya via Multiverse.

    Writes overrides for each ``mvUsdCompoundShape`` member with
    ``multiverse.WriteOverrides``.
    """
    label = "Extract Multiverse USD Override"
    hosts = ["maya"]
    families = ["usdOverride"]
    @property
    def options(self):
        """Overridable options for Multiverse USD Export.

        Given in the following format
            - {NAME: EXPECTED TYPE}

        If the overridden option's type does not match,
        the option is not included and a warning is logged.

        NOTE(review): unlike the other Multiverse extractors, ``process``
        below never calls a ``parse_overrides`` with these — confirm
        whether instance overrides are meant to apply here.
        """
        return {
            "writeAll": bool,
            "writeTransforms": bool,
            "writeVisibility": bool,
            "writeAttributes": bool,
            "writeMaterials": bool,
            "writeVariants": bool,
            "writeVariantsDefinition": bool,
            "writeActiveState": bool,
            "writeNamespaces": bool,
            "numTimeSamples": int,
            "timeSamplesSpan": float
        }
    @property
    def default_options(self):
        """The default options for Multiverse USD extraction.

        Returns a fresh dict on every access, so callers may mutate the
        result without affecting later reads.
        """
        return {
            "writeAll": False,
            "writeTransforms": True,
            "writeVisibility": True,
            "writeAttributes": True,
            "writeMaterials": True,
            "writeVariants": True,
            "writeVariantsDefinition": True,
            "writeActiveState": True,
            "writeNamespaces": False,
            "numTimeSamples": 1,
            "timeSamplesSpan": 0.0
        }
    def process(self, instance):
        """Write per-member USD overrides to a ``.usda`` file.

        Registers the written file as a ``usd`` representation on the
        instance.
        """
        # Load plugin firstly
        cmds.loadPlugin("MultiverseForMaya", quiet=True)
        # Define output file path
        staging_dir = self.staging_dir(instance)
        file_name = "{}.usda".format(instance.name)
        file_path = os.path.join(staging_dir, file_name)
        # Multiverse expects forward slashes regardless of platform.
        file_path = file_path.replace("\\", "/")
        # Parse export options
        options = self.default_options
        self.log.info("Export options: {0}".format(options))
        # Perform extraction
        self.log.info("Performing extraction ...")
        with maintained_selection():
            members = instance.data("setMembers")
            members = cmds.ls(members,
                              dag=True,
                              shapes=True,
                              type="mvUsdCompoundShape",
                              noIntermediate=True,
                              long=True)
            self.log.info("Collected object {}".format(members))
            # TODO: Deal with asset, composition, overide with options.
            import multiverse
            time_opts = None
            frame_start = instance.data["frameStart"]
            frame_end = instance.data["frameEnd"]
            handle_start = instance.data["handleStart"]
            handle_end = instance.data["handleEnd"]
            step = instance.data["step"]
            fps = instance.data["fps"]
            if frame_end != frame_start:
                time_opts = multiverse.TimeOptions()
                time_opts.writeTimeRange = True
                time_opts.frameRange = (
                    frame_start - handle_start, frame_end + handle_end)
                time_opts.frameIncrement = step
                time_opts.numTimeSamples = instance.data["numTimeSamples"]
                time_opts.timeSamplesSpan = instance.data["timeSamplesSpan"]
                time_opts.framePerSecond = fps
            over_write_opts = multiverse.OverridesWriteOptions(time_opts)
            # Time-related keys are consumed above, not write options.
            options_discard_keys = {
                "numTimeSamples",
                "timeSamplesSpan",
                "frameStart",
                "frameEnd",
                "handleStart",
                "handleEnd",
                "step",
                "fps"
            }
            for key, value in options.items():
                if key in options_discard_keys:
                    continue
                setattr(over_write_opts, key, value)
            # Overrides are written per member into the same file.
            for member in members:
                multiverse.WriteOverrides(file_path, member, over_write_opts)
        if "representations" not in instance.data:
            instance.data["representations"] = []
        # NOTE(review): the file on disk is ``.usda`` while the
        # representation ext is "usd" — confirm this mismatch is intended.
        representation = {
            "name": "usd",
            "ext": "usd",
            "files": file_name,
            "stagingDir": staging_dir
        }
        instance.data["representations"].append(representation)
        self.log.info("Extracted instance {} to {}".format(
            instance.name, file_path))

View file

@ -73,6 +73,11 @@ class ExtractPlayblast(openpype.api.Extractor):
pm.currentTime(refreshFrameInt - 1, edit=True)
pm.currentTime(refreshFrameInt, edit=True)
# Override transparency if requested.
transparency = instance.data.get("transparency", 0)
if transparency != 0:
preset["viewport2_options"]["transparencyAlgorithm"] = transparency
# Isolate view is requested by having objects in the set besides a
# camera.
if preset.pop("isolate_view", False) and instance.data.get("isolate"):

View file

@ -0,0 +1,85 @@
# -*- coding: utf-8 -*-
"""Create Unreal Skeletal Mesh data to be extracted as FBX."""
import os
from contextlib import contextmanager
from maya import cmds # noqa
import pyblish.api
import openpype.api
from openpype.hosts.maya.api import fbx
@contextmanager
def renamed(original_name, renamed_name):
    # type: (str, str) -> None
    """Temporarily rename node ``original_name`` to ``renamed_name``.

    The rename is reverted on exit, even when the managed body raises.

    Fix: the initial rename is performed *outside* the ``try`` block.
    Previously it sat inside, so a failing initial rename still hit the
    ``finally`` clause and attempted to rename a node back that had
    never been renamed in the first place.
    """
    cmds.rename(original_name, renamed_name)
    try:
        yield
    finally:
        # Always restore the original name.
        cmds.rename(renamed_name, original_name)
class ExtractUnrealSkeletalMesh(openpype.api.Extractor):
    """Extract Unreal Skeletal Mesh as FBX from Maya.

    Exports the geometry and joints collected on the instance into a
    single FBX file, renaming the hierarchy's top node to the asset name
    for the duration of the export so Unreal can merge variants.
    """
    order = pyblish.api.ExtractorOrder - 0.1
    label = "Extract Unreal Skeletal Mesh"
    families = ["skeletalMesh"]
    def process(self, instance):
        """Export ``geometry`` + ``joints`` members to one FBX file."""
        fbx_exporter = fbx.FBXExtractor(log=self.log)

        # Define output path
        staging_dir = self.staging_dir(instance)
        filename = "{0}.fbx".format(instance.name)
        path = os.path.join(staging_dir, filename)

        # Guard against a collector storing ``None`` instead of a list.
        geo = instance.data.get("geometry") or []
        joints = instance.data.get("joints") or []
        to_extract = geo + joints

        # The export requires forward slashes because we need
        # to format it into a string in a mel expression
        path = path.replace('\\', '/')

        self.log.info("Extracting FBX to: {0}".format(path))
        self.log.info("Members: {0}".format(to_extract))
        self.log.info("Instance: {0}".format(instance[:]))

        fbx_exporter.set_options_from_instance(instance)

        # This magic is done for variants. To let Unreal merge correctly
        # existing data, top node must have the same name. So for every
        # variant we extract we need to rename top node of the rig correctly.
        # It is finally done in context manager so it won't affect current
        # scene.
        # We rely on the whole hierarchy living under one root; members
        # are assumed to be full DAG paths ("|root|...").
        original_parent = to_extract[0].split("|")[1]
        parent_node = instance.data.get("asset")

        # Rewrite every member path to use the renamed root node.
        renamed_to_extract = []
        for node in to_extract:
            node_path = node.split("|")
            node_path[1] = parent_node
            renamed_to_extract.append("|".join(node_path))

        with renamed(original_parent, parent_node):
            # Fix: the original passed a stray second argument (``path``)
            # to ``format`` which was silently ignored.
            self.log.info("Extracting: {}".format(renamed_to_extract))
            fbx_exporter.export(renamed_to_extract, path)

        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            'name': 'fbx',
            'ext': 'fbx',
            'files': filename,
            "stagingDir": staging_dir,
        }
        instance.data["representations"].append(representation)

        self.log.info("Extract FBX successful to: {0}".format(path))

View file

@ -1,33 +1,61 @@
# -*- coding: utf-8 -*-
"""Create Unreal Static Mesh data to be extracted as FBX."""
import openpype.api
import pyblish.api
import os
from maya import cmds # noqa
import pyblish.api
import openpype.api
from openpype.hosts.maya.api.lib import (
parent_nodes,
maintained_selection
)
from openpype.hosts.maya.api import fbx
class ExtractUnrealStaticMesh(openpype.api.Extractor):
"""Extract FBX from Maya. """
"""Extract Unreal Static Mesh as FBX from Maya. """
order = pyblish.api.ExtractorOrder - 0.1
label = "Extract Unreal Static Mesh"
families = ["unrealStaticMesh"]
families = ["staticMesh"]
def process(self, instance):
to_combine = instance.data.get("membersToCombine")
static_mesh_name = instance.data.get("staticMeshCombinedName")
self.log.info(
"merging {} into {}".format(
" + ".join(to_combine), static_mesh_name))
duplicates = cmds.duplicate(to_combine, ic=True)
cmds.polyUnite(
*duplicates,
n=static_mesh_name, ch=False)
members = instance.data.get("geometryMembers", [])
if instance.data.get("collisionMembers"):
members = members + instance.data.get("collisionMembers")
if not instance.data.get("cleanNodes"):
instance.data["cleanNodes"] = []
fbx_exporter = fbx.FBXExtractor(log=self.log)
instance.data["cleanNodes"].append(static_mesh_name)
instance.data["cleanNodes"] += duplicates
# Define output path
staging_dir = self.staging_dir(instance)
filename = "{0}.fbx".format(instance.name)
path = os.path.join(staging_dir, filename)
instance.data["setMembers"] = [static_mesh_name]
instance.data["setMembers"] += instance.data["collisionMembers"]
# The export requires forward slashes because we need
# to format it into a string in a mel expression
path = path.replace('\\', '/')
self.log.info("Extracting FBX to: {0}".format(path))
self.log.info("Members: {0}".format(members))
self.log.info("Instance: {0}".format(instance[:]))
fbx_exporter.set_options_from_instance(instance)
with maintained_selection():
with parent_nodes(members):
self.log.info("Un-parenting: {}".format(members))
fbx_exporter.export(members, path)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'fbx',
'ext': 'fbx',
'files': filename,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)
self.log.info("Extract FBX successful to: {0}".format(path))

View file

@ -0,0 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Skeletal Mesh Top Node</title>
<description>## Skeletal meshes need a common root
Skeletal meshes and their joints must be under one common root.
### How to repair?
Make sure all geometry and joints reside under the same root.
</description>
</error>
</root>

View file

@ -4,13 +4,13 @@ import getpass
import platform
import appdirs
import requests
from maya import cmds
from avalon import api
import pyblish.api
from openpype.lib import requests_post
from openpype.hosts.maya.api import lib
from openpype.api import get_system_settings
@ -184,7 +184,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
"select": "name"
}
api_entry = '/api/templates/list'
response = self._requests_post(
response = requests_post(
self.MUSTER_REST_URL + api_entry, params=params)
if response.status_code != 200:
self.log.error(
@ -235,7 +235,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
"name": "submit"
}
api_entry = '/api/queue/actions'
response = self._requests_post(
response = requests_post(
self.MUSTER_REST_URL + api_entry, params=params, json=payload)
if response.status_code != 200:
@ -549,16 +549,3 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
% (value, int(value))
)
def _requests_post(self, *args, **kwargs):
""" Wrapper for requests, disabling SSL certificate validation if
DONT_VERIFY_SSL environment variable is found. This is useful when
Deadline or Muster server are running with self-signed certificates
and their certificate is not added to trusted certificates on
client machines.
WARNING: disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True # noqa
return requests.post(*args, **kwargs)

View file

@ -40,7 +40,14 @@ class ValidateCameraContents(pyblish.api.InstancePlugin):
# list when there are no actual cameras results in
# still an empty 'invalid' list
if len(cameras) < 1:
raise RuntimeError("No cameras in instance.")
if members:
# If there are members in the instance return all of
# them as 'invalid' so the user can still select invalid
cls.log.error("No cameras found in instance "
"members: {}".format(members))
return members
raise RuntimeError("No cameras found in empty instance.")
# non-camera shapes
valid_shapes = cmds.ls(shapes, type=('camera', 'locator'), long=True)

View file

@ -2,9 +2,9 @@ import os
import json
import appdirs
import requests
import pyblish.api
from openpype.lib import requests_get
from openpype.plugin import contextplugin_should_run
import openpype.hosts.maya.api.action
@ -51,7 +51,7 @@ class ValidateMusterConnection(pyblish.api.ContextPlugin):
'authToken': self._token
}
api_entry = '/api/pools/list'
response = self._requests_get(
response = requests_get(
MUSTER_REST_URL + api_entry, params=params)
assert response.status_code == 200, "invalid response from server"
assert response.json()['ResponseData'], "invalid data in response"
@ -88,35 +88,7 @@ class ValidateMusterConnection(pyblish.api.ContextPlugin):
api_url = "{}/muster/show_login".format(
os.environ["OPENPYPE_WEBSERVER_URL"])
cls.log.debug(api_url)
response = cls._requests_get(api_url, timeout=1)
response = requests_get(api_url, timeout=1)
if response.status_code != 200:
cls.log.error('Cannot show login form to Muster')
raise Exception('Cannot show login form to Muster')
def _requests_post(self, *args, **kwargs):
""" Wrapper for requests, disabling SSL certificate validation if
DONT_VERIFY_SSL environment variable is found. This is useful when
Deadline or Muster server are running with self-signed certificates
and their certificate is not added to trusted certificates on
client machines.
WARNING: disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True # noqa
return requests.post(*args, **kwargs)
def _requests_get(self, *args, **kwargs):
""" Wrapper for requests, disabling SSL certificate validation if
DONT_VERIFY_SSL environment variable is found. This is useful when
Deadline or Muster server are running with self-signed certificates
and their certificate is not added to trusted certificates on
client machines.
WARNING: disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True # noqa
return requests.get(*args, **kwargs)

View file

@ -0,0 +1,32 @@
# -*- coding: utf-8 -*-
import pyblish.api
import openpype.api
from openpype.pipeline import PublishXmlValidationError
from maya import cmds
class ValidateSkeletalMeshHierarchy(pyblish.api.InstancePlugin):
"""Validates that nodes has common root."""
order = openpype.api.ValidateContentsOrder
hosts = ["maya"]
families = ["skeletalMesh"]
label = "Skeletal Mesh Top Node"
def process(self, instance):
geo = instance.data.get("geometry")
joints = instance.data.get("joints")
joints_parents = cmds.ls(joints, long=True)
geo_parents = cmds.ls(geo, long=True)
parents_set = {
parent.split("|")[1] for parent in (joints_parents + geo_parents)
}
if len(set(parents_set)) != 1:
raise PublishXmlValidationError(
self,
"Multiple roots on geometry or joints."
)

View file

@ -10,10 +10,11 @@ class ValidateUnrealMeshTriangulated(pyblish.api.InstancePlugin):
order = openpype.api.ValidateMeshOrder
hosts = ["maya"]
families = ["unrealStaticMesh"]
families = ["staticMesh"]
category = "geometry"
label = "Mesh is Triangulated"
actions = [openpype.hosts.maya.api.action.SelectInvalidAction]
active = False
@classmethod
def get_invalid(cls, instance):

View file

@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
"""Validator for correct naming of Static Meshes."""
from maya import cmds # noqa
import pyblish.api
import openpype.api
@ -52,8 +52,8 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin):
optional = True
order = openpype.api.ValidateContentsOrder
hosts = ["maya"]
families = ["unrealStaticMesh"]
label = "Unreal StaticMesh Name"
families = ["staticMesh"]
label = "Unreal Static Mesh Name"
actions = [openpype.hosts.maya.api.action.SelectInvalidAction]
regex_mesh = r"(?P<renderName>.*))"
regex_collision = r"(?P<renderName>.*)"
@ -72,15 +72,13 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin):
["collision_prefixes"]
)
combined_geometry_name = instance.data.get(
"staticMeshCombinedName", None)
if cls.validate_mesh:
# compile regex for testing names
regex_mesh = "{}{}".format(
("_" + cls.static_mesh_prefix) or "", cls.regex_mesh
)
sm_r = re.compile(regex_mesh)
if not sm_r.match(combined_geometry_name):
if not sm_r.match(instance.data.get("subset")):
cls.log.error("Mesh doesn't comply with name validation.")
return True
@ -91,7 +89,7 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin):
cls.log.warning("No collision objects to validate.")
return False
regex_collision = "{}{}".format(
regex_collision = "{}{}_(\\d+)".format(
"(?P<prefix>({}))_".format(
"|".join("{0}".format(p) for p in collision_prefixes)
) or "", cls.regex_collision
@ -99,6 +97,9 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin):
cl_r = re.compile(regex_collision)
mesh_name = "{}{}".format(instance.data["asset"],
instance.data.get("variant", []))
for obj in collision_set:
cl_m = cl_r.match(obj)
if not cl_m:
@ -107,7 +108,7 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin):
else:
expected_collision = "{}_{}".format(
cl_m.group("prefix"),
combined_geometry_name
mesh_name
)
if not obj.startswith(expected_collision):
@ -116,11 +117,11 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin):
"Collision object name doesn't match "
"static mesh name"
)
cls.log.error("{}_{} != {}_{}".format(
cls.log.error("{}_{} != {}_{}*".format(
cl_m.group("prefix"),
cl_m.group("renderName"),
cl_m.group("prefix"),
combined_geometry_name,
mesh_name,
))
invalid.append(obj)

View file

@ -9,9 +9,10 @@ class ValidateUnrealUpAxis(pyblish.api.ContextPlugin):
"""Validate if Z is set as up axis in Maya"""
optional = True
active = False
order = openpype.api.ValidateContentsOrder
hosts = ["maya"]
families = ["unrealStaticMesh"]
families = ["staticMesh"]
label = "Unreal Up-Axis check"
actions = [openpype.api.RepairAction]

View file

@ -1,11 +1,10 @@
import os
import avalon.api
from openpype.api import get_project_settings
from openpype.pipeline import install_host
from openpype.hosts.maya import api
import openpype.hosts.maya.api.lib as mlib
from maya import cmds
avalon.api.install(api)
install_host(api)
print("starting OpenPype usersetup")

View file

@ -10,7 +10,7 @@ def add_implementation_envs(env, _app):
]
old_nuke_path = env.get("NUKE_PATH") or ""
for path in old_nuke_path.split(os.pathsep):
if not path or not os.path.exists(path):
if not path:
continue
norm_path = os.path.normpath(path)

View file

@ -26,6 +26,7 @@ from openpype.tools.utils import host_tools
from openpype.lib.path_tools import HostDirmap
from openpype.settings import get_project_settings
from openpype.modules import ModulesManager
from openpype.pipeline import discover_legacy_creator_plugins
from .workio import (
save_file,
@ -1047,17 +1048,36 @@ def add_review_knob(node):
def add_deadline_tab(node):
node.addKnob(nuke.Tab_Knob("Deadline"))
knob = nuke.Int_Knob("deadlineChunkSize", "Chunk Size")
knob.setValue(0)
node.addKnob(knob)
knob = nuke.Int_Knob("deadlinePriority", "Priority")
knob.setValue(50)
node.addKnob(knob)
knob = nuke.Int_Knob("deadlineChunkSize", "Chunk Size")
knob.setValue(0)
node.addKnob(knob)
knob = nuke.Int_Knob("deadlineConcurrentTasks", "Concurrent tasks")
# zero as default will get value from Settings during collection
# instead of being an explicit user override, see precollect_write.py
knob.setValue(0)
node.addKnob(knob)
knob = nuke.Text_Knob("divd", '')
knob.setValue('')
node.addKnob(knob)
knob = nuke.Boolean_Knob("suspend_publish", "Suspend publish")
knob.setValue(False)
node.addKnob(knob)
def get_deadline_knob_names():
return ["Deadline", "deadlineChunkSize", "deadlinePriority"]
return [
"Deadline",
"deadlineChunkSize",
"deadlinePriority",
"deadlineConcurrentTasks"
]
def create_backdrop(label="", color=None, layer=0,
@ -1902,7 +1922,7 @@ def recreate_instance(origin_node, avalon_data=None):
# create new node
# get appropriate plugin class
creator_plugin = None
for Creator in api.discover(api.Creator):
for Creator in discover_legacy_creator_plugins():
if Creator.__name__ == data["creator"]:
creator_plugin = Creator
break

View file

@ -5,7 +5,6 @@ from collections import OrderedDict
import nuke
import pyblish.api
import avalon.api
import openpype
from openpype.api import (
@ -15,10 +14,11 @@ from openpype.api import (
)
from openpype.lib import register_event_callback
from openpype.pipeline import (
LegacyCreator,
register_loader_plugin_path,
register_creator_plugin_path,
register_inventory_action_path,
deregister_loader_plugin_path,
deregister_creator_plugin_path,
deregister_inventory_action_path,
AVALON_CONTAINER_ID,
)
@ -106,7 +106,7 @@ def install():
log.info("Registering Nuke plug-ins..")
pyblish.api.register_plugin_path(PUBLISH_PATH)
register_loader_plugin_path(LOAD_PATH)
avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH)
register_creator_plugin_path(CREATE_PATH)
register_inventory_action_path(INVENTORY_PATH)
# Register Avalon event for workfiles loading.
@ -132,7 +132,7 @@ def uninstall():
pyblish.deregister_host("nuke")
pyblish.api.deregister_plugin_path(PUBLISH_PATH)
deregister_loader_plugin_path(LOAD_PATH)
avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH)
deregister_creator_plugin_path(CREATE_PATH)
deregister_inventory_action_path(INVENTORY_PATH)
pyblish.api.deregister_callback(

View file

@ -1,6 +1,8 @@
import os
import random
import string
from collections import OrderedDict
from abc import abstractmethod
import nuke
@ -450,6 +452,7 @@ class ExporterReviewMov(ExporterReview):
def generate_mov(self, farm=False, **kwargs):
self.publish_on_farm = farm
read_raw = kwargs["read_raw"]
reformat_node_add = kwargs["reformat_node_add"]
reformat_node_config = kwargs["reformat_node_config"]
bake_viewer_process = kwargs["bake_viewer_process"]
@ -484,6 +487,9 @@ class ExporterReviewMov(ExporterReview):
r_node["origlast"].setValue(self.last_frame)
r_node["colorspace"].setValue(self.write_colorspace)
if read_raw:
r_node["raw"].setValue(1)
# connect
self._temp_nodes[subset].append(r_node)
self.previous_node = r_node
@ -590,3 +596,139 @@ class ExporterReviewMov(ExporterReview):
nuke.scriptSave()
return self.data
class AbstractWriteRender(OpenPypeCreator):
"""Abstract creator to gather similar implementation for Write creators"""
name = ""
label = ""
hosts = ["nuke"]
n_class = "Write"
family = "render"
icon = "sign-out"
defaults = ["Main", "Mask"]
def __init__(self, *args, **kwargs):
super(AbstractWriteRender, self).__init__(*args, **kwargs)
data = OrderedDict()
data["family"] = self.family
data["families"] = self.n_class
for k, v in self.data.items():
if k not in data.keys():
data.update({k: v})
self.data = data
self.nodes = nuke.selectedNodes()
self.log.debug("_ self.data: '{}'".format(self.data))
def process(self):
inputs = []
outputs = []
instance = nuke.toNode(self.data["subset"])
selected_node = None
# use selection
if (self.options or {}).get("useSelection"):
nodes = self.nodes
if not (len(nodes) < 2):
msg = ("Select only one node. "
"The node you want to connect to, "
"or tick off `Use selection`")
self.log.error(msg)
nuke.message(msg)
return
if len(nodes) == 0:
msg = (
"No nodes selected. Please select a single node to connect"
" to or tick off `Use selection`"
)
self.log.error(msg)
nuke.message(msg)
return
selected_node = nodes[0]
inputs = [selected_node]
outputs = selected_node.dependent()
if instance:
if (instance.name() in selected_node.name()):
selected_node = instance.dependencies()[0]
# if node already exist
if instance:
# collect input / outputs
inputs = instance.dependencies()
outputs = instance.dependent()
selected_node = inputs[0]
# remove old one
nuke.delete(instance)
# recreate new
write_data = {
"nodeclass": self.n_class,
"families": [self.family],
"avalon": self.data
}
# add creator data
creator_data = {"creator": self.__class__.__name__}
self.data.update(creator_data)
write_data.update(creator_data)
if self.presets.get('fpath_template'):
self.log.info("Adding template path from preset")
write_data.update(
{"fpath_template": self.presets["fpath_template"]}
)
else:
self.log.info("Adding template path from plugin")
write_data.update({
"fpath_template":
("{work}/" + self.family + "s/nuke/{subset}"
"/{subset}.{frame}.{ext}")})
write_node = self._create_write_node(selected_node,
inputs, outputs,
write_data)
# relinking to collected connections
for i, input in enumerate(inputs):
write_node.setInput(i, input)
write_node.autoplace()
for output in outputs:
output.setInput(0, write_node)
write_node = self._modify_write_node(write_node)
return write_node
@abstractmethod
def _create_write_node(self, selected_node, inputs, outputs, write_data):
"""Family dependent implementation of Write node creation
Args:
selected_node (nuke.Node)
inputs (list of nuke.Node) - input dependencies (what is connected)
outputs (list of nuke.Node) - output dependencies
write_data (dict) - values used to fill Knobs
Returns:
node (nuke.Node): group node with data as Knobs
"""
pass
@abstractmethod
def _modify_write_node(self, write_node):
"""Family dependent modification of created 'write_node'
Returns:
node (nuke.Node): group node with data as Knobs
"""
pass

View file

View file

@ -1,12 +1,10 @@
from collections import OrderedDict
import nuke
from openpype.hosts.nuke.api import plugin
from openpype.hosts.nuke.api.lib import create_write_node
class CreateWritePrerender(plugin.OpenPypeCreator):
class CreateWritePrerender(plugin.AbstractWriteRender):
# change this to template preset
name = "WritePrerender"
label = "Create Write Prerender"
@ -19,85 +17,7 @@ class CreateWritePrerender(plugin.OpenPypeCreator):
def __init__(self, *args, **kwargs):
super(CreateWritePrerender, self).__init__(*args, **kwargs)
data = OrderedDict()
data["family"] = self.family
data["families"] = self.n_class
for k, v in self.data.items():
if k not in data.keys():
data.update({k: v})
self.data = data
self.nodes = nuke.selectedNodes()
self.log.debug("_ self.data: '{}'".format(self.data))
def process(self):
inputs = []
outputs = []
instance = nuke.toNode(self.data["subset"])
selected_node = None
# use selection
if (self.options or {}).get("useSelection"):
nodes = self.nodes
if not (len(nodes) < 2):
msg = ("Select only one node. The node "
"you want to connect to, "
"or tick off `Use selection`")
self.log.error(msg)
nuke.message(msg)
if len(nodes) == 0:
msg = (
"No nodes selected. Please select a single node to connect"
" to or tick off `Use selection`"
)
self.log.error(msg)
nuke.message(msg)
selected_node = nodes[0]
inputs = [selected_node]
outputs = selected_node.dependent()
if instance:
if (instance.name() in selected_node.name()):
selected_node = instance.dependencies()[0]
# if node already exist
if instance:
# collect input / outputs
inputs = instance.dependencies()
outputs = instance.dependent()
selected_node = inputs[0]
# remove old one
nuke.delete(instance)
# recreate new
write_data = {
"nodeclass": self.n_class,
"families": [self.family],
"avalon": self.data
}
# add creator data
creator_data = {"creator": self.__class__.__name__}
self.data.update(creator_data)
write_data.update(creator_data)
if self.presets.get('fpath_template'):
self.log.info("Adding template path from preset")
write_data.update(
{"fpath_template": self.presets["fpath_template"]}
)
else:
self.log.info("Adding template path from plugin")
write_data.update({
"fpath_template": ("{work}/prerenders/nuke/{subset}"
"/{subset}.{frame}.{ext}")})
self.log.info("write_data: {}".format(write_data))
def _create_write_node(self, selected_node, inputs, outputs, write_data):
reviewable = self.presets.get("reviewable")
write_node = create_write_node(
self.data["subset"],
@ -107,15 +27,9 @@ class CreateWritePrerender(plugin.OpenPypeCreator):
review=reviewable,
linked_knobs=["channels", "___", "first", "last", "use_limit"])
# relinking to collected connections
for i, input in enumerate(inputs):
write_node.setInput(i, input)
write_node.autoplace()
for output in outputs:
output.setInput(0, write_node)
return write_node
def _modify_write_node(self, write_node):
# open group node
write_node.begin()
for n in nuke.allNodes():

Some files were not shown because too many files have changed in this diff Show more