Merge remote-tracking branch 'upstream/develop' into refactor_integrator

# Conflicts:
#	openpype/plugins/publish/integrate_new.py
This commit is contained in:
Roy Nieterau 2022-05-02 14:46:06 +02:00
commit 398ccf9b34
383 changed files with 8072 additions and 4208 deletions

315
.all-contributorsrc Normal file
View file

@ -0,0 +1,315 @@
{
"projectName": "OpenPype",
"projectOwner": "pypeclub",
"repoType": "github",
"repoHost": "https://github.com",
"files": [
"README.md"
],
"imageSize": 100,
"commit": true,
"commitConvention": "none",
"contributors": [
{
"login": "mkolar",
"name": "Milan Kolar",
"avatar_url": "https://avatars.githubusercontent.com/u/3333008?v=4",
"profile": "http://pype.club/",
"contributions": [
"code",
"doc",
"infra",
"business",
"content",
"fundingFinding",
"maintenance",
"projectManagement",
"review",
"mentoring",
"question"
]
},
{
"login": "jakubjezek001",
"name": "Jakub Ježek",
"avatar_url": "https://avatars.githubusercontent.com/u/40640033?v=4",
"profile": "https://www.linkedin.com/in/jakubjezek79",
"contributions": [
"code",
"doc",
"infra",
"content",
"review",
"maintenance",
"mentoring",
"projectManagement",
"question"
]
},
{
"login": "antirotor",
"name": "Ondřej Samohel",
"avatar_url": "https://avatars.githubusercontent.com/u/33513211?v=4",
"profile": "https://github.com/antirotor",
"contributions": [
"code",
"doc",
"infra",
"content",
"review",
"maintenance",
"mentoring",
"projectManagement",
"question"
]
},
{
"login": "iLLiCiTiT",
"name": "Jakub Trllo",
"avatar_url": "https://avatars.githubusercontent.com/u/43494761?v=4",
"profile": "https://github.com/iLLiCiTiT",
"contributions": [
"code",
"doc",
"infra",
"review",
"maintenance",
"question"
]
},
{
"login": "kalisp",
"name": "Petr Kalis",
"avatar_url": "https://avatars.githubusercontent.com/u/4457962?v=4",
"profile": "https://github.com/kalisp",
"contributions": [
"code",
"doc",
"infra",
"review",
"maintenance",
"question"
]
},
{
"login": "64qam",
"name": "64qam",
"avatar_url": "https://avatars.githubusercontent.com/u/26925793?v=4",
"profile": "https://github.com/64qam",
"contributions": [
"code",
"review",
"doc",
"infra",
"projectManagement",
"maintenance",
"content",
"userTesting"
]
},
{
"login": "BigRoy",
"name": "Roy Nieterau",
"avatar_url": "https://avatars.githubusercontent.com/u/2439881?v=4",
"profile": "http://www.colorbleed.nl/",
"contributions": [
"code",
"doc",
"review",
"mentoring",
"question"
]
},
{
"login": "tokejepsen",
"name": "Toke Jepsen",
"avatar_url": "https://avatars.githubusercontent.com/u/1860085?v=4",
"profile": "https://github.com/tokejepsen",
"contributions": [
"code",
"doc",
"review",
"mentoring",
"question"
]
},
{
"login": "jrsndl",
"name": "Jiri Sindelar",
"avatar_url": "https://avatars.githubusercontent.com/u/45896205?v=4",
"profile": "https://github.com/jrsndl",
"contributions": [
"code",
"review",
"doc",
"content",
"tutorial",
"userTesting"
]
},
{
"login": "simonebarbieri",
"name": "Simone Barbieri",
"avatar_url": "https://avatars.githubusercontent.com/u/1087869?v=4",
"profile": "https://barbierisimone.com/",
"contributions": [
"code",
"doc"
]
},
{
"login": "karimmozilla",
"name": "karimmozilla",
"avatar_url": "https://avatars.githubusercontent.com/u/82811760?v=4",
"profile": "http://karimmozilla.xyz/",
"contributions": [
"code"
]
},
{
"login": "Allan-I",
"name": "Allan I. A.",
"avatar_url": "https://avatars.githubusercontent.com/u/76656700?v=4",
"profile": "https://github.com/Allan-I",
"contributions": [
"code"
]
},
{
"login": "m-u-r-p-h-y",
"name": "murphy",
"avatar_url": "https://avatars.githubusercontent.com/u/352795?v=4",
"profile": "https://www.linkedin.com/in/mmuurrpphhyy/",
"contributions": [
"code",
"review",
"userTesting",
"doc",
"projectManagement"
]
},
{
"login": "aardschok",
"name": "Wijnand Koreman",
"avatar_url": "https://avatars.githubusercontent.com/u/26920875?v=4",
"profile": "https://github.com/aardschok",
"contributions": [
"code"
]
},
{
"login": "zhoub",
"name": "Bo Zhou",
"avatar_url": "https://avatars.githubusercontent.com/u/1798206?v=4",
"profile": "http://jedimaster.cnblogs.com/",
"contributions": [
"code"
]
},
{
"login": "ClementHector",
"name": "Clément Hector",
"avatar_url": "https://avatars.githubusercontent.com/u/7068597?v=4",
"profile": "https://www.linkedin.com/in/clementhector/",
"contributions": [
"code",
"review"
]
},
{
"login": "davidlatwe",
"name": "David Lai",
"avatar_url": "https://avatars.githubusercontent.com/u/3357009?v=4",
"profile": "https://twitter.com/davidlatwe",
"contributions": [
"code",
"review"
]
},
{
"login": "2-REC",
"name": "Derek ",
"avatar_url": "https://avatars.githubusercontent.com/u/42170307?v=4",
"profile": "https://github.com/2-REC",
"contributions": [
"code",
"doc"
]
},
{
"login": "gabormarinov",
"name": "Gábor Marinov",
"avatar_url": "https://avatars.githubusercontent.com/u/8620515?v=4",
"profile": "https://github.com/gabormarinov",
"contributions": [
"code",
"doc"
]
},
{
"login": "icyvapor",
"name": "icyvapor",
"avatar_url": "https://avatars.githubusercontent.com/u/1195278?v=4",
"profile": "https://github.com/icyvapor",
"contributions": [
"code",
"doc"
]
},
{
"login": "jlorrain",
"name": "Jérôme LORRAIN",
"avatar_url": "https://avatars.githubusercontent.com/u/7955673?v=4",
"profile": "https://github.com/jlorrain",
"contributions": [
"code"
]
},
{
"login": "dmo-j-cube",
"name": "David Morris-Oliveros",
"avatar_url": "https://avatars.githubusercontent.com/u/89823400?v=4",
"profile": "https://github.com/dmo-j-cube",
"contributions": [
"code"
]
},
{
"login": "BenoitConnan",
"name": "BenoitConnan",
"avatar_url": "https://avatars.githubusercontent.com/u/82808268?v=4",
"profile": "https://github.com/BenoitConnan",
"contributions": [
"code"
]
},
{
"login": "Malthaldar",
"name": "Malthaldar",
"avatar_url": "https://avatars.githubusercontent.com/u/33671694?v=4",
"profile": "https://github.com/Malthaldar",
"contributions": [
"code"
]
},
{
"login": "svenneve",
"name": "Sven Neve",
"avatar_url": "https://avatars.githubusercontent.com/u/2472863?v=4",
"profile": "http://www.svenneve.com/",
"contributions": [
"code"
]
},
{
"login": "zafrs",
"name": "zafrs",
"avatar_url": "https://avatars.githubusercontent.com/u/26890002?v=4",
"profile": "https://github.com/zafrs",
"contributions": [
"code"
]
}
],
"contributorsPerLine": 7
}

2
.gitignore vendored
View file

@ -70,6 +70,8 @@ coverage.xml
##################
node_modules
package-lock.json
package.json
yarn.lock
openpype/premiere/ppro/js/debug.log

3
.gitmodules vendored
View file

@ -1,3 +0,0 @@
[submodule "repos/avalon-core"]
path = repos/avalon-core
url = https://github.com/pypeclub/avalon-core.git

View file

@ -1,25 +1,39 @@
# Changelog
## [3.10.0-nightly.1](https://github.com/pypeclub/OpenPype/tree/HEAD)
## [3.10.0-nightly.2](https://github.com/pypeclub/OpenPype/tree/HEAD)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.4...HEAD)
### 📖 Documentation
- Docs: add all-contributors config and initial list [\#3094](https://github.com/pypeclub/OpenPype/pull/3094)
- Nuke docs with videos [\#3052](https://github.com/pypeclub/OpenPype/pull/3052)
**🚀 Enhancements**
- Standalone publisher: add support for bgeo and vdb [\#3080](https://github.com/pypeclub/OpenPype/pull/3080)
- Update collect\_render.py [\#3055](https://github.com/pypeclub/OpenPype/pull/3055)
- SiteSync: Added compute\_resource\_sync\_sites to sync\_server\_module [\#2983](https://github.com/pypeclub/OpenPype/pull/2983)
**🐛 Bug fixes**
- RoyalRender Control Submission - AVALON\_APP\_NAME default [\#3091](https://github.com/pypeclub/OpenPype/pull/3091)
- Ftrack: Update Create Folders action [\#3089](https://github.com/pypeclub/OpenPype/pull/3089)
- Project Manager: Avoid unnecessary updates of asset documents [\#3083](https://github.com/pypeclub/OpenPype/pull/3083)
- Standalone publisher: Fix plugins install [\#3077](https://github.com/pypeclub/OpenPype/pull/3077)
- General: Extract review sequence is not converted with same names [\#3076](https://github.com/pypeclub/OpenPype/pull/3076)
- Webpublisher: Use variant value [\#3068](https://github.com/pypeclub/OpenPype/pull/3068)
- Nuke: Add aov matching even for remainder and prerender [\#3060](https://github.com/pypeclub/OpenPype/pull/3060)
**🔀 Refactored code**
- General: Move host install [\#3009](https://github.com/pypeclub/OpenPype/pull/3009)
**Merged pull requests:**
- Nuke: added suspend\_publish knob [\#3078](https://github.com/pypeclub/OpenPype/pull/3078)
- Bump async from 2.6.3 to 2.6.4 in /website [\#3065](https://github.com/pypeclub/OpenPype/pull/3065)
## [3.9.4](https://github.com/pypeclub/OpenPype/tree/3.9.4) (2022-04-15)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.4-nightly.2...3.9.4)
@ -53,6 +67,7 @@
- LibraryLoader: Use current project for asset query in families filter [\#3042](https://github.com/pypeclub/OpenPype/pull/3042)
- SiteSync: Providers ignore that site is disabled [\#3041](https://github.com/pypeclub/OpenPype/pull/3041)
- Unreal: Creator import fixes [\#3040](https://github.com/pypeclub/OpenPype/pull/3040)
- Settings UI: Version column can be extended so version are visible [\#3032](https://github.com/pypeclub/OpenPype/pull/3032)
- SiteSync: fix transitive alternate sites, fix dropdown in Local Settings [\#3018](https://github.com/pypeclub/OpenPype/pull/3018)
**Merged pull requests:**
@ -72,7 +87,6 @@
- Ftrack: Add description integrator [\#3027](https://github.com/pypeclub/OpenPype/pull/3027)
- Publishing textures for Unreal [\#2988](https://github.com/pypeclub/OpenPype/pull/2988)
- Maya to Unreal: Static and Skeletal Meshes [\#2978](https://github.com/pypeclub/OpenPype/pull/2978)
**🚀 Enhancements**
@ -80,14 +94,11 @@
- Console Interpreter: Changed how console splitter size are reused on show [\#3016](https://github.com/pypeclub/OpenPype/pull/3016)
- Deadline: Use more suitable name for sequence review logic [\#3015](https://github.com/pypeclub/OpenPype/pull/3015)
- General: default workfile subset name for workfile [\#3011](https://github.com/pypeclub/OpenPype/pull/3011)
- Nuke: add concurrency attr to deadline job [\#3005](https://github.com/pypeclub/OpenPype/pull/3005)
- Deadline: priority configurable in Maya jobs [\#2995](https://github.com/pypeclub/OpenPype/pull/2995)
- Workfiles tool: Save as published workfiles [\#2937](https://github.com/pypeclub/OpenPype/pull/2937)
**🐛 Bug fixes**
- Deadline: Fixed default value of use sequence for review [\#3033](https://github.com/pypeclub/OpenPype/pull/3033)
- Settings UI: Version column can be extended so version are visible [\#3032](https://github.com/pypeclub/OpenPype/pull/3032)
- General: Fix validate asset docs plug-in filename and class name [\#3029](https://github.com/pypeclub/OpenPype/pull/3029)
- General: Fix import after movements [\#3028](https://github.com/pypeclub/OpenPype/pull/3028)
- Harmony: Added creating subset name for workfile from template [\#3024](https://github.com/pypeclub/OpenPype/pull/3024)
@ -99,10 +110,6 @@
- Nuke: fixing unicode type detection in effect loaders [\#3002](https://github.com/pypeclub/OpenPype/pull/3002)
- Nuke: removing redundant Ftrack asset when farm publishing [\#2996](https://github.com/pypeclub/OpenPype/pull/2996)
**🔀 Refactored code**
- General: Move plugins register and discover [\#2935](https://github.com/pypeclub/OpenPype/pull/2935)
**Merged pull requests:**
- Maya: Allow to select invalid camera contents if no cameras found [\#3030](https://github.com/pypeclub/OpenPype/pull/3030)
@ -120,19 +127,17 @@
**🆕 New features**
- nuke: bypass baking [\#2992](https://github.com/pypeclub/OpenPype/pull/2992)
- Maya to Unreal: Static and Skeletal Meshes [\#2978](https://github.com/pypeclub/OpenPype/pull/2978)
**🚀 Enhancements**
- Nuke: add concurrency attr to deadline job [\#3005](https://github.com/pypeclub/OpenPype/pull/3005)
- Photoshop: create image without instance [\#3001](https://github.com/pypeclub/OpenPype/pull/3001)
- TVPaint: Render scene family [\#3000](https://github.com/pypeclub/OpenPype/pull/3000)
- Nuke: ReviewDataMov Read RAW attribute [\#2985](https://github.com/pypeclub/OpenPype/pull/2985)
- SiteSync: Added compute\_resource\_sync\_sites to sync\_server\_module [\#2983](https://github.com/pypeclub/OpenPype/pull/2983)
- General: `METADATA\_KEYS` constant as `frozenset` for optimal immutable lookup [\#2980](https://github.com/pypeclub/OpenPype/pull/2980)
- General: Tools with host filters [\#2975](https://github.com/pypeclub/OpenPype/pull/2975)
- Hero versions: Use custom templates [\#2967](https://github.com/pypeclub/OpenPype/pull/2967)
- Slack: Added configurable maximum file size of review upload to Slack [\#2945](https://github.com/pypeclub/OpenPype/pull/2945)
- NewPublisher: Prepared implementation of optional pyblish plugin [\#2943](https://github.com/pypeclub/OpenPype/pull/2943)
- TVPaint: Extractor to convert PNG into EXR [\#2942](https://github.com/pypeclub/OpenPype/pull/2942)
**🐛 Bug fixes**
@ -148,14 +153,6 @@
- General: OIIO conversion for ffmeg can handle sequences [\#2958](https://github.com/pypeclub/OpenPype/pull/2958)
- Settings: Conditional dictionary avoid invalid logs [\#2956](https://github.com/pypeclub/OpenPype/pull/2956)
- General: Smaller fixes and typos [\#2950](https://github.com/pypeclub/OpenPype/pull/2950)
- LogViewer: Don't refresh on initialization [\#2949](https://github.com/pypeclub/OpenPype/pull/2949)
- nuke: python3 compatibility issue with `iteritems` [\#2948](https://github.com/pypeclub/OpenPype/pull/2948)
- General: anatomy data with correct task short key [\#2947](https://github.com/pypeclub/OpenPype/pull/2947)
- SceneInventory: Fix imports in UI [\#2944](https://github.com/pypeclub/OpenPype/pull/2944)
- Slack: add generic exception [\#2941](https://github.com/pypeclub/OpenPype/pull/2941)
- General: Python specific vendor paths on env injection [\#2939](https://github.com/pypeclub/OpenPype/pull/2939)
- General: More fail safe delete old versions [\#2936](https://github.com/pypeclub/OpenPype/pull/2936)
- Settings UI: Collapsed of collapsible wrapper works as expected [\#2934](https://github.com/pypeclub/OpenPype/pull/2934)
**Merged pull requests:**

View file

@ -1,4 +1,7 @@
<!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section -->
[![All Contributors](https://img.shields.io/badge/all_contributors-26-orange.svg?style=flat-square)](#contributors-)
<!-- ALL-CONTRIBUTORS-BADGE:END -->
OpenPype
====
@ -283,3 +286,54 @@ Running tests
To run tests, execute `.\tools\run_tests(.ps1|.sh)`.
**Note that it needs existing virtual environment.**
## Contributors ✨
Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)):
<!-- ALL-CONTRIBUTORS-LIST:START - Do not remove or modify this section -->
<!-- prettier-ignore-start -->
<!-- markdownlint-disable -->
<table>
<tr>
<td align="center"><a href="http://pype.club/"><img src="https://avatars.githubusercontent.com/u/3333008?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Milan Kolar</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=mkolar" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=mkolar" title="Documentation">📖</a> <a href="#infra-mkolar" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#business-mkolar" title="Business development">💼</a> <a href="#content-mkolar" title="Content">🖋</a> <a href="#fundingFinding-mkolar" title="Funding Finding">🔍</a> <a href="#maintenance-mkolar" title="Maintenance">🚧</a> <a href="#projectManagement-mkolar" title="Project Management">📆</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Amkolar" title="Reviewed Pull Requests">👀</a> <a href="#mentoring-mkolar" title="Mentoring">🧑‍🏫</a> <a href="#question-mkolar" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://www.linkedin.com/in/jakubjezek79"><img src="https://avatars.githubusercontent.com/u/40640033?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Jakub Ježek</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=jakubjezek001" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=jakubjezek001" title="Documentation">📖</a> <a href="#infra-jakubjezek001" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#content-jakubjezek001" title="Content">🖋</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Ajakubjezek001" title="Reviewed Pull Requests">👀</a> <a href="#maintenance-jakubjezek001" title="Maintenance">🚧</a> <a href="#mentoring-jakubjezek001" title="Mentoring">🧑‍🏫</a> <a href="#projectManagement-jakubjezek001" title="Project Management">📆</a> <a href="#question-jakubjezek001" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://github.com/antirotor"><img src="https://avatars.githubusercontent.com/u/33513211?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Ondřej Samohel</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=antirotor" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=antirotor" title="Documentation">📖</a> <a href="#infra-antirotor" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#content-antirotor" title="Content">🖋</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Aantirotor" title="Reviewed Pull Requests">👀</a> <a href="#maintenance-antirotor" title="Maintenance">🚧</a> <a href="#mentoring-antirotor" title="Mentoring">🧑‍🏫</a> <a href="#projectManagement-antirotor" title="Project Management">📆</a> <a href="#question-antirotor" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://github.com/iLLiCiTiT"><img src="https://avatars.githubusercontent.com/u/43494761?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Jakub Trllo</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=iLLiCiTiT" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=iLLiCiTiT" title="Documentation">📖</a> <a href="#infra-iLLiCiTiT" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3AiLLiCiTiT" title="Reviewed Pull Requests">👀</a> <a href="#maintenance-iLLiCiTiT" title="Maintenance">🚧</a> <a href="#question-iLLiCiTiT" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://github.com/kalisp"><img src="https://avatars.githubusercontent.com/u/4457962?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Petr Kalis</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=kalisp" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=kalisp" title="Documentation">📖</a> <a href="#infra-kalisp" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Akalisp" title="Reviewed Pull Requests">👀</a> <a href="#maintenance-kalisp" title="Maintenance">🚧</a> <a href="#question-kalisp" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://github.com/64qam"><img src="https://avatars.githubusercontent.com/u/26925793?v=4?s=100" width="100px;" alt=""/><br /><sub><b>64qam</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=64qam" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3A64qam" title="Reviewed Pull Requests">👀</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=64qam" title="Documentation">📖</a> <a href="#infra-64qam" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#projectManagement-64qam" title="Project Management">📆</a> <a href="#maintenance-64qam" title="Maintenance">🚧</a> <a href="#content-64qam" title="Content">🖋</a> <a href="#userTesting-64qam" title="User Testing">📓</a></td>
<td align="center"><a href="http://www.colorbleed.nl/"><img src="https://avatars.githubusercontent.com/u/2439881?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Roy Nieterau</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=BigRoy" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=BigRoy" title="Documentation">📖</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3ABigRoy" title="Reviewed Pull Requests">👀</a> <a href="#mentoring-BigRoy" title="Mentoring">🧑‍🏫</a> <a href="#question-BigRoy" title="Answering Questions">💬</a></td>
</tr>
<tr>
<td align="center"><a href="https://github.com/tokejepsen"><img src="https://avatars.githubusercontent.com/u/1860085?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Toke Jepsen</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=tokejepsen" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=tokejepsen" title="Documentation">📖</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Atokejepsen" title="Reviewed Pull Requests">👀</a> <a href="#mentoring-tokejepsen" title="Mentoring">🧑‍🏫</a> <a href="#question-tokejepsen" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://github.com/jrsndl"><img src="https://avatars.githubusercontent.com/u/45896205?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Jiri Sindelar</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=jrsndl" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Ajrsndl" title="Reviewed Pull Requests">👀</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=jrsndl" title="Documentation">📖</a> <a href="#content-jrsndl" title="Content">🖋</a> <a href="#tutorial-jrsndl" title="Tutorials"></a> <a href="#userTesting-jrsndl" title="User Testing">📓</a></td>
<td align="center"><a href="https://barbierisimone.com/"><img src="https://avatars.githubusercontent.com/u/1087869?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Simone Barbieri</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=simonebarbieri" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=simonebarbieri" title="Documentation">📖</a></td>
<td align="center"><a href="http://karimmozilla.xyz/"><img src="https://avatars.githubusercontent.com/u/82811760?v=4?s=100" width="100px;" alt=""/><br /><sub><b>karimmozilla</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=karimmozilla" title="Code">💻</a></td>
<td align="center"><a href="https://github.com/Allan-I"><img src="https://avatars.githubusercontent.com/u/76656700?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Allan I. A.</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=Allan-I" title="Code">💻</a></td>
<td align="center"><a href="https://www.linkedin.com/in/mmuurrpphhyy/"><img src="https://avatars.githubusercontent.com/u/352795?v=4?s=100" width="100px;" alt=""/><br /><sub><b>murphy</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=m-u-r-p-h-y" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Am-u-r-p-h-y" title="Reviewed Pull Requests">👀</a> <a href="#userTesting-m-u-r-p-h-y" title="User Testing">📓</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=m-u-r-p-h-y" title="Documentation">📖</a> <a href="#projectManagement-m-u-r-p-h-y" title="Project Management">📆</a></td>
<td align="center"><a href="https://github.com/aardschok"><img src="https://avatars.githubusercontent.com/u/26920875?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Wijnand Koreman</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=aardschok" title="Code">💻</a></td>
</tr>
<tr>
<td align="center"><a href="http://jedimaster.cnblogs.com/"><img src="https://avatars.githubusercontent.com/u/1798206?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Bo Zhou</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=zhoub" title="Code">💻</a></td>
<td align="center"><a href="https://www.linkedin.com/in/clementhector/"><img src="https://avatars.githubusercontent.com/u/7068597?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Clément Hector</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=ClementHector" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3AClementHector" title="Reviewed Pull Requests">👀</a></td>
<td align="center"><a href="https://twitter.com/davidlatwe"><img src="https://avatars.githubusercontent.com/u/3357009?v=4?s=100" width="100px;" alt=""/><br /><sub><b>David Lai</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=davidlatwe" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Adavidlatwe" title="Reviewed Pull Requests">👀</a></td>
<td align="center"><a href="https://github.com/2-REC"><img src="https://avatars.githubusercontent.com/u/42170307?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Derek </b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=2-REC" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=2-REC" title="Documentation">📖</a></td>
<td align="center"><a href="https://github.com/gabormarinov"><img src="https://avatars.githubusercontent.com/u/8620515?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Gábor Marinov</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=gabormarinov" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=gabormarinov" title="Documentation">📖</a></td>
<td align="center"><a href="https://github.com/icyvapor"><img src="https://avatars.githubusercontent.com/u/1195278?v=4?s=100" width="100px;" alt=""/><br /><sub><b>icyvapor</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=icyvapor" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=icyvapor" title="Documentation">📖</a></td>
<td align="center"><a href="https://github.com/jlorrain"><img src="https://avatars.githubusercontent.com/u/7955673?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Jérôme LORRAIN</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=jlorrain" title="Code">💻</a></td>
</tr>
<tr>
<td align="center"><a href="https://github.com/dmo-j-cube"><img src="https://avatars.githubusercontent.com/u/89823400?v=4?s=100" width="100px;" alt=""/><br /><sub><b>David Morris-Oliveros</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=dmo-j-cube" title="Code">💻</a></td>
<td align="center"><a href="https://github.com/BenoitConnan"><img src="https://avatars.githubusercontent.com/u/82808268?v=4?s=100" width="100px;" alt=""/><br /><sub><b>BenoitConnan</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=BenoitConnan" title="Code">💻</a></td>
<td align="center"><a href="https://github.com/Malthaldar"><img src="https://avatars.githubusercontent.com/u/33671694?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Malthaldar</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=Malthaldar" title="Code">💻</a></td>
<td align="center"><a href="http://www.svenneve.com/"><img src="https://avatars.githubusercontent.com/u/2472863?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Sven Neve</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=svenneve" title="Code">💻</a></td>
<td align="center"><a href="https://github.com/zafrs"><img src="https://avatars.githubusercontent.com/u/26890002?v=4?s=100" width="100px;" alt=""/><br /><sub><b>zafrs</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=zafrs" title="Code">💻</a></td>
</tr>
</table>
<!-- markdownlint-restore -->
<!-- prettier-ignore-end -->
<!-- ALL-CONTRIBUTORS-LIST:END -->
This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome!

View file

@ -627,8 +627,6 @@ class BootstrapRepos:
Attributes:
data_dir (Path): local OpenPype installation directory.
live_repo_dir (Path): path to repos directory if running live,
otherwise `None`.
registry (OpenPypeSettingsRegistry): OpenPype registry object.
zip_filter (list): List of files to exclude from zip
openpype_filter (list): list of top level directories to
@ -654,7 +652,7 @@ class BootstrapRepos:
self.registry = OpenPypeSettingsRegistry()
self.zip_filter = [".pyc", "__pycache__"]
self.openpype_filter = [
"openpype", "repos", "schema", "LICENSE"
"openpype", "schema", "LICENSE"
]
self._message = message
@ -667,11 +665,6 @@ class BootstrapRepos:
progress_callback = empty_progress
self._progress_callback = progress_callback
if getattr(sys, "frozen", False):
self.live_repo_dir = Path(sys.executable).parent / "repos"
else:
self.live_repo_dir = Path(Path(__file__).parent / ".." / "repos")
@staticmethod
def get_version_path_from_list(
version: str, version_list: list) -> Union[Path, None]:
@ -736,11 +729,12 @@ class BootstrapRepos:
# if repo dir is not set, we detect local "live" OpenPype repository
# version and use it as a source. Otherwise repo_dir is user
# entered location.
if not repo_dir:
version = OpenPypeVersion.get_installed_version_str()
repo_dir = self.live_repo_dir
else:
if repo_dir:
version = self.get_version(repo_dir)
else:
installed_version = OpenPypeVersion.get_installed_version()
version = str(installed_version)
repo_dir = installed_version.path
if not version:
self._print("OpenPype not found.", LOG_ERROR)
@ -756,7 +750,7 @@ class BootstrapRepos:
Path(temp_dir) / f"openpype-v{version}.zip"
self._print(f"creating zip: {temp_zip}")
self._create_openpype_zip(temp_zip, repo_dir.parent)
self._create_openpype_zip(temp_zip, repo_dir)
if not os.path.exists(temp_zip):
self._print("make archive failed.", LOG_ERROR)
return None
@ -1057,27 +1051,11 @@ class BootstrapRepos:
if not archive.is_file() and not archive.exists():
raise ValueError("Archive is not file.")
with ZipFile(archive, "r") as zip_file:
name_list = zip_file.namelist()
roots = []
paths = []
for item in name_list:
if not item.startswith("repos/"):
continue
root = item.split("/")[1]
if root not in roots:
roots.append(root)
paths.append(
f"{archive}{os.path.sep}repos{os.path.sep}{root}")
sys.path.insert(0, paths[-1])
sys.path.insert(0, f"{archive}")
archive_path = str(archive)
sys.path.insert(0, archive_path)
pythonpath = os.getenv("PYTHONPATH", "")
python_paths = pythonpath.split(os.pathsep)
python_paths += paths
python_paths.insert(0, archive_path)
os.environ["PYTHONPATH"] = os.pathsep.join(python_paths)
@ -1094,24 +1072,8 @@ class BootstrapRepos:
directory (Path): path to directory.
"""
sys.path.insert(0, directory.as_posix())
directory /= "repos"
if not directory.exists() and not directory.is_dir():
raise ValueError("directory is invalid")
roots = []
for item in directory.iterdir():
if item.is_dir():
root = item.as_posix()
if root not in roots:
roots.append(root)
sys.path.insert(0, root)
pythonpath = os.getenv("PYTHONPATH", "")
paths = pythonpath.split(os.pathsep)
paths += roots
os.environ["PYTHONPATH"] = os.pathsep.join(paths)
@staticmethod
def find_openpype_version(version, staging):
@ -1437,6 +1399,7 @@ class BootstrapRepos:
# create destination parent directories even if they don't exist.
destination.mkdir(parents=True)
remove_source_file = False
# version is directory
if openpype_version.path.is_dir():
# create zip inside temporary directory.
@ -1470,6 +1433,8 @@ class BootstrapRepos:
self._progress_callback(35)
openpype_version.path = self._copy_zip(
openpype_version.path, destination)
# Mark zip to be deleted when done
remove_source_file = True
# extract zip there
self._print("extracting zip to destination ...")
@ -1478,6 +1443,10 @@ class BootstrapRepos:
zip_ref.extractall(destination)
self._progress_callback(100)
# Remove zip file copied to local app data
if remove_source_file:
os.remove(openpype_version.path)
return destination
def _copy_zip(self, source: Path, destination: Path) -> Path:

View file

@ -3,7 +3,6 @@ from .settings import (
get_project_settings,
get_current_project_settings,
get_anatomy_settings,
get_environments,
SystemSettings,
ProjectSettings
@ -23,7 +22,6 @@ from .lib import (
get_app_environments_for_context,
source_hash,
get_latest_version,
get_global_environments,
get_local_site_id,
change_openpype_mongo_url,
create_project_folders,
@ -69,10 +67,10 @@ __all__ = [
"get_project_settings",
"get_current_project_settings",
"get_anatomy_settings",
"get_environments",
"get_project_basic_paths",
"SystemSettings",
"ProjectSettings",
"PypeLogger",
"Logger",
@ -102,8 +100,9 @@ __all__ = [
# get contextual data
"version_up",
"get_hierarchy",
"get_asset",
"get_hierarchy",
"get_workdir_data",
"get_version_from_path",
"get_last_version_from_path",
"get_app_environments_for_context",
@ -111,7 +110,6 @@ __all__ = [
"run_subprocess",
"get_latest_version",
"get_global_environments",
"get_local_site_id",
"change_openpype_mongo_url",

View file

@ -20,6 +20,10 @@ from .pype_commands import PypeCommands
"to list staging versions."))
@click.option("--validate-version", expose_value=False,
help="validate given version integrity")
@click.option("--debug", is_flag=True, expose_value=False,
help=("Enable debug"))
@click.option("--verbose", expose_value=False,
help=("Change OpenPype log level (debug - critical or 0-50)"))
def main(ctx):
"""Pype is main command serving as entry point to pipeline system.
@ -49,18 +53,13 @@ def traypublisher():
@main.command()
@click.option("-d", "--debug",
is_flag=True, help=("Run pype tray in debug mode"))
def tray(debug=False):
def tray():
"""Launch pype tray.
Default action of pype command is to launch tray widget to control basic
aspects of pype. See documentation for more information.
Running pype with `--debug` will result in lot of information useful for
debugging to be shown in console.
"""
PypeCommands().launch_tray(debug)
PypeCommands().launch_tray()
@PypeCommands.add_modules
@ -75,7 +74,6 @@ def module(ctx):
@main.command()
@click.option("-d", "--debug", is_flag=True, help="Print debug messages")
@click.option("--ftrack-url", envvar="FTRACK_SERVER",
help="Ftrack server url")
@click.option("--ftrack-user", envvar="FTRACK_API_USER",
@ -88,8 +86,7 @@ def module(ctx):
help="Clockify API key.")
@click.option("--clockify-workspace", envvar="CLOCKIFY_WORKSPACE",
help="Clockify workspace")
def eventserver(debug,
ftrack_url,
def eventserver(ftrack_url,
ftrack_user,
ftrack_api_key,
legacy,
@ -100,8 +97,6 @@ def eventserver(debug,
This should be ideally used by system service (such us systemd or upstart
on linux and window service).
"""
if debug:
os.environ["OPENPYPE_DEBUG"] = "1"
PypeCommands().launch_eventservercli(
ftrack_url,
@ -114,12 +109,11 @@ def eventserver(debug,
@main.command()
@click.option("-d", "--debug", is_flag=True, help="Print debug messages")
@click.option("-h", "--host", help="Host", default=None)
@click.option("-p", "--port", help="Port", default=None)
@click.option("-e", "--executable", help="Executable")
@click.option("-u", "--upload_dir", help="Upload dir")
def webpublisherwebserver(debug, executable, upload_dir, host=None, port=None):
def webpublisherwebserver(executable, upload_dir, host=None, port=None):
"""Starts webserver for communication with Webpublish FR via command line
OP must be congigured on a machine, eg. OPENPYPE_MONGO filled AND
@ -127,8 +121,6 @@ def webpublisherwebserver(debug, executable, upload_dir, host=None, port=None):
Expect "pype.club" user created on Ftrack.
"""
if debug:
os.environ["OPENPYPE_DEBUG"] = "1"
PypeCommands().launch_webpublisher_webservercli(
upload_dir=upload_dir,
@ -164,38 +156,34 @@ def extractenvironments(output_json_path, project, asset, task, app, envgroup):
@main.command()
@click.argument("paths", nargs=-1)
@click.option("-d", "--debug", is_flag=True, help="Print debug messages")
@click.option("-t", "--targets", help="Targets module", default=None,
multiple=True)
@click.option("-g", "--gui", is_flag=True,
help="Show Publish UI", default=False)
def publish(debug, paths, targets, gui):
def publish(paths, targets, gui):
"""Start CLI publishing.
Publish collects json from paths provided as an argument.
More than one path is allowed.
"""
if debug:
os.environ["OPENPYPE_DEBUG"] = "1"
PypeCommands.publish(list(paths), targets, gui)
@main.command()
@click.argument("path")
@click.option("-d", "--debug", is_flag=True, help="Print debug messages")
@click.option("-h", "--host", help="Host")
@click.option("-u", "--user", help="User email address")
@click.option("-p", "--project", help="Project")
@click.option("-t", "--targets", help="Targets", default=None,
multiple=True)
def remotepublishfromapp(debug, project, path, host, user=None, targets=None):
def remotepublishfromapp(project, path, host, user=None, targets=None):
"""Start CLI publishing.
Publish collects json from paths provided as an argument.
More than one path is allowed.
"""
if debug:
os.environ["OPENPYPE_DEBUG"] = "1"
PypeCommands.remotepublishfromapp(
project, path, host, user, targets=targets
)
@ -203,24 +191,21 @@ def remotepublishfromapp(debug, project, path, host, user=None, targets=None):
@main.command()
@click.argument("path")
@click.option("-d", "--debug", is_flag=True, help="Print debug messages")
@click.option("-u", "--user", help="User email address")
@click.option("-p", "--project", help="Project")
@click.option("-t", "--targets", help="Targets", default=None,
multiple=True)
def remotepublish(debug, project, path, user=None, targets=None):
def remotepublish(project, path, user=None, targets=None):
"""Start CLI publishing.
Publish collects json from paths provided as an argument.
More than one path is allowed.
"""
if debug:
os.environ["OPENPYPE_DEBUG"] = "1"
PypeCommands.remotepublish(project, path, user, targets=targets)
@main.command()
@click.option("-d", "--debug", is_flag=True, help="Print debug messages")
@click.option("-p", "--project", required=True,
help="name of project asset is under")
@click.option("-a", "--asset", required=True,
@ -228,7 +213,7 @@ def remotepublish(debug, project, path, user=None, targets=None):
@click.option("--path", required=True,
help="path where textures are found",
type=click.Path(exists=True))
def texturecopy(debug, project, asset, path):
def texturecopy(project, asset, path):
"""Copy specified textures to provided asset path.
It validates if project and asset exists. Then it will use speedcopy to
@ -239,8 +224,7 @@ def texturecopy(debug, project, asset, path):
Result will be copied without directory structure so it will be flat then.
Nothing is written to database.
"""
if debug:
os.environ["OPENPYPE_DEBUG"] = "1"
PypeCommands().texture_copy(project, asset, path)
@ -389,11 +373,9 @@ def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant,
@main.command()
@click.option("-d", "--debug",
is_flag=True, help=("Run process in debug mode"))
@click.option("-a", "--active_site", required=True,
help="Name of active stie")
def syncserver(debug, active_site):
def syncserver(active_site):
"""Run sync site server in background.
Some Site Sync use cases need to expose site to another one.
@ -408,8 +390,7 @@ def syncserver(debug, active_site):
Settings (configured by starting OP Tray with env
var OPENPYPE_LOCAL_ID set to 'active_site'.
"""
if debug:
os.environ["OPENPYPE_DEBUG"] = "1"
PypeCommands().syncserver(active_site)

View file

@ -5,8 +5,7 @@ from openpype.lib import (
prepare_app_environments,
prepare_context_environments
)
import avalon.api
from openpype.pipeline import AvalonMongoDB
class GlobalHostDataHook(PreLaunchHook):
@ -64,7 +63,7 @@ class GlobalHostDataHook(PreLaunchHook):
self.data["anatomy"] = Anatomy(project_name)
# Mongo connection
dbcon = avalon.api.AvalonMongoDB()
dbcon = AvalonMongoDB()
dbcon.Session["AVALON_PROJECT"] = project_name
dbcon.install()

View file

@ -16,7 +16,10 @@ from .pipeline import (
uninstall,
list_instances,
remove_instance,
containerise
containerise,
get_context_data,
update_context_data,
get_context_title
)
from .workio import (
@ -51,6 +54,9 @@ __all__ = [
"list_instances",
"remove_instance",
"containerise",
"get_context_data",
"update_context_data",
"get_context_title",
"file_extensions",
"has_unsaved_changes",

View file

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.22"
<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.23"
ExtensionBundleName="openpype" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ExtensionList>
<Extension Id="com.openpype.AE.panel" Version="1.0" />

View file

@ -417,7 +417,9 @@ function getRenderInfo(){
var file_url = item.file.toString();
return JSON.stringify({
"file_name": file_url
"file_name": file_url,
"width": render_item.comp.width,
"height": render_item.comp.height
})
}

View file

@ -12,9 +12,8 @@ from wsrpc_aiohttp import (
from Qt import QtCore
from openpype.pipeline import legacy_io
from openpype.tools.utils import host_tools
from avalon import api
from openpype.tools.adobe_webserver.app import WebServerTool
from .ws_stub import AfterEffectsServerStub
@ -271,13 +270,13 @@ class AfterEffectsRoute(WebSocketRoute):
log.info("Setting context change")
log.info("project {} asset {} ".format(project, asset))
if project:
api.Session["AVALON_PROJECT"] = project
legacy_io.Session["AVALON_PROJECT"] = project
os.environ["AVALON_PROJECT"] = project
if asset:
api.Session["AVALON_ASSET"] = asset
legacy_io.Session["AVALON_ASSET"] = asset
os.environ["AVALON_ASSET"] = asset
if task:
api.Session["AVALON_TASK"] = task
legacy_io.Session["AVALON_TASK"] = task
os.environ["AVALON_TASK"] = task
async def read(self):

View file

@ -2,10 +2,8 @@ import os
import sys
from Qt import QtWidgets
from bson.objectid import ObjectId
import pyblish.api
from avalon import io
from openpype import lib
from openpype.api import Logger
@ -15,7 +13,7 @@ from openpype.pipeline import (
deregister_loader_plugin_path,
deregister_creator_plugin_path,
AVALON_CONTAINER_ID,
registered_host,
legacy_io,
)
import openpype.hosts.aftereffects
from openpype.lib import register_event_callback
@ -34,24 +32,6 @@ LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
def check_inventory():
if not lib.any_outdated():
return
# Warn about outdated containers.
print("Starting new QApplication..")
app = QtWidgets.QApplication(sys.argv)
message_box = QtWidgets.QMessageBox()
message_box.setIcon(QtWidgets.QMessageBox.Warning)
msg = "There are outdated containers in the scene."
message_box.setText(msg)
message_box.exec_()
def application_launch():
check_inventory()
def install():
print("Installing Pype config...")
@ -75,6 +55,11 @@ def uninstall():
deregister_creator_plugin_path(CREATE_PATH)
def application_launch():
"""Triggered after start of app"""
check_inventory()
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle layer visibility on instance toggles."""
instance[0].Visible = new_value
@ -109,65 +94,6 @@ def get_asset_settings():
}
def containerise(name,
namespace,
comp,
context,
loader=None,
suffix="_CON"):
"""
Containerisation enables a tracking of version, author and origin
for loaded assets.
Creates dictionary payloads that gets saved into file metadata. Each
container contains of who loaded (loader) and members (single or multiple
in case of background).
Arguments:
name (str): Name of resulting assembly
namespace (str): Namespace under which to host container
comp (Comp): Composition to containerise
context (dict): Asset information
loader (str, optional): Name of loader used to produce this container.
suffix (str, optional): Suffix of container, defaults to `_CON`.
Returns:
container (str): Name of container assembly
"""
data = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": name,
"namespace": namespace,
"loader": str(loader),
"representation": str(context["representation"]["_id"]),
"members": comp.members or [comp.id]
}
stub = get_stub()
stub.imprint(comp, data)
return comp
def _get_stub():
"""
Handle pulling stub from PS to run operations on host
Returns:
(AEServerStub) or None
"""
try:
stub = get_stub() # only after Photoshop is up
except lib.ConnectionNotEstablishedYet:
print("Not connected yet, ignoring")
return
if not stub.get_active_document_name():
return
return stub
def ls():
"""Yields containers from active AfterEffects document.
@ -208,6 +134,66 @@ def ls():
yield data
def check_inventory():
"""Checks loaded containers if they are of highest version"""
if not lib.any_outdated():
return
# Warn about outdated containers.
_app = QtWidgets.QApplication.instance()
if not _app:
print("Starting new QApplication..")
_app = QtWidgets.QApplication([])
message_box = QtWidgets.QMessageBox()
message_box.setIcon(QtWidgets.QMessageBox.Warning)
msg = "There are outdated containers in the scene."
message_box.setText(msg)
message_box.exec_()
def containerise(name,
namespace,
comp,
context,
loader=None,
suffix="_CON"):
"""
Containerisation enables a tracking of version, author and origin
for loaded assets.
Creates dictionary payloads that gets saved into file metadata. Each
container contains of who loaded (loader) and members (single or multiple
in case of background).
Arguments:
name (str): Name of resulting assembly
namespace (str): Namespace under which to host container
comp (AEItem): Composition to containerise
context (dict): Asset information
loader (str, optional): Name of loader used to produce this container.
suffix (str, optional): Suffix of container, defaults to `_CON`.
Returns:
container (str): Name of container assembly
"""
data = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": name,
"namespace": namespace,
"loader": str(loader),
"representation": str(context["representation"]["_id"]),
"members": comp.members or [comp.id]
}
stub = get_stub()
stub.imprint(comp.id, data)
return comp
# created instances section
def list_instances():
"""
List all created instances from current workfile which
@ -228,16 +214,8 @@ def list_instances():
layers_meta = stub.get_metadata()
for instance in layers_meta:
if instance.get("schema") and \
"container" in instance.get("schema"):
continue
uuid_val = instance.get("uuid")
if uuid_val:
instance['uuid'] = uuid_val
else:
instance['uuid'] = instance.get("members")[0] # legacy
instances.append(instance)
if instance.get("id") == "pyblish.avalon.instance":
instances.append(instance)
return instances
@ -258,8 +236,59 @@ def remove_instance(instance):
if not stub:
return
stub.remove_instance(instance.get("uuid"))
item = stub.get_item(instance.get("uuid"))
if item:
stub.rename_item(item.id,
item.name.replace(stub.PUBLISH_ICON, ''))
inst_id = instance.get("instance_id") or instance.get("uuid") # legacy
if not inst_id:
log.warning("No instance identifier for {}".format(instance))
return
stub.remove_instance(inst_id)
if instance.get("members"):
item = stub.get_item(instance["members"][0])
if item:
stub.rename_item(item.id,
item.name.replace(stub.PUBLISH_ICON, ''))
# new publisher section
def get_context_data():
meta = _get_stub().get_metadata()
for item in meta:
if item.get("id") == "publish_context":
item.pop("id")
return item
return {}
def update_context_data(data, changes):
item = data
item["id"] = "publish_context"
_get_stub().imprint(item["id"], item)
def get_context_title():
"""Returns title for Creator window"""
project_name = legacy_io.Session["AVALON_PROJECT"]
asset_name = legacy_io.Session["AVALON_ASSET"]
task_name = legacy_io.Session["AVALON_TASK"]
return "{}/{}/{}".format(project_name, asset_name, task_name)
def _get_stub():
"""
Handle pulling stub from PS to run operations on host
Returns:
(AEServerStub) or None
"""
try:
stub = get_stub() # only after Photoshop is up
except lib.ConnectionNotEstablishedYet:
print("Not connected yet, ignoring")
return
if not stub.get_active_document_name():
return
return stub

View file

@ -51,4 +51,4 @@ def _active_document():
print("Nothing opened")
pass
return document_name
return document_name

View file

@ -28,6 +28,9 @@ class AEItem(object):
workAreaDuration = attr.ib(default=None)
frameRate = attr.ib(default=None)
file_name = attr.ib(default=None)
instance_id = attr.ib(default=None) # New Publisher
width = attr.ib(default=None)
height = attr.ib(default=None)
class AfterEffectsServerStub():
@ -110,11 +113,11 @@ class AfterEffectsServerStub():
self.log.debug("Couldn't find layer metadata")
def imprint(self, item, data, all_items=None, items_meta=None):
def imprint(self, item_id, data, all_items=None, items_meta=None):
"""
Save item metadata to Label field of metadata of active document
Args:
item (AEItem):
item_id (int|str): id of FootageItem or instance_id for workfiles
data(string): json representation for single layer
all_items (list of item): for performance, could be
injected for usage in loop, if not, single call will be
@ -132,8 +135,9 @@ class AfterEffectsServerStub():
is_new = True
for item_meta in items_meta:
if item_meta.get('members') \
and str(item.id) == str(item_meta.get('members')[0]):
if ((item_meta.get('members') and
str(item_id) == str(item_meta.get('members')[0])) or
item_meta.get("instance_id") == item_id):
is_new = False
if data:
item_meta.update(data)
@ -153,10 +157,12 @@ class AfterEffectsServerStub():
item_ids = [int(item.id) for item in all_items]
cleaned_data = []
for meta in result_meta:
# for creation of instance OR loaded container
if 'instance' in meta.get('id') or \
int(meta.get('members')[0]) in item_ids:
cleaned_data.append(meta)
# do not added instance with nonexistend item id
if meta.get("members"):
if int(meta["members"][0]) not in item_ids:
continue
cleaned_data.append(meta)
payload = json.dumps(cleaned_data, indent=4)
@ -167,7 +173,7 @@ class AfterEffectsServerStub():
def get_active_document_full_name(self):
"""
Returns just a name of active document via ws call
Returns absolute path of active document via ws call
Returns(string): file name
"""
res = self.websocketserver.call(self.client.call(
@ -314,15 +320,13 @@ class AfterEffectsServerStub():
Keep matching item in file though.
Args:
instance_id(string): instance uuid
instance_id(string): instance id
"""
cleaned_data = []
for instance in self.get_metadata():
uuid_val = instance.get("uuid")
if not uuid_val:
uuid_val = instance.get("members")[0] # legacy
if uuid_val != instance_id:
inst_id = instance.get("instance_id") or instance.get("uuid")
if inst_id != instance_id:
cleaned_data.append(instance)
payload = json.dumps(cleaned_data, indent=4)
@ -357,7 +361,7 @@ class AfterEffectsServerStub():
item_id (int):
Returns:
(namedtuple)
(AEItem)
"""
res = self.websocketserver.call(self.client.call
@ -418,7 +422,7 @@ class AfterEffectsServerStub():
""" Get render queue info for render purposes
Returns:
(namedtuple): with 'file_name' field
(AEItem): with 'file_name' field
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.get_render_info'))
@ -606,7 +610,10 @@ class AfterEffectsServerStub():
d.get('workAreaStart'),
d.get('workAreaDuration'),
d.get('frameRate'),
d.get('file_name'))
d.get('file_name'),
d.get("instance_id"),
d.get("width"),
d.get("height"))
ret.append(item)
return ret

View file

@ -1,7 +1,7 @@
from openpype.hosts.aftereffects.plugins.create import create_render
from openpype.hosts.aftereffects.plugins.create import create_legacy_render
class CreateLocalRender(create_render.CreateRender):
class CreateLocalRender(create_legacy_render.CreateRender):
""" Creator to render locally.
Created only after default render on farm. So family 'render.local' is

View file

@ -0,0 +1,62 @@
from openpype.pipeline import create
from openpype.pipeline import CreatorError
from openpype.hosts.aftereffects.api import (
get_stub,
list_instances
)
class CreateRender(create.LegacyCreator):
"""Render folder for publish.
Creates subsets in format 'familyTaskSubsetname',
eg 'renderCompositingMain'.
Create only single instance from composition at a time.
"""
name = "renderDefault"
label = "Render on Farm"
family = "render"
defaults = ["Main"]
def process(self):
stub = get_stub() # only after After Effects is up
items = []
if (self.options or {}).get("useSelection"):
items = stub.get_selected_items(
comps=True, folders=False, footages=False
)
if len(items) > 1:
raise CreatorError(
"Please select only single composition at time."
)
if not items:
raise CreatorError((
"Nothing to create. Select composition "
"if 'useSelection' or create at least "
"one composition."
))
existing_subsets = [
instance['subset'].lower()
for instance in list_instances()
]
item = items.pop()
if self.name.lower() in existing_subsets:
txt = "Instance with name \"{}\" already exists.".format(self.name)
raise CreatorError(txt)
self.data["members"] = [item.id]
self.data["uuid"] = item.id # for SubsetManager
self.data["subset"] = (
self.data["subset"]
.replace(stub.PUBLISH_ICON, '')
.replace(stub.LOADED_ICON, '')
)
stub.imprint(item, self.data)
stub.set_label_color(item.id, 14) # Cyan options 0 - 16
stub.rename_item(item.id, stub.PUBLISH_ICON + self.data["subset"])

View file

@ -1,38 +1,70 @@
from openpype import resources
from openpype.lib import BoolDef, UISeparatorDef
from openpype.hosts.aftereffects import api
from openpype.pipeline import (
Creator,
CreatedInstance,
CreatorError,
LegacyCreator
)
from openpype.hosts.aftereffects.api import (
get_stub,
list_instances
legacy_io,
)
class CreateRender(LegacyCreator):
"""Render folder for publish.
Creates subsets in format 'familyTaskSubsetname',
eg 'renderCompositingMain'.
Create only single instance from composition at a time.
"""
name = "renderDefault"
label = "Render on Farm"
class RenderCreator(Creator):
identifier = "render"
label = "Render"
family = "render"
defaults = ["Main"]
description = "Render creator"
def process(self):
stub = get_stub() # only after After Effects is up
if (self.options or {}).get("useSelection"):
create_allow_context_change = True
def __init__(
self, create_context, system_settings, project_settings, headless=False
):
super(RenderCreator, self).__init__(create_context, system_settings,
project_settings, headless)
self._default_variants = (project_settings["aftereffects"]
["create"]
["RenderCreator"]
["defaults"])
def get_icon(self):
return resources.get_openpype_splash_filepath()
def collect_instances(self):
for instance_data in api.list_instances():
# legacy instances have family=='render' or 'renderLocal', use them
creator_id = (instance_data.get("creator_identifier") or
instance_data.get("family", '').replace("Local", ''))
if creator_id == self.identifier:
instance_data = self._handle_legacy(instance_data)
instance = CreatedInstance.from_existing(
instance_data, self
)
self._add_instance_to_context(instance)
def update_instances(self, update_list):
for created_inst, _changes in update_list:
api.get_stub().imprint(created_inst.get("instance_id"),
created_inst.data_to_store())
def remove_instances(self, instances):
for instance in instances:
api.remove_instance(instance)
self._remove_instance_from_context(instance)
def create(self, subset_name, data, pre_create_data):
stub = api.get_stub() # only after After Effects is up
if pre_create_data.get("use_selection"):
items = stub.get_selected_items(
comps=True, folders=False, footages=False
)
else:
items = stub.get_items(comps=True, folders=False, footages=False)
if len(items) > 1:
raise CreatorError(
"Please select only single composition at time."
)
if not items:
raise CreatorError((
"Nothing to create. Select composition "
@ -40,24 +72,54 @@ class CreateRender(LegacyCreator):
"one composition."
))
existing_subsets = [
instance['subset'].lower()
for instance in list_instances()
for inst in self.create_context.instances:
if subset_name == inst.subset_name:
raise CreatorError("{} already exists".format(
inst.subset_name))
data["members"] = [items[0].id]
new_instance = CreatedInstance(self.family, subset_name, data, self)
if "farm" in pre_create_data:
use_farm = pre_create_data["farm"]
new_instance.creator_attributes["farm"] = use_farm
api.get_stub().imprint(new_instance.id,
new_instance.data_to_store())
self._add_instance_to_context(new_instance)
def get_default_variants(self):
return self._default_variants
def get_instance_attr_defs(self):
return [BoolDef("farm", label="Render on farm")]
def get_pre_create_attr_defs(self):
output = [
BoolDef("use_selection", default=True, label="Use selection"),
UISeparatorDef(),
BoolDef("farm", label="Render on farm")
]
return output
item = items.pop()
if self.name.lower() in existing_subsets:
txt = "Instance with name \"{}\" already exists.".format(self.name)
raise CreatorError(txt)
def get_detail_description(self):
return """Creator for Render instances"""
self.data["members"] = [item.id]
self.data["uuid"] = item.id # for SubsetManager
self.data["subset"] = (
self.data["subset"]
.replace(stub.PUBLISH_ICON, '')
.replace(stub.LOADED_ICON, '')
)
def _handle_legacy(self, instance_data):
"""Converts old instances to new format."""
if not instance_data.get("members"):
instance_data["members"] = [instance_data.get("uuid")]
stub.imprint(item, self.data)
stub.set_label_color(item.id, 14) # Cyan options 0 - 16
stub.rename_item(item.id, stub.PUBLISH_ICON + self.data["subset"])
if instance_data.get("uuid"):
# uuid not needed, replaced with unique instance_id
api.get_stub().remove_instance(instance_data.get("uuid"))
instance_data.pop("uuid")
if not instance_data.get("task"):
instance_data["task"] = legacy_io.Session.get("AVALON_TASK")
if not instance_data.get("creator_attributes"):
is_old_farm = instance_data["family"] != "renderLocal"
instance_data["creator_attributes"] = {"farm": is_old_farm}
instance_data["family"] = self.family
return instance_data

View file

@ -0,0 +1,80 @@
import openpype.hosts.aftereffects.api as api
from openpype.pipeline import (
AutoCreator,
CreatedInstance,
legacy_io,
)
class AEWorkfileCreator(AutoCreator):
identifier = "workfile"
family = "workfile"
def get_instance_attr_defs(self):
return []
def collect_instances(self):
for instance_data in api.list_instances():
creator_id = instance_data.get("creator_identifier")
if creator_id == self.identifier:
subset_name = instance_data["subset"]
instance = CreatedInstance(
self.family, subset_name, instance_data, self
)
self._add_instance_to_context(instance)
def update_instances(self, update_list):
# nothing to change on workfiles
pass
def create(self, options=None):
existing_instance = None
for instance in self.create_context.instances:
if instance.family == self.family:
existing_instance = instance
break
variant = ''
project_name = legacy_io.Session["AVALON_PROJECT"]
asset_name = legacy_io.Session["AVALON_ASSET"]
task_name = legacy_io.Session["AVALON_TASK"]
host_name = legacy_io.Session["AVALON_APP"]
if existing_instance is None:
asset_doc = legacy_io.find_one({
"type": "asset",
"name": asset_name
})
subset_name = self.get_subset_name(
variant, task_name, asset_doc, project_name, host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": variant
}
data.update(self.get_dynamic_data(
variant, task_name, asset_doc, project_name, host_name
))
new_instance = CreatedInstance(
self.family, subset_name, data, self
)
self._add_instance_to_context(new_instance)
api.get_stub().imprint(new_instance.get("instance_id"),
new_instance.data_to_store())
elif (
existing_instance["asset"] != asset_name
or existing_instance["task"] != task_name
):
asset_doc = legacy_io.find_one({
"type": "asset",
"name": asset_name
})
subset_name = self.get_subset_name(
variant, task_name, asset_doc, project_name, host_name
)
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name

View file

@ -90,7 +90,7 @@ class BackgroundLoader(AfterEffectsLoader):
container["namespace"] = comp_name
container["members"] = comp.members
stub.imprint(comp, container)
stub.imprint(comp.id, container)
def remove(self, container):
"""
@ -99,10 +99,9 @@ class BackgroundLoader(AfterEffectsLoader):
Args:
container (dict): container to be removed - used to get layer_id
"""
print("!!!! container:: {}".format(container))
stub = self.get_stub()
layer = container.pop("layer")
stub.imprint(layer, {})
stub.imprint(layer.id, {})
stub.delete_item(layer.id)
def switch(self, container, representation):

View file

@ -96,9 +96,9 @@ class FileLoader(AfterEffectsLoader):
# with aftereffects.maintained_selection(): # TODO
stub.replace_item(layer.id, path, stub.LOADED_ICON + layer_name)
stub.imprint(
layer, {"representation": str(representation["_id"]),
"name": context["subset"],
"namespace": layer_name}
layer.id, {"representation": str(representation["_id"]),
"name": context["subset"],
"namespace": layer_name}
)
def remove(self, container):
@ -109,7 +109,7 @@ class FileLoader(AfterEffectsLoader):
"""
stub = self.get_stub()
layer = container.pop("layer")
stub.imprint(layer, {})
stub.imprint(layer.id, {})
stub.delete_item(layer.id)
def switch(self, container, representation):

View file

@ -17,12 +17,11 @@ class CollectAudio(pyblish.api.ContextPlugin):
def process(self, context):
for instance in context:
if instance.data["family"] == 'render.farm':
if 'render.farm' in instance.data.get("families", []):
comp_id = instance.data["comp_id"]
if not comp_id:
self.log.debug("No comp_id filled in instance")
# @iLLiCiTiT QUESTION Should return or continue?
return
continue
context.data["audioFile"] = os.path.normpath(
get_stub().get_audio_url(comp_id)
).replace("\\", "/")

View file

@ -21,135 +21,129 @@ class AERenderInstance(RenderInstance):
projectEntity = attr.ib(default=None)
stagingDir = attr.ib(default=None)
app_version = attr.ib(default=None)
publish_attributes = attr.ib(default=None)
file_name = attr.ib(default=None)
class CollectAERender(abstract_collect_render.AbstractCollectRender):
order = pyblish.api.CollectorOrder + 0.400
order = pyblish.api.CollectorOrder + 0.405
label = "Collect After Effects Render Layers"
hosts = ["aftereffects"]
# internal
family_remapping = {
"render": ("render.farm", "farm"), # (family, label)
"renderLocal": ("render", "local")
}
padding_width = 6
rendered_extension = 'png'
stub = get_stub()
_stub = None
@classmethod
def get_stub(cls):
if not cls._stub:
cls._stub = get_stub()
return cls._stub
def get_instances(self, context):
instances = []
instances_to_remove = []
app_version = self.stub.get_app_version()
app_version = CollectAERender.get_stub().get_app_version()
app_version = app_version[0:4]
current_file = context.data["currentFile"]
version = context.data["version"]
asset_entity = context.data["assetEntity"]
project_entity = context.data["projectEntity"]
compositions = self.stub.get_items(True)
compositions = CollectAERender.get_stub().get_items(True)
compositions_by_id = {item.id: item for item in compositions}
for inst in self.stub.get_metadata():
schema = inst.get('schema')
# loaded asset container skip it
if schema and 'container' in schema:
for inst in context:
if not inst.data.get("active", True):
continue
if not inst["members"]:
raise ValueError("Couldn't find id, unable to publish. " +
"Please recreate instance.")
item_id = inst["members"][0]
family = inst.data["family"]
if family not in ["render", "renderLocal"]: # legacy
continue
work_area_info = self.stub.get_work_area(int(item_id))
item_id = inst.data["members"][0]
work_area_info = CollectAERender.get_stub().get_work_area(
int(item_id))
if not work_area_info:
self.log.warning("Orphaned instance, deleting metadata")
self.stub.remove_instance(int(item_id))
inst_id = inst.get("instance_id") or item_id
CollectAERender.get_stub().remove_instance(inst_id)
continue
frameStart = work_area_info.workAreaStart
frameEnd = round(work_area_info.workAreaStart +
float(work_area_info.workAreaDuration) *
float(work_area_info.frameRate)) - 1
frame_start = work_area_info.workAreaStart
frame_end = round(work_area_info.workAreaStart +
float(work_area_info.workAreaDuration) *
float(work_area_info.frameRate)) - 1
fps = work_area_info.frameRate
# TODO add resolution when supported by extension
if inst["family"] in self.family_remapping.keys() \
and inst["active"]:
remapped_family = self.family_remapping[inst["family"]]
instance = AERenderInstance(
family=remapped_family[0],
families=[remapped_family[0]],
version=version,
time="",
source=current_file,
label="{} - {}".format(inst["subset"], remapped_family[1]),
subset=inst["subset"],
asset=context.data["assetEntity"]["name"],
attachTo=False,
setMembers='',
publish=True,
renderer='aerender',
name=inst["subset"],
resolutionWidth=asset_entity["data"].get(
"resolutionWidth",
project_entity["data"]["resolutionWidth"]),
resolutionHeight=asset_entity["data"].get(
"resolutionHeight",
project_entity["data"]["resolutionHeight"]),
pixelAspect=1,
tileRendering=False,
tilesX=0,
tilesY=0,
frameStart=frameStart,
frameEnd=frameEnd,
frameStep=1,
toBeRenderedOn='deadline',
fps=fps,
app_version=app_version
)
task_name = inst.data.get("task") # legacy
comp = compositions_by_id.get(int(item_id))
if not comp:
raise ValueError("There is no composition for item {}".
format(item_id))
instance.comp_name = comp.name
instance.comp_id = item_id
instance._anatomy = context.data["anatomy"]
instance.anatomyData = context.data["anatomyData"]
render_q = CollectAERender.get_stub().get_render_info()
if not render_q:
raise ValueError("No file extension set in Render Queue")
instance.outputDir = self._get_output_dir(instance)
instance.context = context
subset_name = inst.data["subset"]
instance = AERenderInstance(
family=family,
families=inst.data.get("families", []),
version=version,
time="",
source=current_file,
label="{} - {}".format(subset_name, family),
subset=subset_name,
asset=inst.data["asset"],
task=task_name,
attachTo=False,
setMembers='',
publish=True,
renderer='aerender',
name=subset_name,
resolutionWidth=render_q.width,
resolutionHeight=render_q.height,
pixelAspect=1,
tileRendering=False,
tilesX=0,
tilesY=0,
frameStart=frame_start,
frameEnd=frame_end,
frameStep=1,
toBeRenderedOn='deadline',
fps=fps,
app_version=app_version,
publish_attributes=inst.data.get("publish_attributes"),
file_name=render_q.file_name
)
settings = get_project_settings(os.getenv("AVALON_PROJECT"))
reviewable_subset_filter = \
(settings["deadline"]
["publish"]
["ProcessSubmittedJobOnFarm"]
["aov_filter"])
comp = compositions_by_id.get(int(item_id))
if not comp:
raise ValueError("There is no composition for item {}".
format(item_id))
instance.outputDir = self._get_output_dir(instance)
instance.comp_name = comp.name
instance.comp_id = item_id
if inst["family"] == "renderLocal":
# for local renders
instance.anatomyData["version"] = instance.version
instance.anatomyData["subset"] = instance.subset
instance.stagingDir = tempfile.mkdtemp()
instance.projectEntity = project_entity
is_local = "renderLocal" in inst.data["family"] # legacy
if inst.data.get("creator_attributes"):
is_local = not inst.data["creator_attributes"].get("farm")
if is_local:
# for local renders
instance = self._update_for_local(instance, project_entity)
else:
fam = "render.farm"
if fam not in instance.families:
instance.families.append(fam)
if self.hosts[0] in reviewable_subset_filter.keys():
for aov_pattern in \
reviewable_subset_filter[self.hosts[0]]:
if re.match(aov_pattern, instance.subset):
instance.families.append("review")
instance.review = True
break
self.log.info("New instance:: {}".format(instance))
instances.append(instance)
instances.append(instance)
instances_to_remove.append(inst)
for instance in instances_to_remove:
context.remove(instance)
return instances
def get_expected_files(self, render_instance):
@ -168,15 +162,11 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
start = render_instance.frameStart
end = render_instance.frameEnd
# pull file name from Render Queue Output module
render_q = self.stub.get_render_info()
if not render_q:
raise ValueError("No file extension set in Render Queue")
_, ext = os.path.splitext(os.path.basename(render_q.file_name))
_, ext = os.path.splitext(os.path.basename(render_instance.file_name))
base_dir = self._get_output_dir(render_instance)
expected_files = []
if "#" not in render_q.file_name: # single frame (mov)W
if "#" not in render_instance.file_name: # single frame (mov)W
path = os.path.join(base_dir, "{}_{}_{}.{}".format(
render_instance.asset,
render_instance.subset,
@ -216,3 +206,24 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
# for submit_publish_job
return base_dir
def _update_for_local(self, instance, project_entity):
"""Update old saved instances to current publishing format"""
instance.stagingDir = tempfile.mkdtemp()
instance.projectEntity = project_entity
fam = "render.local"
if fam not in instance.families:
instance.families.append(fam)
settings = get_project_settings(os.getenv("AVALON_PROJECT"))
reviewable_subset_filter = (settings["deadline"]
["publish"]
["ProcessSubmittedJobOnFarm"]
["aov_filter"].get(self.hosts[0]))
for aov_pattern in reviewable_subset_filter:
if re.match(aov_pattern, instance.subset):
instance.families.append("review")
instance.review = True
break
return instance

View file

@ -1,7 +1,8 @@
import os
from avalon import api
import pyblish.api
from openpype.lib import get_subset_name_with_asset_doc
from openpype.pipeline import legacy_io
class CollectWorkfile(pyblish.api.ContextPlugin):
@ -11,16 +12,45 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder + 0.1
def process(self, context):
task = api.Session["AVALON_TASK"]
existing_instance = None
for instance in context:
if instance.data["family"] == "workfile":
self.log.debug("Workfile instance found, won't create new")
existing_instance = instance
break
current_file = context.data["currentFile"]
staging_dir = os.path.dirname(current_file)
scene_file = os.path.basename(current_file)
if existing_instance is None: # old publish
instance = self._get_new_instance(context, scene_file)
else:
instance = existing_instance
# creating representation
representation = {
'name': 'aep',
'ext': 'aep',
'files': scene_file,
"stagingDir": staging_dir,
}
if not instance.data.get("representations"):
instance.data["representations"] = []
instance.data["representations"].append(representation)
instance.data["publish"] = instance.data["active"] # for DL
def _get_new_instance(self, context, scene_file):
task = legacy_io.Session["AVALON_TASK"]
version = context.data["version"]
asset_entity = context.data["assetEntity"]
project_entity = context.data["projectEntity"]
shared_instance_data = {
instance_data = {
"active": True,
"asset": asset_entity["name"],
"task": task,
"frameStart": asset_entity["data"]["frameStart"],
"frameEnd": asset_entity["data"]["frameEnd"],
"handleStart": asset_entity["data"]["handleStart"],
@ -59,20 +89,6 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
"representations": list()
})
# adding basic script data
instance.data.update(shared_instance_data)
instance.data.update(instance_data)
# creating representation
representation = {
'name': 'aep',
'ext': 'aep',
'files': scene_file,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)
self.log.info('Publishing After Effects workfile')
for i in context:
self.log.debug(f"{i.data['families']}")
return instance

View file

@ -12,7 +12,7 @@ class ExtractLocalRender(openpype.api.Extractor):
order = openpype.api.Extractor.order - 0.47
label = "Extract Local Render"
hosts = ["aftereffects"]
families = ["render"]
families = ["renderLocal", "render.local"]
def process(self, instance):
stub = get_stub()

View file

@ -1,15 +1,16 @@
import pyblish.api
import openpype.api
from openpype.hosts.aftereffects.api import get_stub
class ExtractSaveScene(openpype.api.Extractor):
class ExtractSaveScene(pyblish.api.ContextPlugin):
"""Save scene before extraction."""
order = openpype.api.Extractor.order - 0.48
label = "Extract Save Scene"
hosts = ["aftereffects"]
families = ["workfile"]
def process(self, instance):
def process(self, context):
stub = get_stub()
stub.save()

View file

@ -12,6 +12,8 @@ One of the settings in a scene doesn't match to asset settings in database.
### How to repair?
Change values for {invalid_keys_str} in the scene OR change them in the asset database if they are wrong there.
In the scene it is right mouse click on published composition > `Composition Settings`.
</description>
<detail>
### __Detailed Info__ (optional)

View file

@ -0,0 +1,54 @@
import json
import pyblish.api
from openpype.hosts.aftereffects.api import list_instances
class PreCollectRender(pyblish.api.ContextPlugin):
"""
Checks if render instance is of old type, adds to families to both
existing collectors work same way.
Could be removed in the future when no one uses old publish.
"""
label = "PreCollect Render"
order = pyblish.api.CollectorOrder + 0.400
hosts = ["aftereffects"]
family_remapping = {
"render": ("render.farm", "farm"), # (family, label)
"renderLocal": ("render.local", "local")
}
def process(self, context):
if context.data.get("newPublishing"):
self.log.debug("Not applicable for New Publisher, skip")
return
for inst in list_instances():
if inst.get("creator_attributes"):
raise ValueError("Instance created in New publisher, "
"cannot be published in Pyblish.\n"
"Please publish in New Publisher "
"or recreate instances with legacy Creators")
if inst["family"] not in self.family_remapping.keys():
continue
if not inst["members"]:
raise ValueError("Couldn't find id, unable to publish. " +
"Please recreate instance.")
instance = context.create_instance(inst["subset"])
inst["families"] = [self.family_remapping[inst["family"]][0]]
instance.data.update(inst)
self._debug_log(instance)
def _debug_log(self, instance):
def _default_json(value):
return str(value)
self.log.info(
json.dumps(instance.data, indent=4, default=_default_json)
)

View file

@ -1,7 +1,10 @@
from avalon import api
import pyblish.api
import openpype.api
from openpype.pipeline import PublishXmlValidationError
from openpype.pipeline import (
PublishXmlValidationError,
legacy_io,
)
from openpype.hosts.aftereffects.api import get_stub
@ -27,8 +30,8 @@ class ValidateInstanceAssetRepair(pyblish.api.Action):
for instance in instances:
data = stub.read(instance[0])
data["asset"] = api.Session["AVALON_ASSET"]
stub.imprint(instance[0], data)
data["asset"] = legacy_io.Session["AVALON_ASSET"]
stub.imprint(instance[0].instance_id, data)
class ValidateInstanceAsset(pyblish.api.InstancePlugin):
@ -51,7 +54,7 @@ class ValidateInstanceAsset(pyblish.api.InstancePlugin):
def process(self, instance):
instance_asset = instance.data["asset"]
current_asset = api.Session["AVALON_ASSET"]
current_asset = legacy_io.Session["AVALON_ASSET"]
msg = (
f"Instance asset {instance_asset} is not the same "
f"as current context {current_asset}."

View file

@ -5,11 +5,15 @@ import re
import pyblish.api
from openpype.pipeline import PublishXmlValidationError
from openpype.pipeline import (
PublishXmlValidationError,
OptionalPyblishPluginMixin
)
from openpype.hosts.aftereffects.api import get_asset_settings
class ValidateSceneSettings(pyblish.api.InstancePlugin):
class ValidateSceneSettings(OptionalPyblishPluginMixin,
pyblish.api.InstancePlugin):
"""
Ensures that Composition Settings (right mouse on comp) are same as
in FTrack on task.
@ -59,15 +63,20 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
def process(self, instance):
"""Plugin entry point."""
# Skip the instance if is not active by data on the instance
if not self.is_active(instance.data):
return
expected_settings = get_asset_settings()
self.log.info("config from DB::{}".format(expected_settings))
if any(re.search(pattern, os.getenv('AVALON_TASK'))
task_name = instance.data["anatomyData"]["task"]["name"]
if any(re.search(pattern, task_name)
for pattern in self.skip_resolution_check):
expected_settings.pop("resolutionWidth")
expected_settings.pop("resolutionHeight")
if any(re.search(pattern, os.getenv('AVALON_TASK'))
if any(re.search(pattern, task_name)
for pattern in self.skip_timelines_check):
expected_settings.pop('fps', None)
expected_settings.pop('frameStart', None)
@ -87,10 +96,14 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
duration = instance.data.get("frameEndHandle") - \
instance.data.get("frameStartHandle") + 1
self.log.debug("filtered config::{}".format(expected_settings))
self.log.debug("validated items::{}".format(expected_settings))
current_settings = {
"fps": fps,
"frameStart": instance.data.get("frameStart"),
"frameEnd": instance.data.get("frameEnd"),
"handleStart": instance.data.get("handleStart"),
"handleEnd": instance.data.get("handleEnd"),
"frameStartHandle": instance.data.get("frameStartHandle"),
"frameEndHandle": instance.data.get("frameEndHandle"),
"resolutionWidth": instance.data.get("resolutionWidth"),
@ -103,24 +116,22 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
invalid_keys = set()
for key, value in expected_settings.items():
if value != current_settings[key]:
invalid_settings.append(
"{} expected: {} found: {}".format(key, value,
current_settings[key])
)
msg = "'{}' expected: '{}' found: '{}'".format(
key, value, current_settings[key])
if key == "duration" and expected_settings.get("handleStart"):
msg += "Handles included in calculation. Remove " \
"handles in DB or extend frame range in " \
"Composition Setting."
invalid_settings.append(msg)
invalid_keys.add(key)
if ((expected_settings.get("handleStart")
or expected_settings.get("handleEnd"))
and invalid_settings):
msg = "Handles included in calculation. Remove handles in DB " +\
"or extend frame range in Composition Setting."
invalid_settings[-1]["reason"] = msg
msg = "Found invalid settings:\n{}".format(
"\n".join(invalid_settings)
)
if invalid_settings:
msg = "Found invalid settings:\n{}".format(
"\n".join(invalid_settings)
)
invalid_keys_str = ",".join(invalid_keys)
break_str = "<br/>"
invalid_setting_str = "<b>Found invalid settings:</b><br/>{}".\

View file

@ -15,9 +15,9 @@ from Qt import QtWidgets, QtCore
import bpy
import bpy.utils.previews
import avalon.api
from openpype.tools.utils import host_tools
from openpype import style
from openpype.pipeline import legacy_io
from openpype.tools.utils import host_tools
from .workio import OpenFileCacher
@ -279,7 +279,7 @@ class LaunchLoader(LaunchQtApp):
def before_window_show(self):
self._window.set_context(
{"asset": avalon.api.Session["AVALON_ASSET"]},
{"asset": legacy_io.Session["AVALON_ASSET"]},
refresh=True
)
@ -327,8 +327,8 @@ class LaunchWorkFiles(LaunchQtApp):
def execute(self, context):
result = super().execute(context)
self._window.set_context({
"asset": avalon.api.Session["AVALON_ASSET"],
"task": avalon.api.Session["AVALON_TASK"]
"asset": legacy_io.Session["AVALON_ASSET"],
"task": legacy_io.Session["AVALON_TASK"]
})
return result
@ -358,8 +358,8 @@ class TOPBAR_MT_avalon(bpy.types.Menu):
else:
pyblish_menu_icon_id = 0
asset = avalon.api.Session['AVALON_ASSET']
task = avalon.api.Session['AVALON_TASK']
asset = legacy_io.Session['AVALON_ASSET']
task = legacy_io.Session['AVALON_TASK']
context_label = f"{asset}, {task}"
context_label_item = layout.row()
context_label_item.operator(

View file

@ -1,6 +1,5 @@
import os
import sys
import importlib
import traceback
from typing import Callable, Dict, Iterator, List, Optional
@ -10,16 +9,15 @@ from . import lib
from . import ops
import pyblish.api
import avalon.api
from avalon import io, schema
from openpype.pipeline import (
schema,
legacy_io,
register_loader_plugin_path,
register_creator_plugin_path,
deregister_loader_plugin_path,
deregister_creator_plugin_path,
AVALON_CONTAINER_ID,
uninstall_host,
)
from openpype.api import Logger
from openpype.lib import (
@ -85,8 +83,8 @@ def uninstall():
def set_start_end_frames():
asset_name = io.Session["AVALON_ASSET"]
asset_doc = io.find_one({
asset_name = legacy_io.Session["AVALON_ASSET"]
asset_doc = legacy_io.find_one({
"type": "asset",
"name": asset_name
})
@ -190,7 +188,7 @@ def _on_task_changed():
# `directory` attribute, so it opens in that directory (does it?).
# https://docs.blender.org/api/blender2.8/bpy.types.Operator.html#calling-a-file-selector
# https://docs.blender.org/api/blender2.8/bpy.types.WindowManager.html#bpy.types.WindowManager.fileselect_add
workdir = avalon.api.Session["AVALON_WORKDIR"]
workdir = legacy_io.Session["AVALON_WORKDIR"]
log.debug("New working directory: %s", workdir)
@ -201,26 +199,6 @@ def _register_events():
log.info("Installed event callback for 'taskChanged'...")
def reload_pipeline(*args):
"""Attempt to reload pipeline at run-time.
Warning:
This is primarily for development and debugging purposes and not well
tested.
"""
uninstall_host()
for module in (
"avalon.io",
"avalon.pipeline",
"avalon.api",
):
module = importlib.import_module(module)
importlib.reload(module)
def _discover_gui() -> Optional[Callable]:
"""Return the most desirable of the currently registered GUIs"""

View file

@ -2,7 +2,7 @@
import bpy
from avalon import api
from openpype.pipeline import legacy_io
import openpype.hosts.blender.api.plugin
from openpype.hosts.blender.api import lib
@ -22,7 +22,7 @@ class CreateAction(openpype.hosts.blender.api.plugin.Creator):
name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
self.data['task'] = legacy_io.Session.get('AVALON_TASK')
lib.imprint(collection, self.data)
if (self.options or {}).get("useSelection"):

View file

@ -2,7 +2,7 @@
import bpy
from avalon import api
from openpype.pipeline import legacy_io
from openpype.hosts.blender.api import plugin, lib, ops
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
@ -37,7 +37,7 @@ class CreateAnimation(plugin.Creator):
# asset_group.empty_display_type = 'SINGLE_ARROW'
asset_group = bpy.data.collections.new(name=name)
instances.children.link(asset_group)
self.data['task'] = api.Session.get('AVALON_TASK')
self.data['task'] = legacy_io.Session.get('AVALON_TASK')
lib.imprint(asset_group, self.data)
if (self.options or {}).get("useSelection"):

View file

@ -2,7 +2,7 @@
import bpy
from avalon import api
from openpype.pipeline import legacy_io
from openpype.hosts.blender.api import plugin, lib, ops
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
@ -40,7 +40,7 @@ class CreateCamera(plugin.Creator):
asset_group = bpy.data.objects.new(name=name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
instances.objects.link(asset_group)
self.data['task'] = api.Session.get('AVALON_TASK')
self.data['task'] = legacy_io.Session.get('AVALON_TASK')
print(f"self.data: {self.data}")
lib.imprint(asset_group, self.data)

View file

@ -2,7 +2,7 @@
import bpy
from avalon import api
from openpype.pipeline import legacy_io
from openpype.hosts.blender.api import plugin, lib, ops
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
@ -34,7 +34,7 @@ class CreateLayout(plugin.Creator):
asset_group = bpy.data.objects.new(name=name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
instances.objects.link(asset_group)
self.data['task'] = api.Session.get('AVALON_TASK')
self.data['task'] = legacy_io.Session.get('AVALON_TASK')
lib.imprint(asset_group, self.data)
# Add selected objects to instance

View file

@ -2,7 +2,7 @@
import bpy
from avalon import api
from openpype.pipeline import legacy_io
from openpype.hosts.blender.api import plugin, lib, ops
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
@ -34,7 +34,7 @@ class CreateModel(plugin.Creator):
asset_group = bpy.data.objects.new(name=name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
instances.objects.link(asset_group)
self.data['task'] = api.Session.get('AVALON_TASK')
self.data['task'] = legacy_io.Session.get('AVALON_TASK')
lib.imprint(asset_group, self.data)
# Add selected objects to instance

View file

@ -2,7 +2,7 @@
import bpy
from avalon import api
from openpype.pipeline import legacy_io
import openpype.hosts.blender.api.plugin
from openpype.hosts.blender.api import lib
@ -22,7 +22,7 @@ class CreatePointcache(openpype.hosts.blender.api.plugin.Creator):
name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
self.data['task'] = legacy_io.Session.get('AVALON_TASK')
lib.imprint(collection, self.data)
if (self.options or {}).get("useSelection"):

View file

@ -2,7 +2,7 @@
import bpy
from avalon import api
from openpype.pipeline import legacy_io
from openpype.hosts.blender.api import plugin, lib, ops
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
@ -34,7 +34,7 @@ class CreateRig(plugin.Creator):
asset_group = bpy.data.objects.new(name=name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
instances.objects.link(asset_group)
self.data['task'] = api.Session.get('AVALON_TASK')
self.data['task'] = legacy_io.Session.get('AVALON_TASK')
lib.imprint(asset_group, self.data)
# Add selected objects to instance

View file

@ -7,7 +7,7 @@ import bpy
import bpy_extras
import bpy_extras.anim_utils
from avalon import io
from openpype.pipeline import legacy_io
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
import openpype.api
@ -139,7 +139,7 @@ class ExtractLayout(openpype.api.Extractor):
self.log.debug("Parent: {}".format(parent))
# Get blend reference
blend = io.find_one(
blend = legacy_io.find_one(
{
"type": "representation",
"parent": ObjectId(parent),
@ -150,7 +150,7 @@ class ExtractLayout(openpype.api.Extractor):
if blend:
blend_id = blend["_id"]
# Get fbx reference
fbx = io.find_one(
fbx = legacy_io.find_one(
{
"type": "representation",
"parent": ObjectId(parent),
@ -161,7 +161,7 @@ class ExtractLayout(openpype.api.Extractor):
if fbx:
fbx_id = fbx["_id"]
# Get abc reference
abc = io.find_one(
abc = legacy_io.find_one(
{
"type": "representation",
"parent": ObjectId(parent),

View file

@ -1,6 +1,5 @@
import json
from avalon import io
import pyblish.api

View file

@ -1,10 +1,10 @@
import os
import collections
from pprint import pformat
import pyblish.api
from avalon import io
from pprint import pformat
from openpype.pipeline import legacy_io
class AppendCelactionAudio(pyblish.api.ContextPlugin):
@ -60,7 +60,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin):
"""
# Query all subsets for asset
subset_docs = io.find({
subset_docs = legacy_io.find({
"type": "subset",
"parent": asset_doc["_id"]
})
@ -93,7 +93,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin):
}}
]
last_versions_by_subset_id = dict()
for doc in io.aggregate(pipeline):
for doc in legacy_io.aggregate(pipeline):
doc["parent"] = doc["_id"]
doc["_id"] = doc.pop("_version_id")
last_versions_by_subset_id[doc["parent"]] = doc
@ -102,7 +102,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin):
for version_doc in last_versions_by_subset_id.values():
version_docs_by_id[version_doc["_id"]] = version_doc
repre_docs = io.find({
repre_docs = legacy_io.find({
"type": "representation",
"parent": {"$in": list(version_docs_by_id.keys())},
"name": {"$in": representations}

View file

@ -1,6 +1,6 @@
import os
from avalon import api
import pyblish.api
from openpype.pipeline import legacy_io
class CollectCelactionInstances(pyblish.api.ContextPlugin):
@ -10,7 +10,7 @@ class CollectCelactionInstances(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder + 0.1
def process(self, context):
task = api.Session["AVALON_TASK"]
task = legacy_io.Session["AVALON_TASK"]
current_file = context.data["currentFile"]
staging_dir = os.path.dirname(current_file)
scene_file = os.path.basename(current_file)

View file

@ -873,6 +873,5 @@ class OpenClipSolver(flib.MediaInfoFile):
if feed_clr_obj is not None:
feed_clr_obj = ET.Element(
"colourSpace", {"type": "string"})
feed_clr_obj.text = profile_name
feed_storage_obj.append(feed_clr_obj)
feed_clr_obj.text = profile_name

View file

@ -26,12 +26,10 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
add_tasks = []
def process(self, context):
project = context.data["flameProject"]
selected_segments = context.data["flameSelectedSegments"]
self.log.debug("__ selected_segments: {}".format(selected_segments))
self.otio_timeline = context.data["otioTimeline"]
self.clips_in_reels = opfapi.get_clips_in_reels(project)
self.fps = context.data["fps"]
# process all sellected
@ -63,9 +61,6 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
# get file path
file_path = clip_data["fpath"]
# get source clip
source_clip = self._get_reel_clip(file_path)
first_frame = opfapi.get_frame_from_filename(file_path) or 0
head, tail = self._get_head_tail(clip_data, first_frame)
@ -103,7 +98,6 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
"families": families,
"publish": marker_data["publish"],
"fps": self.fps,
"flameSourceClip": source_clip,
"sourceFirstFrame": int(first_frame),
"path": file_path,
"flameAddTasks": self.add_tasks,
@ -258,14 +252,6 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
)
return head, tail
def _get_reel_clip(self, path):
match_reel_clip = [
clip for clip in self.clips_in_reels
if clip["fpath"] == path
]
if match_reel_clip:
return match_reel_clip.pop()
def _get_resolution_to_data(self, data, context):
assert data.get("otioClip"), "Missing `otioClip` data"

View file

@ -1,6 +1,7 @@
import pyblish.api
import avalon.api as avalon
import openpype.lib as oplib
from openpype.pipeline import legacy_io
import openpype.hosts.flame.api as opfapi
from openpype.hosts.flame.otio import flame_export
@ -18,7 +19,7 @@ class CollecTimelineOTIO(pyblish.api.ContextPlugin):
# main
asset_doc = context.data["assetEntity"]
task_name = avalon.Session["AVALON_TASK"]
task_name = legacy_io.Session["AVALON_TASK"]
project = opfapi.get_current_project()
sequence = opfapi.get_current_sequence(opfapi.CTX.selection)

View file

@ -1,4 +1,5 @@
import os
import re
from pprint import pformat
from copy import deepcopy
@ -6,6 +7,8 @@ import pyblish.api
import openpype.api
from openpype.hosts.flame import api as opfapi
import flame
class ExtractSubsetResources(openpype.api.Extractor):
"""
@ -20,27 +23,31 @@ class ExtractSubsetResources(openpype.api.Extractor):
# plugin defaults
default_presets = {
"thumbnail": {
"active": True,
"ext": "jpg",
"xml_preset_file": "Jpeg (8-bit).xml",
"xml_preset_dir": "",
"export_type": "File Sequence",
"ignore_comment_attrs": True,
"parsed_comment_attrs": False,
"colorspace_out": "Output - sRGB",
"representation_add_range": False,
"representation_tags": ["thumbnail"]
"representation_tags": ["thumbnail"],
"path_regex": ".*"
},
"ftrackpreview": {
"active": True,
"ext": "mov",
"xml_preset_file": "Apple iPad (1920x1080).xml",
"xml_preset_dir": "",
"export_type": "Movie",
"ignore_comment_attrs": True,
"parsed_comment_attrs": False,
"colorspace_out": "Output - Rec.709",
"representation_add_range": True,
"representation_tags": [
"review",
"delete"
]
],
"path_regex": ".*"
}
}
keep_original_representation = False
@ -61,13 +68,10 @@ class ExtractSubsetResources(openpype.api.Extractor):
# flame objects
segment = instance.data["item"]
asset_name = instance.data["asset"]
segment_name = segment.name.get_value()
clip_path = instance.data["path"]
sequence_clip = instance.context.data["flameSequence"]
clip_data = instance.data["flameSourceClip"]
reel_clip = None
if clip_data:
reel_clip = clip_data["PyClip"]
# segment's parent track name
s_track_name = segment.parent.name.get_value()
@ -104,14 +108,44 @@ class ExtractSubsetResources(openpype.api.Extractor):
for unique_name, preset_config in export_presets.items():
modify_xml_data = {}
# get activating attributes
activated_preset = preset_config["active"]
filter_path_regex = preset_config.get("filter_path_regex")
self.log.info(
"Preset `{}` is active `{}` with filter `{}`".format(
unique_name, activated_preset, filter_path_regex
)
)
self.log.debug(
"__ clip_path: `{}`".format(clip_path))
# skip if not activated presete
if not activated_preset:
continue
# exclude by regex filter if any
if (
filter_path_regex
and not re.search(filter_path_regex, clip_path)
):
continue
# get all presets attributes
extension = preset_config["ext"]
preset_file = preset_config["xml_preset_file"]
preset_dir = preset_config["xml_preset_dir"]
export_type = preset_config["export_type"]
repre_tags = preset_config["representation_tags"]
ignore_comment_attrs = preset_config["ignore_comment_attrs"]
parsed_comment_attrs = preset_config["parsed_comment_attrs"]
color_out = preset_config["colorspace_out"]
self.log.info(
"Processing `{}` as `{}` to `{}` type...".format(
preset_file, export_type, extension
)
)
# get attribures related loading in integrate_batch_group
load_to_batch_group = preset_config.get(
"load_to_batch_group")
@ -131,161 +165,157 @@ class ExtractSubsetResources(openpype.api.Extractor):
in_mark = (source_start_handles - source_first_frame) + 1
out_mark = in_mark + source_duration_handles
# make test for type of preset and available reel_clip
if (
not reel_clip
and export_type != "Sequence Publish"
):
self.log.warning((
"Skipping preset {}. Not available "
"reel clip for {}").format(
preset_file, segment_name
))
continue
# by default export source clips
exporting_clip = reel_clip
exporting_clip = None
name_patern_xml = "<name>_{}.".format(
unique_name)
if export_type == "Sequence Publish":
# change export clip to sequence
exporting_clip = sequence_clip
exporting_clip = flame.duplicate(sequence_clip)
# change in/out marks to timeline in/out
in_mark = clip_in
out_mark = clip_out
# only keep visible layer where instance segment is child
self.hide_others(
exporting_clip, segment_name, s_track_name)
# add xml tags modifications
modify_xml_data.update({
"exportHandles": True,
"nbHandles": handles,
"startFrame": frame_start
})
# change name patern
name_patern_xml = (
"<segment name>_<shot name>_{}.").format(
unique_name)
else:
exporting_clip = self.import_clip(clip_path)
exporting_clip.name.set_value("{}_{}".format(
asset_name, segment_name))
if not ignore_comment_attrs:
# add any xml overrides collected from segment.comment
modify_xml_data.update(instance.data["xml_overrides"])
# change in/out marks to timeline in/out
in_mark = clip_in
out_mark = clip_out
# add xml tags modifications
modify_xml_data.update({
"exportHandles": True,
"nbHandles": handles,
"startFrame": frame_start,
"namePattern": name_patern_xml
})
if parsed_comment_attrs:
# add any xml overrides collected from segment.comment
modify_xml_data.update(instance.data["xml_overrides"])
self.log.debug("__ modify_xml_data: {}".format(pformat(
modify_xml_data
)))
# with maintained duplication loop all presets
with opfapi.maintained_object_duplication(
exporting_clip) as duplclip:
kwargs = {}
export_kwargs = {}
# validate xml preset file is filled
if preset_file == "":
raise ValueError(
("Check Settings for {} preset: "
"`XML preset file` is not filled").format(
unique_name)
)
if export_type == "Sequence Publish":
# only keep visible layer where instance segment is child
self.hide_others(duplclip, segment_name, s_track_name)
# resolve xml preset dir if not filled
if preset_dir == "":
preset_dir = opfapi.get_preset_path_by_xml_name(
preset_file)
# validate xml preset file is filled
if preset_file == "":
if not preset_dir:
raise ValueError(
("Check Settings for {} preset: "
"`XML preset file` is not filled").format(
unique_name)
"`XML preset file` {} is not found").format(
unique_name, preset_file)
)
# resolve xml preset dir if not filled
if preset_dir == "":
preset_dir = opfapi.get_preset_path_by_xml_name(
preset_file)
# create preset path
preset_orig_xml_path = str(os.path.join(
preset_dir, preset_file
))
if not preset_dir:
raise ValueError(
("Check Settings for {} preset: "
"`XML preset file` {} is not found").format(
unique_name, preset_file)
)
preset_path = opfapi.modify_preset_file(
preset_orig_xml_path, staging_dir, modify_xml_data)
# create preset path
preset_orig_xml_path = str(os.path.join(
preset_dir, preset_file
))
# define kwargs based on preset type
if "thumbnail" in unique_name:
export_kwargs["thumb_frame_number"] = int(in_mark + (
source_duration_handles / 2))
else:
export_kwargs.update({
"in_mark": in_mark,
"out_mark": out_mark
})
preset_path = opfapi.modify_preset_file(
preset_orig_xml_path, staging_dir, modify_xml_data)
# get and make export dir paths
export_dir_path = str(os.path.join(
staging_dir, unique_name
))
os.makedirs(export_dir_path)
# define kwargs based on preset type
if "thumbnail" in unique_name:
kwargs["thumb_frame_number"] = in_mark + (
source_duration_handles / 2)
else:
kwargs.update({
"in_mark": in_mark,
"out_mark": out_mark
})
# export
opfapi.export_clip(
export_dir_path, exporting_clip, preset_path, **export_kwargs)
# get and make export dir paths
export_dir_path = str(os.path.join(
staging_dir, unique_name
))
os.makedirs(export_dir_path)
# create representation data
representation_data = {
"name": unique_name,
"outputName": unique_name,
"ext": extension,
"stagingDir": export_dir_path,
"tags": repre_tags,
"data": {
"colorspace": color_out
},
"load_to_batch_group": load_to_batch_group,
"batch_group_loader_name": batch_group_loader_name
}
# export
opfapi.export_clip(
export_dir_path, duplclip, preset_path, **kwargs)
# collect all available content of export dir
files = os.listdir(export_dir_path)
extension = preset_config["ext"]
# make sure no nested folders inside
n_stage_dir, n_files = self._unfolds_nested_folders(
export_dir_path, files, extension)
# create representation data
representation_data = {
"name": unique_name,
"outputName": unique_name,
"ext": extension,
"stagingDir": export_dir_path,
"tags": repre_tags,
"data": {
"colorspace": color_out
},
"load_to_batch_group": load_to_batch_group,
"batch_group_loader_name": batch_group_loader_name
}
# fix representation in case of nested folders
if n_stage_dir:
representation_data["stagingDir"] = n_stage_dir
files = n_files
# collect all available content of export dir
files = os.listdir(export_dir_path)
# add files to the representation, but add
# an image sequence as a list
if (
# first check if path in files is not mov extension
[
f for f in files
if os.path.splitext(f)[-1] == ".mov"
]
# then try if thumbnail is not in unique name
or unique_name == "thumbnail"
):
representation_data["files"] = files.pop()
else:
representation_data["files"] = files
# make sure no nested folders inside
n_stage_dir, n_files = self._unfolds_nested_folders(
export_dir_path, files, extension)
# add frame range
if preset_config["representation_add_range"]:
representation_data.update({
"frameStart": frame_start_handle,
"frameEnd": (
frame_start_handle + source_duration_handles),
"fps": instance.data["fps"]
})
# fix representation in case of nested folders
if n_stage_dir:
representation_data["stagingDir"] = n_stage_dir
files = n_files
instance.data["representations"].append(representation_data)
# add files to the representation, but add
# an image sequence as a list
if (
# first check if path in files is not mov extension
[
f for f in files
if os.path.splitext(f)[-1] == ".mov"
]
# then try if thumbnail is not in unique name
or unique_name == "thumbnail"
):
representation_data["files"] = files.pop()
else:
representation_data["files"] = files
# add review family if found in tags
if "review" in repre_tags:
instance.data["families"].append("review")
# add frame range
if preset_config["representation_add_range"]:
representation_data.update({
"frameStart": frame_start_handle,
"frameEnd": (
frame_start_handle + source_duration_handles),
"fps": instance.data["fps"]
})
self.log.info("Added representation: {}".format(
representation_data))
instance.data["representations"].append(representation_data)
# add review family if found in tags
if "review" in repre_tags:
instance.data["families"].append("review")
self.log.info("Added representation: {}".format(
representation_data))
if export_type == "Sequence Publish":
# at the end remove the duplicated clip
flame.delete(exporting_clip)
self.log.debug("All representations: {}".format(
pformat(instance.data["representations"])))
@ -373,3 +403,18 @@ class ExtractSubsetResources(openpype.api.Extractor):
for segment in track.segments:
if segment.name.get_value() != segment_name:
segment.hidden = True
def import_clip(self, path):
    """Import a clip from a file path via the Flame API.

    Args:
        path (str): file path to import clips from.

    Returns:
        The first imported PyClip, or None when the path yields no clips.
        When more than one clip is imported only the first is returned and
        a warning is logged.
    """
    clips = flame.import_clips(path)
    self.log.info("Clips [{}] imported from `{}`".format(clips, path))
    if not clips:
        self.log.warning("Path `{}` is not having any clips".format(path))
        return None
    elif len(clips) > 1:
        # fixed log typo: "more that one" -> "more than one"
        self.log.warning(
            "Path `{}` is containing more than one clip".format(path)
        )
    return clips[0]

View file

@ -1,26 +0,0 @@
import pyblish
@pyblish.api.log
class ValidateSourceClip(pyblish.api.InstancePlugin):
    """Fail publishing when the instance has an empty `flameSourceClip`.

    The segment must have a matching source clip saved in the reels
    (created via the `Save Sources` button in the Conform tab).
    """

    order = pyblish.api.ValidatorOrder
    label = "Validate Source Clip"
    hosts = ["flame"]
    families = ["clip"]
    optional = True
    active = False

    def process(self, instance):
        source_clip = instance.data["flameSourceClip"]
        self.log.debug("_ flame_source_clip: {}".format(source_clip))

        # a present clip means the segment was conformed correctly
        if source_clip is not None:
            return

        raise AttributeError((
            "Timeline segment `{}` is not having "
            "relative clip in reels. Please make sure "
            "you push `Save Sources` button in Conform Tab").format(
                instance.data["asset"]
        ))

View file

@ -6,8 +6,10 @@ import contextlib
from bson.objectid import ObjectId
from Qt import QtGui
from avalon import io
from openpype.pipeline import switch_container
from openpype.pipeline import (
switch_container,
legacy_io,
)
from .pipeline import get_current_comp, comp_lock_and_undo_chunk
self = sys.modules[__name__]
@ -94,8 +96,10 @@ def switch_item(container,
# so we can use the original name from those.
if any(not x for x in [asset_name, subset_name, representation_name]):
_id = ObjectId(container["representation"])
representation = io.find_one({"type": "representation", "_id": _id})
version, subset, asset, project = io.parenthood(representation)
representation = legacy_io.find_one({
"type": "representation", "_id": _id
})
version, subset, asset, project = legacy_io.parenthood(representation)
if asset_name is None:
asset_name = asset["name"]
@ -107,14 +111,14 @@ def switch_item(container,
representation_name = representation["name"]
# Find the new one
asset = io.find_one({
asset = legacy_io.find_one({
"name": asset_name,
"type": "asset"
})
assert asset, ("Could not find asset in the database with the name "
"'%s'" % asset_name)
subset = io.find_one({
subset = legacy_io.find_one({
"name": subset_name,
"type": "subset",
"parent": asset["_id"]
@ -122,7 +126,7 @@ def switch_item(container,
assert subset, ("Could not find subset in the database with the name "
"'%s'" % subset_name)
version = io.find_one(
version = legacy_io.find_one(
{
"type": "version",
"parent": subset["_id"]
@ -134,7 +138,7 @@ def switch_item(container,
asset_name, subset_name
)
representation = io.find_one({
representation = legacy_io.find_one({
"name": representation_name,
"type": "representation",
"parent": version["_id"]}

View file

@ -1,10 +1,9 @@
import os
import contextlib
from avalon import io
from openpype.pipeline import (
load,
legacy_io,
get_representation_path,
)
from openpype.hosts.fusion.api import (
@ -212,8 +211,10 @@ class FusionLoadSequence(load.LoaderPlugin):
path = self._get_first_image(root)
# Get start frame from version data
version = io.find_one({"type": "version",
"_id": representation["parent"]})
version = legacy_io.find_one({
"type": "version",
"_id": representation["parent"]
})
start = version["data"].get("frameStart")
if start is None:
self.log.warning("Missing start frame for updated version"

View file

@ -4,10 +4,10 @@ import getpass
import requests
from avalon import api
import pyblish.api
from openpype.pipeline import legacy_io
class FusionSubmitDeadline(pyblish.api.InstancePlugin):
"""Submit current Comp to Deadline
@ -133,7 +133,7 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin):
"FUSION9_MasterPrefs"
]
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **api.Session)
if key in os.environ}, **legacy_io.Session)
payload["JobInfo"].update({
"EnvironmentKeyValue%d" % index: "{key}={value}".format(
@ -146,7 +146,7 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin):
self.log.info(json.dumps(payload, indent=4, sort_keys=True))
# E.g. http://192.168.0.1:8082/api/jobs
url = "{}/api/jobs".format(DEADLINE_REST_URL)
url = "{}/api/jobs".format(deadline_url)
response = requests.post(url, json=payload)
if not response.ok:
raise Exception(response.text)

View file

@ -4,10 +4,8 @@ import sys
import logging
# Pipeline imports
import avalon.api
from avalon import io
from openpype.pipeline import (
legacy_io,
install_host,
registered_host,
)
@ -167,7 +165,7 @@ def update_frame_range(comp, representations):
"""
version_ids = [r["parent"] for r in representations]
versions = io.find({"type": "version", "_id": {"$in": version_ids}})
versions = legacy_io.find({"type": "version", "_id": {"$in": version_ids}})
versions = list(versions)
versions = [v for v in versions
@ -205,12 +203,11 @@ def switch(asset_name, filepath=None, new=True):
# Assert asset name exists
# It is better to do this here then to wait till switch_shot does it
asset = io.find_one({"type": "asset", "name": asset_name})
asset = legacy_io.find_one({"type": "asset", "name": asset_name})
assert asset, "Could not find '%s' in the database" % asset_name
# Get current project
self._project = io.find_one({"type": "project",
"name": avalon.api.Session["AVALON_PROJECT"]})
self._project = legacy_io.find_one({"type": "project"})
# Go to comp
if not filepath:
@ -241,7 +238,7 @@ def switch(asset_name, filepath=None, new=True):
current_comp.Print(message)
# Build the session to switch to
switch_to_session = avalon.api.Session.copy()
switch_to_session = legacy_io.Session.copy()
switch_to_session["AVALON_ASSET"] = asset['name']
if new:

View file

@ -5,11 +5,13 @@ import logging
from Qt import QtWidgets, QtCore
from avalon import io
import qtawesome as qta
from openpype import style
from openpype.pipeline import install_host
from openpype.pipeline import (
install_host,
legacy_io,
)
from openpype.hosts.fusion import api
from openpype.lib.avalon_context import get_workdir_from_session
@ -164,7 +166,7 @@ class App(QtWidgets.QWidget):
return items
def collect_assets(self):
return list(io.find({"type": "asset"}, {"name": True}))
return list(legacy_io.find({"type": "asset"}, {"name": True}))
def populate_comp_box(self, files):
"""Ensure we display the filename only but the path is stored as well

View file

@ -419,7 +419,6 @@ class ExtractImage(pyblish.api.InstancePlugin):
```python
import os
from avalon import api, io
import openpype.hosts.harmony.api as harmony
signature = str(uuid4()).replace("-", "_")
@ -611,7 +610,7 @@ class ImageSequenceLoader(load.LoaderPlugin):
def update(self, container, representation):
node = container.pop("node")
version = io.find_one({"_id": representation["parent"]})
version = legacy_io.find_one({"_id": representation["parent"]})
files = []
for f in version["data"]["files"]:
files.append(

View file

@ -5,11 +5,10 @@ import logging
from bson.objectid import ObjectId
import pyblish.api
from avalon import io
from openpype import lib
from openpype.lib import register_event_callback
from openpype.pipeline import (
legacy_io,
register_loader_plugin_path,
register_creator_plugin_path,
deregister_loader_plugin_path,
@ -111,7 +110,7 @@ def check_inventory():
outdated_containers = []
for container in ls():
representation = container['representation']
representation_doc = io.find_one(
representation_doc = legacy_io.find_one(
{
"_id": ObjectId(representation),
"type": "representation"

View file

@ -3,13 +3,13 @@
from pathlib import Path
import attr
from avalon import api
from openpype.lib import get_formatted_current_time
import openpype.lib.abstract_collect_render
import openpype.hosts.harmony.api as harmony
from openpype.lib.abstract_collect_render import RenderInstance
import openpype.lib
import openpype.lib.abstract_collect_render
from openpype.lib.abstract_collect_render import RenderInstance
from openpype.lib import get_formatted_current_time
from openpype.pipeline import legacy_io
import openpype.hosts.harmony.api as harmony
@attr.s
@ -143,7 +143,7 @@ class CollectFarmRender(openpype.lib.abstract_collect_render.
source=context.data["currentFile"],
label=node.split("/")[1],
subset=subset_name,
asset=api.Session["AVALON_ASSET"],
asset=legacy_io.Session["AVALON_ASSET"],
attachTo=False,
setMembers=[node],
publish=info[4],

View file

@ -12,8 +12,7 @@ import hiero
from Qt import QtWidgets
from bson.objectid import ObjectId
import avalon.api as avalon
import avalon.io
from openpype.pipeline import legacy_io
from openpype.api import (Logger, Anatomy, get_anatomy_settings)
from . import tags
@ -38,8 +37,6 @@ self.pype_tag_name = "openpypeData"
self.default_sequence_name = "openpypeSequence"
self.default_bin_name = "openpypeBin"
AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype")
def flatten(_list):
for item in _list:
@ -49,6 +46,7 @@ def flatten(_list):
else:
yield item
def get_current_project(remove_untitled=False):
projects = flatten(hiero.core.projects())
if not remove_untitled:
@ -384,7 +382,7 @@ def get_publish_attribute(tag):
def sync_avalon_data_to_workfile():
# import session to get project dir
project_name = avalon.Session["AVALON_PROJECT"]
project_name = legacy_io.Session["AVALON_PROJECT"]
anatomy = Anatomy(project_name)
work_template = anatomy.templates["work"]["path"]
@ -409,7 +407,7 @@ def sync_avalon_data_to_workfile():
project.setProjectRoot(active_project_root)
# get project data from avalon db
project_doc = avalon.io.find_one({"type": "project"})
project_doc = legacy_io.find_one({"type": "project"})
project_data = project_doc["data"]
log.debug("project_data: {}".format(project_data))
@ -995,7 +993,6 @@ def check_inventory_versions():
it to red.
"""
from . import parse_container
from avalon import io
# presets
clip_color_last = "green"
@ -1007,19 +1004,19 @@ def check_inventory_versions():
if container:
# get representation from io
representation = io.find_one({
representation = legacy_io.find_one({
"type": "representation",
"_id": ObjectId(container["representation"])
})
# Get start frame from version data
version = io.find_one({
version = legacy_io.find_one({
"type": "version",
"_id": representation["parent"]
})
# get all versions in list
versions = io.find({
versions = legacy_io.find({
"type": "version",
"parent": version["parent"]
}).distinct('name')

View file

@ -1,14 +1,16 @@
import os
import sys
import hiero.core
from openpype.api import Logger
from openpype.tools.utils import host_tools
from avalon.api import Session
from hiero.ui import findMenuAction
from openpype.api import Logger
from openpype.pipeline import legacy_io
from openpype.tools.utils import host_tools
from . import tags
log = Logger().get_logger(__name__)
log = Logger.get_logger(__name__)
self = sys.modules[__name__]
self._change_context_menu = None
@ -24,8 +26,10 @@ def update_menu_task_label():
log.warning("Can't find menuItem: {}".format(object_name))
return
label = "{}, {}".format(Session["AVALON_ASSET"],
Session["AVALON_TASK"])
label = "{}, {}".format(
legacy_io.Session["AVALON_ASSET"],
legacy_io.Session["AVALON_TASK"]
)
menu = found_menu.menu()
self._change_context_menu = label
@ -51,7 +55,8 @@ def menu_install():
menu_name = os.environ['AVALON_LABEL']
context_label = "{0}, {1}".format(
Session["AVALON_ASSET"], Session["AVALON_TASK"]
legacy_io.Session["AVALON_ASSET"],
legacy_io.Session["AVALON_TASK"]
)
self._change_context_menu = context_label

View file

@ -5,10 +5,10 @@ import os
import contextlib
from collections import OrderedDict
from avalon import schema
from pyblish import api as pyblish
from openpype.api import Logger
from openpype.pipeline import (
schema,
register_creator_plugin_path,
register_loader_plugin_path,
deregister_creator_plugin_path,
@ -20,8 +20,6 @@ from . import lib, menu, events
log = Logger().get_logger(__name__)
AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype")
# plugin paths
API_DIR = os.path.dirname(os.path.abspath(__file__))
HOST_DIR = os.path.dirname(API_DIR)
@ -247,15 +245,10 @@ def reload_config():
import importlib
for module in (
"avalon",
"avalon.lib",
"avalon.pipeline",
"pyblish",
"pypeapp",
"{}.api".format(AVALON_CONFIG),
"{}.hosts.hiero.lib".format(AVALON_CONFIG),
"{}.hosts.hiero.menu".format(AVALON_CONFIG),
"{}.hosts.hiero.tags".format(AVALON_CONFIG)
"openpype.api",
"openpype.hosts.hiero.lib",
"openpype.hosts.hiero.menu",
"openpype.hosts.hiero.tags"
):
log.info("Reloading module: {}...".format(module))
try:

View file

@ -3,23 +3,13 @@ import os
import hiero
from openpype.api import Logger
from avalon import io
from openpype.pipeline import legacy_io
log = Logger().get_logger(__name__)
log = Logger.get_logger(__name__)
def tag_data():
return {
# "Retiming": {
# "editable": "1",
# "note": "Clip has retime or TimeWarp effects (or multiple effects stacked on the clip)", # noqa
# "icon": "retiming.png",
# "metadata": {
# "family": "retiming",
# "marginIn": 1,
# "marginOut": 1
# }
# },
"[Lenses]": {
"Set lense here": {
"editable": "1",
@ -48,6 +38,16 @@ def tag_data():
"family": "comment",
"subset": "main"
}
},
"FrameMain": {
"editable": "1",
"note": "Publishing a frame subset.",
"icon": "z_layer_main.png",
"metadata": {
"family": "frame",
"subset": "main",
"format": "png"
}
}
}
@ -141,7 +141,7 @@ def add_tags_to_workfile():
nks_pres_tags = tag_data()
# Get project task types.
tasks = io.find_one({"type": "project"})["config"]["tasks"]
tasks = legacy_io.find_one({"type": "project"})["config"]["tasks"]
nks_pres_tags["[Tasks]"] = {}
log.debug("__ tasks: {}".format(tasks))
for task_type in tasks.keys():
@ -159,7 +159,7 @@ def add_tags_to_workfile():
# asset builds and shots.
if int(os.getenv("TAG_ASSETBUILD_STARTUP", 0)) == 1:
nks_pres_tags["[AssetBuilds]"] = {}
for asset in io.find({"type": "asset"}):
for asset in legacy_io.find({"type": "asset"}):
if asset["data"]["entityType"] == "AssetBuild":
nks_pres_tags["[AssetBuilds]"][asset["name"]] = {
"editable": "1",

View file

@ -1,5 +1,7 @@
from avalon import io
from openpype.pipeline import get_representation_path
from openpype.pipeline import (
legacy_io,
get_representation_path,
)
import openpype.hosts.hiero.api as phiero
# from openpype.hosts.hiero.api import plugin, lib
# reload(lib)
@ -105,7 +107,7 @@ class LoadClip(phiero.SequenceLoader):
namespace = container['namespace']
track_item = phiero.get_track_items(
track_item_name=namespace)
version = io.find_one({
version = legacy_io.find_one({
"type": "version",
"_id": representation["parent"]
})
@ -174,7 +176,7 @@ class LoadClip(phiero.SequenceLoader):
# define version name
version_name = version.get("name", None)
# get all versions in list
versions = io.find({
versions = legacy_io.find({
"type": "version",
"parent": version["parent"]
}).distinct('name')

View file

@ -0,0 +1,142 @@
from pprint import pformat
import re
import ast
import json
import pyblish.api
class CollectFrameTagInstances(pyblish.api.ContextPlugin):
    """Collect single-frame publish instances from sequence tags.

    A tag is expected to have metadata:
        {
            "family": "frame"
            "subset": "main"
        }
    """

    order = pyblish.api.CollectorOrder
    label = "Collect Frames"
    hosts = ["hiero"]

    def process(self, context):
        self._context = context

        # collect all sequence tags and group the tagged frames by subset
        subset_data = self._create_frame_subset_data_sequence(context)

        self.log.debug("__ subset_data: {}".format(
            pformat(subset_data)
        ))

        # create one publish instance per collected subset
        self._create_instances(subset_data)

    def _get_tag_data(self, tag):
        """Return tag metadata with `tag.` key prefixes stripped.

        String values are coerced to int/bool/None where they match,
        otherwise parsed with `ast.literal_eval`; on parse failure the
        raw string is kept.
        """
        data = {}

        # get tag metadata attribute
        tag_data = tag.metadata()
        # convert tag metadata to normal key names and values to correct types
        for k, v in dict(tag_data).items():
            key = k.replace("tag.", "")

            try:
                # capture exceptions which are related to strings only
                if re.match(r"^[\d]+$", v):
                    value = int(v)
                elif re.match(r"^True$", v):
                    value = True
                elif re.match(r"^False$", v):
                    value = False
                elif re.match(r"^None$", v):
                    value = None
                elif re.match(r"^[\w\d_]+$", v):
                    # plain word token: keep as string, skip literal_eval
                    value = v
                else:
                    value = ast.literal_eval(v)
            except (ValueError, SyntaxError):
                value = v

            data[key] = value

        return data

    def _create_frame_subset_data_sequence(self, context):
        """Group tagged frames of the active timeline by subset name.

        Returns:
            dict: {subset_name: {"frames": [...], "format": str,
                   "asset": str}}
        """
        sequence_tags = []
        sequence = context.data["activeTimeline"]

        # all frame indexes that are inside the sequence duration
        publish_frames = range(int(sequence.duration() + 1))

        self.log.debug("__ publish_frames: {}".format(
            pformat(publish_frames)
        ))

        # keep only tags of family "frame"
        for tag in sequence.tags():
            tag_data = self._get_tag_data(tag)
            self.log.debug("__ tag_data: {}".format(
                pformat(tag_data)
            ))
            if not tag_data:
                continue
            if "family" not in tag_data:
                continue
            if tag_data["family"] != "frame":
                continue

            sequence_tags.append(tag_data)

        self.log.debug("__ sequence_tags: {}".format(
            pformat(sequence_tags)
        ))

        # first collect all available subset tag frames
        subset_data = {}
        for tag_data in sequence_tags:
            frame = int(tag_data["start"])

            # ignore tags placed outside of the sequence range
            if frame not in publish_frames:
                continue

            subset = tag_data["subset"]
            if subset in subset_data:
                # update existing subset key
                subset_data[subset]["frames"].append(frame)
            else:
                # create new subset key
                subset_data[subset] = {
                    "frames": [frame],
                    "format": tag_data["format"],
                    "asset": context.data["assetEntity"]["name"]
                }
        return subset_data

    def _create_instances(self, subset_data):
        """Create one pyblish instance per collected subset."""
        # NOTE: loop variable renamed so it no longer shadows the
        # `subset_data` parameter it iterates over
        for subset_name, subset_info in subset_data.items():
            name = "frame" + subset_name.title()
            data = {
                "name": name,
                "label": "{} {}".format(name, subset_info["frames"]),
                "family": "image",
                "families": ["frame"],
                "asset": subset_info["asset"],
                "subset": name,
                "format": subset_info["format"],
                "frames": subset_info["frames"]
            }
            self._context.create_instance(**data)
            self.log.info(
                "Created instance: {}".format(
                    json.dumps(data, sort_keys=True, indent=4)
                )
            )

View file

@ -0,0 +1,82 @@
import os
import pyblish.api
import openpype
class ExtractFrames(openpype.api.Extractor):
    """Extract tagged frames from the active timeline with oiiotool.

    For every frame index in ``instance.data["frames"]`` the source media
    of the track item at that timeline position is rendered to a single
    image file in the staging directory, then all outputs are attached to
    the instance as one representation.
    """

    order = pyblish.api.ExtractorOrder
    label = "Extract Frames"
    hosts = ["hiero"]
    families = ["frame"]
    # extensions treated as movie containers (frame addressed via --subimage)
    movie_extensions = ["mov", "mp4"]

    def process(self, instance):
        oiio_tool_path = openpype.lib.get_oiio_tools_path()
        staging_dir = self.staging_dir(instance)
        output_template = os.path.join(staging_dir, instance.data["name"])
        sequence = instance.context.data["activeTimeline"]
        # output extension is per-instance, not per-frame: hoisted out of
        # the loop (also avoids NameError below when "frames" is empty)
        output_ext = instance.data["format"]
        files = []
        for frame in instance.data["frames"]:
            track_item = sequence.trackItemAt(frame)
            media_source = track_item.source().mediaSource()
            input_path = media_source.fileinfos()[0].filename()
            # map timeline frame to the source media frame
            input_frame = (
                track_item.mapTimelineToSource(frame) +
                track_item.source().mediaSource().startTime()
            )
            output_path = output_template
            output_path += ".{:04d}.{}".format(int(frame), output_ext)

            args = [oiio_tool_path]
            ext = os.path.splitext(input_path)[1][1:]
            if ext in self.movie_extensions:
                # movie containers address frames as subimages
                args.extend(["--subimage", str(int(input_frame))])
            else:
                args.extend(["--frames", str(int(input_frame))])

            if ext == "exr":
                # apply gamma for display-referred output from linear exr
                args.extend(["--powc", "0.45,0.45,0.45,1.0"])

            args.extend([input_path, "-o", output_path])

            output = openpype.api.run_subprocess(args)

            failed_output = "oiiotool produced no output."
            if failed_output in output:
                raise ValueError(
                    "oiiotool processing failed. Args: {}".format(args)
                )

            files.append(output_path)

            # Feedback to user because "oiiotool" can make the publishing
            # appear unresponsive.
            self.log.info(
                "Processed {} of {} frames".format(
                    instance.data["frames"].index(frame) + 1,
                    len(instance.data["frames"])
                )
            )

        # single frame is published as a plain file name, multiple frames
        # as a list (image-sequence convention)
        if len(files) == 1:
            repre_files = os.path.basename(files[0])
        else:
            repre_files = [os.path.basename(x) for x in files]

        instance.data["representations"] = [
            {
                "name": output_ext,
                "ext": output_ext,
                "files": repre_files,
                "stagingDir": staging_dir
            }
        ]

View file

@ -1,12 +1,15 @@
import os
import pyblish.api
import hiero.ui
from openpype.hosts.hiero import api as phiero
from avalon import api as avalon
from pprint import pformat
from openpype.hosts.hiero.api.otio import hiero_export
from Qt.QtGui import QPixmap
import tempfile
from pprint import pformat
import pyblish.api
from Qt.QtGui import QPixmap
import hiero.ui
from openpype.pipeline import legacy_io
from openpype.hosts.hiero import api as phiero
from openpype.hosts.hiero.api.otio import hiero_export
class PrecollectWorkfile(pyblish.api.ContextPlugin):
@ -17,7 +20,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
def process(self, context):
asset = avalon.Session["AVALON_ASSET"]
asset = legacy_io.Session["AVALON_ASSET"]
subset = "workfile"
project = phiero.get_current_project()
active_timeline = hiero.ui.activeSequence()
@ -65,6 +68,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
"subset": "{}{}".format(asset, subset.capitalize()),
"item": project,
"family": "workfile",
"families": [],
"representations": [workfile_representation, thumb_representation]
}
@ -74,6 +78,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
# update context with main project attributes
context_data = {
"activeProject": project,
"activeTimeline": active_timeline,
"otioTimeline": otio_timeline,
"currentFile": curent_file,
"colorspace": self.get_colorspace(project),

View file

@ -1,5 +1,5 @@
from pyblish import api
from avalon import io
from openpype.pipeline import legacy_io
class CollectAssetBuilds(api.ContextPlugin):
@ -18,7 +18,7 @@ class CollectAssetBuilds(api.ContextPlugin):
def process(self, context):
asset_builds = {}
for asset in io.find({"type": "asset"}):
for asset in legacy_io.find({"type": "asset"}):
if asset["data"]["entityType"] == "AssetBuild":
self.log.debug("Found \"{}\" in database.".format(asset))
asset_builds[asset["name"]] = asset

View file

@ -1,38 +0,0 @@
import pyblish.api
class CollectClipResolution(pyblish.api.InstancePlugin):
    """Collect the geometry resolution of a clip instance.

    Uses the active sequence format by default; when the instance is
    flagged with `sourceResolution`, the source media resolution is
    collected instead.
    """

    order = pyblish.api.CollectorOrder - 0.1
    label = "Collect Clip Resolution"
    hosts = ["hiero"]
    families = ["clip"]

    def process(self, instance):
        sequence = instance.context.data['activeSequence']
        item = instance.data["item"]
        source_resolution = instance.data.get("sourceResolution", None)

        sequence_format = sequence.format()
        width = int(sequence_format.width())
        height = int(sequence_format.height())
        pixel_aspect = sequence_format.pixelAspect()

        # prefer the source media resolution when requested on the instance
        if source_resolution:
            media = item.source().mediaSource()
            width = int(media.width())
            height = int(media.height())
            pixel_aspect = media.pixelAspect()

        resolution_data = {
            "resolutionWidth": width,
            "resolutionHeight": height,
            "pixelAspect": pixel_aspect
        }
        # add to instance data
        instance.data.update(resolution_data)
        self.log.info("Resolution of instance '{}' is: {}".format(
            instance,
            resolution_data
        ))

View file

@ -1,15 +0,0 @@
import pyblish.api
class CollectHostVersion(pyblish.api.ContextPlugin):
    """Inject the host name and host version into the publish context."""

    label = "Collect Host and HostVersion"
    order = pyblish.api.CollectorOrder - 0.5

    def process(self, context):
        # Nuke is only importable inside the host process, so keep the
        # import local; the redundant in-function `import pyblish.api`
        # was removed (it is already imported at module level).
        import nuke

        context.set_data("host", pyblish.api.current_host())
        context.set_data('hostVersion', value=nuke.NUKE_VERSION_STRING)

View file

@ -1,32 +0,0 @@
from pyblish import api
class CollectTagRetime(api.InstancePlugin):
    """Collect retiming data from tags of the selected track items."""

    order = api.CollectorOrder + 0.014
    label = "Collect Retiming Tag"
    hosts = ["hiero"]
    families = ['clip']

    def process(self, instance):
        # walk the instance tags looking for retiming metadata
        for tag in instance.data["tags"]:
            metadata = dict(tag["metadata"])
            family = metadata.get("tag.family", "")

            # only task-family tags of the retiming kind are relevant
            if "retiming" not in family:
                continue

            margin_in = metadata.get("tag.marginIn", "")
            margin_out = metadata.get("tag.marginOut", "")
            instance.data["retimeMarginIn"] = int(margin_in)
            instance.data["retimeMarginOut"] = int(margin_out)
            instance.data["retime"] = True
            self.log.info("retimeMarginIn: `{}`".format(margin_in))
            self.log.info("retimeMarginOut: `{}`".format(margin_out))
            instance.data["families"] += ["retime"]

View file

@ -1,223 +0,0 @@
from compiler.ast import flatten
from pyblish import api
from openpype.hosts.hiero import api as phiero
import hiero
# from openpype.hosts.hiero.api import lib
# reload(lib)
# reload(phiero)
class PreCollectInstances(api.ContextPlugin):
"""Collect all Track items selection."""
order = api.CollectorOrder - 0.509
label = "Pre-collect Instances"
hosts = ["hiero"]
def process(self, context):
track_items = phiero.get_track_items(
selected=True, check_tagged=True, check_enabled=True)
# only return enabled track items
if not track_items:
track_items = phiero.get_track_items(
check_enabled=True, check_tagged=True)
# get sequence and video tracks
sequence = context.data["activeSequence"]
tracks = sequence.videoTracks()
# add collection to context
tracks_effect_items = self.collect_sub_track_items(tracks)
context.data["tracksEffectItems"] = tracks_effect_items
self.log.info(
"Processing enabled track items: {}".format(len(track_items)))
for _ti in track_items:
data = {}
clip = _ti.source()
# get clips subtracks and anotations
annotations = self.clip_annotations(clip)
subtracks = self.clip_subtrack(_ti)
self.log.debug("Annotations: {}".format(annotations))
self.log.debug(">> Subtracks: {}".format(subtracks))
# get pype tag data
tag_parsed_data = phiero.get_track_item_pype_data(_ti)
# self.log.debug(pformat(tag_parsed_data))
if not tag_parsed_data:
continue
if tag_parsed_data.get("id") != "pyblish.avalon.instance":
continue
# add tag data to instance data
data.update({
k: v for k, v in tag_parsed_data.items()
if k not in ("id", "applieswhole", "label")
})
asset = tag_parsed_data["asset"]
subset = tag_parsed_data["subset"]
review_track = tag_parsed_data.get("reviewTrack")
hiero_track = tag_parsed_data.get("heroTrack")
audio = tag_parsed_data.get("audio")
# remove audio attribute from data
data.pop("audio")
# insert family into families
family = tag_parsed_data["family"]
families = [str(f) for f in tag_parsed_data["families"]]
families.insert(0, str(family))
track = _ti.parent()
media_source = _ti.source().mediaSource()
source_path = media_source.firstpath()
file_head = media_source.filenameHead()
file_info = media_source.fileinfos().pop()
source_first_frame = int(file_info.startFrame())
# apply only for review and master track instance
if review_track and hiero_track:
families += ["review", "ftrack"]
data.update({
"name": "{} {} {}".format(asset, subset, families),
"asset": asset,
"item": _ti,
"families": families,
# tags
"tags": _ti.tags(),
# track item attributes
"track": track.name(),
"trackItem": track,
"reviewTrack": review_track,
# version data
"versionData": {
"colorspace": _ti.sourceMediaColourTransform()
},
# source attribute
"source": source_path,
"sourceMedia": media_source,
"sourcePath": source_path,
"sourceFileHead": file_head,
"sourceFirst": source_first_frame,
# clip's effect
"clipEffectItems": subtracks
})
instance = context.create_instance(**data)
self.log.info("Creating instance.data: {}".format(instance.data))
if audio:
a_data = dict()
# add tag data to instance data
a_data.update({
k: v for k, v in tag_parsed_data.items()
if k not in ("id", "applieswhole", "label")
})
# create main attributes
subset = "audioMain"
family = "audio"
families = ["clip", "ftrack"]
families.insert(0, str(family))
name = "{} {} {}".format(asset, subset, families)
a_data.update({
"name": name,
"subset": subset,
"asset": asset,
"family": family,
"families": families,
"item": _ti,
# tags
"tags": _ti.tags(),
})
a_instance = context.create_instance(**a_data)
self.log.info("Creating audio instance: {}".format(a_instance))
@staticmethod
def clip_annotations(clip):
"""
Returns list of Clip's hiero.core.Annotation
"""
annotations = []
subTrackItems = flatten(clip.subTrackItems())
annotations += [item for item in subTrackItems if isinstance(
item, hiero.core.Annotation)]
return annotations
@staticmethod
def clip_subtrack(clip):
"""
Returns list of Clip's hiero.core.SubTrackItem
"""
subtracks = []
subTrackItems = flatten(clip.parent().subTrackItems())
for item in subTrackItems:
# avoid all anotation
if isinstance(item, hiero.core.Annotation):
continue
# # avoid all not anaibled
if not item.isEnabled():
continue
subtracks.append(item)
return subtracks
@staticmethod
def collect_sub_track_items(tracks):
"""
Returns dictionary with track index as key and list of subtracks
"""
# collect all subtrack items
sub_track_items = dict()
for track in tracks:
items = track.items()
# skip if no clips on track > need track with effect only
if items:
continue
# skip all disabled tracks
if not track.isEnabled():
continue
track_index = track.trackIndex()
_sub_track_items = flatten(track.subTrackItems())
# continue only if any subtrack items are collected
if len(_sub_track_items) < 1:
continue
enabled_sti = list()
# loop all found subtrack items and check if they are enabled
for _sti in _sub_track_items:
# checking if not enabled
if not _sti.isEnabled():
continue
if isinstance(_sti, hiero.core.Annotation):
continue
# collect the subtrack item
enabled_sti.append(_sti)
# continue only if any subtrack items are collected
if len(enabled_sti) < 1:
continue
# add collection of subtrackitems to dict
sub_track_items[track_index] = enabled_sti
return sub_track_items

View file

@ -1,74 +0,0 @@
import os
import pyblish.api
from openpype.hosts.hiero import api as phiero
from avalon import api as avalon
class PreCollectWorkfile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
label = "Pre-collect Workfile"
order = pyblish.api.CollectorOrder - 0.51
def process(self, context):
asset = avalon.Session["AVALON_ASSET"]
subset = "workfile"
project = phiero.get_current_project()
active_sequence = phiero.get_current_sequence()
video_tracks = active_sequence.videoTracks()
audio_tracks = active_sequence.audioTracks()
current_file = project.path()
staging_dir = os.path.dirname(current_file)
base_name = os.path.basename(current_file)
# get workfile's colorspace properties
_clrs = {}
_clrs["useOCIOEnvironmentOverride"] = project.useOCIOEnvironmentOverride() # noqa
_clrs["lutSetting16Bit"] = project.lutSetting16Bit()
_clrs["lutSetting8Bit"] = project.lutSetting8Bit()
_clrs["lutSettingFloat"] = project.lutSettingFloat()
_clrs["lutSettingLog"] = project.lutSettingLog()
_clrs["lutSettingViewer"] = project.lutSettingViewer()
_clrs["lutSettingWorkingSpace"] = project.lutSettingWorkingSpace()
_clrs["lutUseOCIOForExport"] = project.lutUseOCIOForExport()
_clrs["ocioConfigName"] = project.ocioConfigName()
_clrs["ocioConfigPath"] = project.ocioConfigPath()
# set main project attributes to context
context.data["activeProject"] = project
context.data["activeSequence"] = active_sequence
context.data["videoTracks"] = video_tracks
context.data["audioTracks"] = audio_tracks
context.data["currentFile"] = current_file
context.data["colorspace"] = _clrs
self.log.info("currentFile: {}".format(current_file))
# creating workfile representation
representation = {
'name': 'hrox',
'ext': 'hrox',
'files': base_name,
"stagingDir": staging_dir,
}
instance_data = {
"name": "{}_{}".format(asset, subset),
"asset": asset,
"subset": "{}{}".format(asset, subset.capitalize()),
"item": project,
"family": "workfile",
# version data
"versionData": {
"colorspace": _clrs
},
# source attribute
"sourcePath": current_file,
"representations": [representation]
}
instance = context.create_instance(**instance_data)
self.log.info("Creating instance: {}".format(instance))

View file

@ -4,8 +4,8 @@ from contextlib import contextmanager
import six
from avalon import api, io
from openpype.api import get_asset
from openpype.pipeline import legacy_io
import hou
@ -75,9 +75,13 @@ def generate_ids(nodes, asset_id=None):
if asset_id is None:
# Get the asset ID from the database for the asset of current context
asset_data = io.find_one({"type": "asset",
"name": api.Session["AVALON_ASSET"]},
projection={"_id": True})
asset_data = legacy_io.find_one(
{
"type": "asset",
"name": legacy_io.Session["AVALON_ASSET"]
},
projection={"_id": True}
)
assert asset_data, "No current asset found in Session"
asset_id = asset_data['_id']
@ -155,7 +159,7 @@ def validate_fps():
if parent is None:
pass
else:
dialog = popup.Popup(parent=parent)
dialog = popup.PopupUpdateKeys(parent=parent)
dialog.setModal(True)
dialog.setWindowTitle("Houdini scene does not match project FPS")
dialog.setMessage("Scene %i FPS does not match project %i FPS" %
@ -163,7 +167,7 @@ def validate_fps():
dialog.setButtonText("Fix")
# on_show is the Fix button clicked callback
dialog.on_clicked.connect(lambda: set_scene_fps(fps))
dialog.on_clicked_state.connect(lambda: set_scene_fps(fps))
dialog.show()
@ -424,8 +428,8 @@ def maintained_selection():
def reset_framerange():
"""Set frame range to current asset"""
asset_name = api.Session["AVALON_ASSET"]
asset = io.find_one({"name": asset_name, "type": "asset"})
asset_name = legacy_io.Session["AVALON_ASSET"]
asset = legacy_io.find_one({"name": asset_name, "type": "asset"})
frame_start = asset["data"].get("frameStart")
frame_end = asset["data"].get("frameEnd")

View file

@ -1,11 +1,12 @@
"""Houdini-specific USD Library functions."""
import contextlib
import logging
from Qt import QtWidgets, QtCore, QtGui
from avalon import io
from openpype import style
from openpype.pipeline import legacy_io
from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget
from pxr import Sdf
@ -20,11 +21,12 @@ class SelectAssetDialog(QtWidgets.QWidget):
Args:
parm: Parameter where selected asset name is set.
"""
def __init__(self, parm):
self.setWindowTitle("Pick Asset")
self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.Popup)
assets_widget = SingleSelectAssetsWidget(io, parent=self)
assets_widget = SingleSelectAssetsWidget(legacy_io, parent=self)
layout = QtWidgets.QHBoxLayout(self)
layout.addWidget(assets_widget)
@ -44,7 +46,7 @@ class SelectAssetDialog(QtWidgets.QWidget):
select_id = None
name = self._parm.eval()
if name:
db_asset = io.find_one(
db_asset = legacy_io.find_one(
{"name": name, "type": "asset"},
{"_id": True}
)

View file

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import hou
from avalon import io
from openpype.pipeline import legacy_io
from openpype.hosts.houdini.api import lib
from openpype.hosts.houdini.api import plugin
@ -22,13 +23,16 @@ class CreateHDA(plugin.Creator):
# type: (str) -> bool
"""Check if existing subset name versions already exists."""
# Get all subsets of the current asset
asset_id = io.find_one({"name": self.data["asset"], "type": "asset"},
projection={"_id": True})['_id']
subset_docs = io.find(
asset_id = legacy_io.find_one(
{"name": self.data["asset"], "type": "asset"},
projection={"_id": True}
)['_id']
subset_docs = legacy_io.find(
{
"type": "subset",
"parent": asset_id
}, {"name": 1}
},
{"name": 1}
)
existing_subset_names = set(subset_docs.distinct("name"))
existing_subset_names_low = {

View file

@ -1,6 +1,6 @@
import pyblish.api
from avalon import io
from openpype.pipeline import legacy_io
import openpype.lib.usdlib as usdlib
@ -50,7 +50,10 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin):
self.log.debug("Add bootstrap for: %s" % bootstrap)
asset = io.find_one({"name": instance.data["asset"], "type": "asset"})
asset = legacy_io.find_one({
"name": instance.data["asset"],
"type": "asset"
})
assert asset, "Asset must exist: %s" % asset
# Check which are not about to be created and don't exist yet
@ -104,7 +107,8 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin):
# Or, if they already exist in the database we can
# skip them too.
return bool(
io.find_one(
{"name": subset, "type": "subset", "parent": asset["_id"]}
legacy_io.find_one(
{"name": subset, "type": "subset", "parent": asset["_id"]},
{"_id": True}
)
)

View file

@ -7,7 +7,10 @@ from collections import deque
import pyblish.api
import openpype.api
from openpype.pipeline import get_representation_path
from openpype.pipeline import (
get_representation_path,
legacy_io,
)
import openpype.hosts.houdini.api.usd as hou_usdlib
from openpype.hosts.houdini.api.lib import render_rop
@ -266,8 +269,6 @@ class ExtractUSDLayered(openpype.api.Extractor):
instance.data["files"].append(fname)
def _compare_with_latest_publish(self, dependency, new_file):
from avalon import api, io
import filecmp
_, ext = os.path.splitext(new_file)
@ -275,10 +276,10 @@ class ExtractUSDLayered(openpype.api.Extractor):
# Compare this dependency with the latest published version
# to detect whether we should make this into a new publish
# version. If not, skip it.
asset = io.find_one(
asset = legacy_io.find_one(
{"name": dependency.data["asset"], "type": "asset"}
)
subset = io.find_one(
subset = legacy_io.find_one(
{
"name": dependency.data["subset"],
"type": "subset",
@ -290,7 +291,7 @@ class ExtractUSDLayered(openpype.api.Extractor):
self.log.debug("No existing subset..")
return False
version = io.find_one(
version = legacy_io.find_one(
{"type": "version", "parent": subset["_id"], },
sort=[("name", -1)]
)
@ -298,7 +299,7 @@ class ExtractUSDLayered(openpype.api.Extractor):
self.log.debug("No existing version..")
return False
representation = io.find_one(
representation = legacy_io.find_one(
{
"name": ext.lstrip("."),
"type": "representation",

View file

@ -1,9 +1,9 @@
import re
import pyblish.api
import openpype.api
from avalon import io
import openpype.api
from openpype.pipeline import legacy_io
class ValidateUSDShadeModelExists(pyblish.api.InstancePlugin):
@ -23,16 +23,20 @@ class ValidateUSDShadeModelExists(pyblish.api.InstancePlugin):
shade_subset = subset.split(".", 1)[0]
model_subset = re.sub("^usdShade", "usdModel", shade_subset)
asset_doc = io.find_one({"name": asset, "type": "asset"})
asset_doc = legacy_io.find_one(
{"name": asset, "type": "asset"},
{"_id": True}
)
if not asset_doc:
raise RuntimeError("Asset does not exist: %s" % asset)
subset_doc = io.find_one(
subset_doc = legacy_io.find_one(
{
"name": model_subset,
"type": "subset",
"parent": asset_doc["_id"],
}
},
{"_id": True}
)
if not subset_doc:
raise RuntimeError(

View file

@ -1,17 +1,21 @@
import os
import hou
import husdoutputprocessors.base as base
import os
import re
import logging
import colorbleed.usdlib as usdlib
from openpype.pipeline import (
legacy_io,
registered_root,
)
def _get_project_publish_template():
"""Return publish template from database for current project"""
from avalon import io
project = io.find_one({"type": "project"},
projection={"config.template.publish": True})
project = legacy_io.find_one(
{"type": "project"},
projection={"config.template.publish": True}
)
return project["config"]["template"]["publish"]
@ -133,12 +137,11 @@ class AvalonURIOutputProcessor(base.OutputProcessorBase):
"""
from avalon import api, io
from openpype.pipeline import registered_root
PROJECT = api.Session["AVALON_PROJECT"]
asset_doc = io.find_one({"name": asset,
"type": "asset"})
PROJECT = legacy_io.Session["AVALON_PROJECT"]
asset_doc = legacy_io.find_one({
"name": asset,
"type": "asset"
})
if not asset_doc:
raise RuntimeError("Invalid asset name: '%s'" % asset)

View file

@ -2,8 +2,8 @@
from __future__ import absolute_import
import pyblish.api
from avalon import io
from openpype.pipeline import legacy_io
from openpype.api import get_errored_instances_from_context
@ -75,8 +75,10 @@ class GenerateUUIDsOnInvalidAction(pyblish.api.Action):
from . import lib
asset = instance.data['asset']
asset_id = io.find_one({"name": asset, "type": "asset"},
projection={"_id": True})['_id']
asset_id = legacy_io.find_one(
{"name": asset, "type": "asset"},
projection={"_id": True}
)['_id']
for node, _id in lib.generate_ids(nodes, asset_id=asset_id):
lib.set_id(node, _id, overwrite=True)

View file

@ -1,7 +1,8 @@
# -*- coding: utf-8 -*-
"""OpenPype script commands to be used directly in Maya."""
from maya import cmds
from avalon import api, io
from openpype.pipeline import legacy_io
class ToolWindows:
@ -73,13 +74,13 @@ def reset_frame_range():
59.94: '59.94fps',
44100: '44100fps',
48000: '48000fps'
}.get(float(api.Session.get("AVALON_FPS", 25)), "pal")
}.get(float(legacy_io.Session.get("AVALON_FPS", 25)), "pal")
cmds.currentUnit(time=fps)
# Set frame start/end
asset_name = api.Session["AVALON_ASSET"]
asset = io.find_one({"name": asset_name, "type": "asset"})
asset_name = legacy_io.Session["AVALON_ASSET"]
asset = legacy_io.find_one({"name": asset_name, "type": "asset"})
frame_start = asset["data"].get("frameStart")
frame_end = asset["data"].get("frameEnd")
@ -144,8 +145,8 @@ def reset_resolution():
resolution_height = 1080
# Get resolution from asset
asset_name = api.Session["AVALON_ASSET"]
asset_doc = io.find_one({"name": asset_name, "type": "asset"})
asset_name = legacy_io.Session["AVALON_ASSET"]
asset_doc = legacy_io.find_one({"name": asset_name, "type": "asset"})
resolution = _resolution_from_document(asset_doc)
# Try get resolution from project
if resolution is None:
@ -154,7 +155,7 @@ def reset_resolution():
"Asset \"{}\" does not have set resolution."
" Trying to get resolution from project"
).format(asset_name))
project_doc = io.find_one({"type": "project"})
project_doc = legacy_io.find_one({"type": "project"})
resolution = _resolution_from_document(project_doc)
if resolution is None:

View file

@ -17,11 +17,10 @@ import bson
from maya import cmds, mel
import maya.api.OpenMaya as om
from avalon import api, io
from openpype import lib
from openpype.api import get_anatomy_settings
from openpype.pipeline import (
legacy_io,
discover_loader_plugins,
loaders_from_representation,
get_representation_path,
@ -1388,9 +1387,13 @@ def generate_ids(nodes, asset_id=None):
if asset_id is None:
# Get the asset ID from the database for the asset of current context
asset_data = io.find_one({"type": "asset",
"name": api.Session["AVALON_ASSET"]},
projection={"_id": True})
asset_data = legacy_io.find_one(
{
"type": "asset",
"name": legacy_io.Session["AVALON_ASSET"]
},
projection={"_id": True}
)
assert asset_data, "No current asset found in Session"
asset_id = asset_data['_id']
@ -1545,9 +1548,11 @@ def list_looks(asset_id):
# # get all subsets with look leading in
# the name associated with the asset
subset = io.find({"parent": bson.ObjectId(asset_id),
"type": "subset",
"name": {"$regex": "look*"}})
subset = legacy_io.find({
"parent": bson.ObjectId(asset_id),
"type": "subset",
"name": {"$regex": "look*"}
})
return list(subset)
@ -1566,13 +1571,17 @@ def assign_look_by_version(nodes, version_id):
"""
# Get representations of shader file and relationships
look_representation = io.find_one({"type": "representation",
"parent": version_id,
"name": "ma"})
look_representation = legacy_io.find_one({
"type": "representation",
"parent": version_id,
"name": "ma"
})
json_representation = io.find_one({"type": "representation",
"parent": version_id,
"name": "json"})
json_representation = legacy_io.find_one({
"type": "representation",
"parent": version_id,
"name": "json"
})
# See if representation is already loaded, if so reuse it.
host = registered_host()
@ -1637,9 +1646,11 @@ def assign_look(nodes, subset="lookDefault"):
except bson.errors.InvalidId:
log.warning("Asset ID is not compatible with bson")
continue
subset_data = io.find_one({"type": "subset",
"name": subset,
"parent": asset_id})
subset_data = legacy_io.find_one({
"type": "subset",
"name": subset,
"parent": asset_id
})
if not subset_data:
log.warning("No subset '{}' found for {}".format(subset, asset_id))
@ -1647,13 +1658,18 @@ def assign_look(nodes, subset="lookDefault"):
# get last version
# with backwards compatibility
version = io.find_one({"parent": subset_data['_id'],
"type": "version",
"data.families":
{"$in": ["look"]}
},
sort=[("name", -1)],
projection={"_id": True, "name": True})
version = legacy_io.find_one(
{
"parent": subset_data['_id'],
"type": "version",
"data.families": {"$in": ["look"]}
},
sort=[("name", -1)],
projection={
"_id": True,
"name": True
}
)
log.debug("Assigning look '{}' <v{:03d}>".format(subset,
version["name"]))
@ -2136,7 +2152,7 @@ def reset_scene_resolution():
None
"""
project_doc = io.find_one({"type": "project"})
project_doc = legacy_io.find_one({"type": "project"})
project_data = project_doc["data"]
asset_data = lib.get_asset()["data"]
@ -2169,13 +2185,13 @@ def set_context_settings():
"""
# Todo (Wijnand): apply renderer and resolution of project
project_doc = io.find_one({"type": "project"})
project_doc = legacy_io.find_one({"type": "project"})
project_data = project_doc["data"]
asset_data = lib.get_asset()["data"]
# Set project fps
fps = asset_data.get("fps", project_data.get("fps", 25))
api.Session["AVALON_FPS"] = str(fps)
legacy_io.Session["AVALON_FPS"] = str(fps)
set_scene_fps(fps)
reset_scene_resolution()
@ -2210,15 +2226,17 @@ def validate_fps():
parent = get_main_window()
dialog = popup.Popup2(parent=parent)
dialog = popup.PopupUpdateKeys(parent=parent)
dialog.setModal(True)
dialog.setWindowTitle("Maya scene not in line with project")
dialog.setMessage("The FPS is out of sync, please fix")
dialog.setWindowTitle("Maya scene does not match project FPS")
dialog.setMessage("Scene %i FPS does not match project %i FPS" %
(current_fps, fps))
dialog.setButtonText("Fix")
# Set new text for button (add optional argument for the popup?)
toggle = dialog.widgets["toggle"]
update = toggle.isChecked()
dialog.on_show.connect(lambda: set_scene_fps(fps, update))
dialog.on_clicked_state.connect(lambda: set_scene_fps(fps, update))
dialog.show()
@ -2935,7 +2953,7 @@ def update_content_on_context_change():
This will update scene content to match new asset on context change
"""
scene_sets = cmds.listSets(allSets=True)
new_asset = api.Session["AVALON_ASSET"]
new_asset = legacy_io.Session["AVALON_ASSET"]
new_data = lib.get_asset()["data"]
for s in scene_sets:
try:

View file

@ -79,6 +79,7 @@ IMAGE_PREFIXES = {
"redshift": "defaultRenderGlobals.imageFilePrefix",
}
RENDERMAN_IMAGE_DIR = "maya/<scene>/<layer>"
@attr.s
class LayerMetadata(object):
@ -1054,6 +1055,8 @@ class RenderProductsRenderman(ARenderProducts):
:func:`ARenderProducts.get_render_products()`
"""
from rfm2.api.displays import get_displays # noqa
cameras = [
self.sanitize_camera_name(c)
for c in self.get_renderable_cameras()
@ -1066,42 +1069,56 @@ class RenderProductsRenderman(ARenderProducts):
]
products = []
default_ext = "exr"
displays = cmds.listConnections("rmanGlobals.displays")
for aov in displays:
enabled = self._get_attr(aov, "enabled")
# NOTE: This is guessing extensions from renderman display types.
# Some of them are just framebuffers, d_texture format can be
# set in display setting. We set those now to None, but it
# should be handled more gracefully.
display_types = {
"d_deepexr": "exr",
"d_it": None,
"d_null": None,
"d_openexr": "exr",
"d_png": "png",
"d_pointcloud": "ptc",
"d_targa": "tga",
"d_texture": None,
"d_tiff": "tif"
}
displays = get_displays()["displays"]
for name, display in displays.items():
enabled = display["params"]["enable"]["value"]
if not enabled:
continue
aov_name = str(aov)
aov_name = name
if aov_name == "rmanDefaultDisplay":
aov_name = "beauty"
extensions = display_types.get(
display["driverNode"]["type"], "exr")
for camera in cameras:
product = RenderProduct(productName=aov_name,
ext=default_ext,
ext=extensions,
camera=camera)
products.append(product)
return products
def get_files(self, product, camera):
def get_files(self, product):
"""Get expected files.
In renderman we hack it with prepending path. This path would
normally be translated from `rmanGlobals.imageOutputDir`. We skip
this and hardcode prepend path we expect. There is no place for user
to mess around with this settings anyway and it is enforced in
render settings validator.
"""
files = super(RenderProductsRenderman, self).get_files(product, camera)
files = super(RenderProductsRenderman, self).get_files(product)
layer_data = self.layer_data
new_files = []
resolved_image_dir = re.sub("<scene>", layer_data.sceneName, RENDERMAN_IMAGE_DIR, flags=re.IGNORECASE) # noqa: E501
resolved_image_dir = re.sub("<layer>", layer_data.layerName, resolved_image_dir, flags=re.IGNORECASE) # noqa: E501
for file in files:
new_file = "{}/{}/{}".format(
layer_data["sceneName"], layer_data["layerName"], file
)
new_file = "{}/{}".format(resolved_image_dir, file)
new_files.append(new_file)
return new_files

View file

@ -6,10 +6,9 @@ from Qt import QtWidgets, QtGui
import maya.utils
import maya.cmds as cmds
import avalon.api
from openpype.api import BuildWorkfile
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
from openpype.tools.utils import host_tools
from openpype.hosts.maya.api import lib
from .lib import get_main_window, IS_HEADLESS
@ -40,15 +39,15 @@ def install():
parent_widget = get_main_window()
cmds.menu(
MENU_NAME,
label=avalon.api.Session["AVALON_LABEL"],
label=legacy_io.Session["AVALON_LABEL"],
tearOff=True,
parent="MayaWindow"
)
# Create context menu
context_label = "{}, {}".format(
avalon.api.Session["AVALON_ASSET"],
avalon.api.Session["AVALON_TASK"]
legacy_io.Session["AVALON_ASSET"],
legacy_io.Session["AVALON_TASK"]
)
cmds.menuItem(
"currentContext",
@ -211,7 +210,7 @@ def update_menu_task_label():
return
label = "{}, {}".format(
avalon.api.Session["AVALON_ASSET"],
avalon.api.Session["AVALON_TASK"]
legacy_io.Session["AVALON_ASSET"],
legacy_io.Session["AVALON_TASK"]
)
cmds.menuItem(object_name, edit=True, label=label)

View file

@ -7,7 +7,6 @@ from maya import utils, cmds, OpenMaya
import maya.api.OpenMaya as om
import pyblish.api
import avalon.api
import openpype.hosts.maya
from openpype.tools.utils import host_tools
@ -18,6 +17,7 @@ from openpype.lib import (
)
from openpype.lib.path_tools import HostDirmap
from openpype.pipeline import (
legacy_io,
register_loader_plugin_path,
register_inventory_action_path,
register_creator_plugin_path,
@ -93,7 +93,7 @@ def _set_project():
None
"""
workdir = avalon.api.Session["AVALON_WORKDIR"]
workdir = legacy_io.Session["AVALON_WORKDIR"]
try:
os.makedirs(workdir)
@ -448,7 +448,7 @@ def on_open():
dialog.setWindowTitle("Maya scene has outdated content")
dialog.setMessage("There are outdated containers in "
"your Maya scene.")
dialog.on_show.connect(_on_show_inventory)
dialog.on_clicked.connect(_on_show_inventory)
dialog.show()
@ -473,7 +473,7 @@ def on_task_changed():
# Run
menu.update_menu_task_label()
workdir = avalon.api.Session["AVALON_WORKDIR"]
workdir = legacy_io.Session["AVALON_WORKDIR"]
if os.path.exists(workdir):
log.info("Updating Maya workspace for task change to %s", workdir)
@ -494,9 +494,9 @@ def on_task_changed():
lib.update_content_on_context_change()
msg = " project: {}\n asset: {}\n task:{}".format(
avalon.api.Session["AVALON_PROJECT"],
avalon.api.Session["AVALON_ASSET"],
avalon.api.Session["AVALON_TASK"]
legacy_io.Session["AVALON_PROJECT"],
legacy_io.Session["AVALON_ASSET"],
legacy_io.Session["AVALON_TASK"]
)
lib.show_message(

View file

@ -10,8 +10,9 @@ from bson.objectid import ObjectId
from maya import cmds
from avalon import io
from openpype.pipeline import (
schema,
legacy_io,
discover_loader_plugins,
loaders_from_representation,
load_container,
@ -253,7 +254,6 @@ def get_contained_containers(container):
"""
import avalon.schema
from .pipeline import parse_container
# Get avalon containers in this package setdress container
@ -263,7 +263,7 @@ def get_contained_containers(container):
try:
member_container = parse_container(node)
containers.append(member_container)
except avalon.schema.ValidationError:
except schema.ValidationError:
pass
return containers
@ -283,21 +283,23 @@ def update_package_version(container, version):
"""
# Versioning (from `core.maya.pipeline`)
current_representation = io.find_one({
current_representation = legacy_io.find_one({
"_id": ObjectId(container["representation"])
})
assert current_representation is not None, "This is a bug"
version_, subset, asset, project = io.parenthood(current_representation)
version_, subset, asset, project = legacy_io.parenthood(
current_representation
)
if version == -1:
new_version = io.find_one({
new_version = legacy_io.find_one({
"type": "version",
"parent": subset["_id"]
}, sort=[("name", -1)])
else:
new_version = io.find_one({
new_version = legacy_io.find_one({
"type": "version",
"parent": subset["_id"],
"name": version,
@ -306,7 +308,7 @@ def update_package_version(container, version):
assert new_version is not None, "This is a bug"
# Get the new representation (new file)
new_representation = io.find_one({
new_representation = legacy_io.find_one({
"type": "representation",
"parent": new_version["_id"],
"name": current_representation["name"]
@ -328,7 +330,7 @@ def update_package(set_container, representation):
"""
# Load the original package data
current_representation = io.find_one({
current_representation = legacy_io.find_one({
"_id": ObjectId(set_container['representation']),
"type": "representation"
})
@ -479,10 +481,10 @@ def update_scene(set_container, containers, current_data, new_data, new_file):
# Check whether the conversion can be done by the Loader.
# They *must* use the same asset, subset and Loader for
# `update_container` to make sense.
old = io.find_one({
old = legacy_io.find_one({
"_id": ObjectId(representation_current)
})
new = io.find_one({
new = legacy_io.find_one({
"_id": ObjectId(representation_new)
})
is_valid = compare_representations(old=old, new=new)

View file

@ -18,9 +18,10 @@ from openpype.api import (
get_project_settings,
get_asset)
from openpype.modules import ModulesManager
from openpype.pipeline import CreatorError
from avalon.api import Session
from openpype.pipeline import (
CreatorError,
legacy_io,
)
class CreateRender(plugin.Creator):
@ -75,7 +76,7 @@ class CreateRender(plugin.Creator):
'mentalray': 'defaultRenderGlobals.imageFilePrefix',
'vray': 'vraySettings.fileNamePrefix',
'arnold': 'defaultRenderGlobals.imageFilePrefix',
'renderman': 'defaultRenderGlobals.imageFilePrefix',
'renderman': 'rmanGlobals.imageFileFormat',
'redshift': 'defaultRenderGlobals.imageFilePrefix'
}
@ -83,7 +84,9 @@ class CreateRender(plugin.Creator):
'mentalray': 'maya/<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>', # noqa
'vray': 'maya/<scene>/<Layer>/<Layer>',
'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>', # noqa
'renderman': 'maya/<Scene>/<layer>/<layer>{aov_separator}<aov>',
# this needs `imageOutputDir`
# (<ws>/renders/maya/<scene>) set separately
'renderman': '<layer>_<aov>.<f4>.<ext>',
'redshift': 'maya/<Scene>/<RenderLayer>/<RenderLayer>' # noqa
}
@ -103,7 +106,7 @@ class CreateRender(plugin.Creator):
self.deadline_servers = {}
return
self._project_settings = get_project_settings(
Session["AVALON_PROJECT"])
legacy_io.Session["AVALON_PROJECT"])
# project_settings/maya/create/CreateRender/aov_separator
try:
@ -439,6 +442,10 @@ class CreateRender(plugin.Creator):
self._set_global_output_settings()
if renderer == "renderman":
cmds.setAttr("rmanGlobals.imageOutputDir",
"maya/<scene>/<layer>", type="string")
def _set_vray_settings(self, asset):
# type: (dict) -> None
"""Sets important settings for Vray."""

View file

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""Creator for Unreal Skeletal Meshes."""
from openpype.hosts.maya.api import plugin, lib
from avalon.api import Session
from openpype.pipeline import legacy_io
from maya import cmds # noqa
@ -26,7 +26,7 @@ class CreateUnrealSkeletalMesh(plugin.Creator):
dynamic_data = super(CreateUnrealSkeletalMesh, cls).get_dynamic_data(
variant, task_name, asset_id, project_name, host_name
)
dynamic_data["asset"] = Session.get("AVALON_ASSET")
dynamic_data["asset"] = legacy_io.Session.get("AVALON_ASSET")
return dynamic_data
def process(self):

View file

@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
"""Creator for Unreal Static Meshes."""
from openpype.hosts.maya.api import plugin, lib
from avalon.api import Session
from openpype.api import get_project_settings
from openpype.pipeline import legacy_io
from maya import cmds # noqa
@ -18,7 +18,7 @@ class CreateUnrealStaticMesh(plugin.Creator):
"""Constructor."""
super(CreateUnrealStaticMesh, self).__init__(*args, **kwargs)
self._project_settings = get_project_settings(
Session["AVALON_PROJECT"])
legacy_io.Session["AVALON_PROJECT"])
@classmethod
def get_dynamic_data(
@ -27,7 +27,7 @@ class CreateUnrealStaticMesh(plugin.Creator):
dynamic_data = super(CreateUnrealStaticMesh, cls).get_dynamic_data(
variant, task_name, asset_id, project_name, host_name
)
dynamic_data["asset"] = Session.get("AVALON_ASSET")
dynamic_data["asset"] = legacy_io.Session.get("AVALON_ASSET")
return dynamic_data
def process(self):

View file

@ -18,11 +18,12 @@ from openpype.api import (
)
from openpype.lib import requests_get
from openpype.pipeline import CreatorError
from openpype.pipeline import (
CreatorError,
legacy_io,
)
from openpype.modules import ModulesManager
from avalon.api import Session
class CreateVRayScene(plugin.Creator):
"""Create Vray Scene."""
@ -47,7 +48,7 @@ class CreateVRayScene(plugin.Creator):
self.deadline_servers = {}
return
self._project_settings = get_project_settings(
Session["AVALON_PROJECT"])
legacy_io.Session["AVALON_PROJECT"])
try:
default_servers = deadline_settings["deadline_urls"]

View file

@ -1,9 +1,10 @@
import json
from avalon import io
from bson.objectid import ObjectId
from openpype.pipeline import (
InventoryAction,
get_representation_context,
legacy_io,
)
from openpype.hosts.maya.api.lib import (
maintained_selection,
@ -39,7 +40,7 @@ class ImportModelRender(InventoryAction):
else:
nodes.append(n)
repr_doc = io.find_one({
repr_doc = legacy_io.find_one({
"_id": ObjectId(container["representation"]),
})
version_id = repr_doc["parent"]
@ -63,7 +64,7 @@ class ImportModelRender(InventoryAction):
from maya import cmds
# Get representations of shader file and relationships
look_repr = io.find_one({
look_repr = legacy_io.find_one({
"type": "representation",
"parent": version_id,
"name": {"$regex": self.scene_type_regex},
@ -72,7 +73,7 @@ class ImportModelRender(InventoryAction):
print("No model render sets for this model version..")
return
json_repr = io.find_one({
json_repr = legacy_io.find_one({
"type": "representation",
"parent": version_id,
"name": self.look_data_type,

View file

@ -1,8 +1,9 @@
from maya import cmds, mel
from avalon import io
from openpype.pipeline import (
legacy_io,
load,
get_representation_path
get_representation_path,
)
from openpype.hosts.maya.api.pipeline import containerise
from openpype.hosts.maya.api.lib import unique_namespace
@ -64,9 +65,9 @@ class AudioLoader(load.LoaderPlugin):
)
# Set frame range.
version = io.find_one({"_id": representation["parent"]})
subset = io.find_one({"_id": version["parent"]})
asset = io.find_one({"_id": subset["parent"]})
version = legacy_io.find_one({"_id": representation["parent"]})
subset = legacy_io.find_one({"_id": version["parent"]})
asset = legacy_io.find_one({"_id": subset["parent"]})
audio_node.sourceStart.set(1 - asset["data"]["frameStart"])
audio_node.sourceEnd.set(asset["data"]["frameEnd"])

View file

@ -1,7 +1,7 @@
from Qt import QtWidgets, QtCore
from avalon import io
from openpype.pipeline import (
legacy_io,
load,
get_representation_path
)
@ -216,9 +216,9 @@ class ImagePlaneLoader(load.LoaderPlugin):
)
# Set frame range.
version = io.find_one({"_id": representation["parent"]})
subset = io.find_one({"_id": version["parent"]})
asset = io.find_one({"_id": subset["parent"]})
version = legacy_io.find_one({"_id": representation["parent"]})
subset = legacy_io.find_one({"_id": version["parent"]})
asset = legacy_io.find_one({"_id": subset["parent"]})
start_frame = asset["data"]["frameStart"]
end_frame = asset["data"]["frameEnd"]
image_plane_shape.frameOffset.set(1 - start_frame)

View file

@ -5,8 +5,10 @@ from collections import defaultdict
from Qt import QtWidgets
from avalon import io
from openpype.pipeline import get_representation_path
from openpype.pipeline import (
legacy_io,
get_representation_path,
)
import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api import lib
from openpype.widgets.message_window import ScrollMessageBox
@ -71,7 +73,7 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
shader_nodes = cmds.ls(members, type='shadingEngine')
nodes = set(self._get_nodes_with_shader(shader_nodes))
json_representation = io.find_one({
json_representation = legacy_io.find_one({
"type": "representation",
"parent": representation['parent'],
"name": "json"

View file

@ -1,10 +1,12 @@
import os
from maya import cmds
from avalon import api
from openpype.api import get_project_settings
from openpype.lib import get_creator_by_name
from openpype.pipeline import legacy_create
from openpype.pipeline import (
legacy_io,
legacy_create,
)
import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api.lib import maintained_selection
@ -143,7 +145,7 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
roots = cmds.ls(self[:], assemblies=True, long=True)
assert roots, "No root nodes in rig, this is a bug."
asset = api.Session["AVALON_ASSET"]
asset = legacy_io.Session["AVALON_ASSET"]
dependency = str(context["representation"]["_id"])
self.log.info("Creating subset: {}".format(namespace))

View file

@ -11,9 +11,9 @@ from bson.objectid import ObjectId
import maya.cmds as cmds
from avalon import io
from openpype.api import get_project_settings
from openpype.pipeline import (
legacy_io,
load,
get_representation_path
)
@ -185,12 +185,11 @@ class VRayProxyLoader(load.LoaderPlugin):
"""
self.log.debug(
"Looking for abc in published representations of this version.")
abc_rep = io.find_one(
{
"type": "representation",
"parent": ObjectId(version_id),
"name": "abc"
})
abc_rep = legacy_io.find_one({
"type": "representation",
"parent": ObjectId(version_id),
"name": "abc"
})
if abc_rep:
self.log.debug("Found, we'll link alembic to vray proxy.")

View file

@ -7,9 +7,9 @@ from pprint import pprint
from maya import cmds
from avalon import io
from openpype.api import get_project_settings
from openpype.pipeline import (
legacy_io,
load,
get_representation_path
)
@ -111,11 +111,11 @@ class YetiCacheLoader(load.LoaderPlugin):
def update(self, container, representation):
io.install()
legacy_io.install()
namespace = container["namespace"]
container_node = container["objectName"]
fur_settings = io.find_one(
fur_settings = legacy_io.find_one(
{"parent": representation["parent"], "name": "fursettings"}
)

View file

@ -1,23 +1,16 @@
from maya import cmds
import pymel.core as pm
import pyblish.api
import avalon.api
class CollectAssData(pyblish.api.InstancePlugin):
"""Collect Ass data
"""
"""Collect Ass data."""
order = pyblish.api.CollectorOrder + 0.2
label = 'Collect Ass'
families = ["ass"]
def process(self, instance):
context = instance.context
objsets = instance.data['setMembers']
for objset in objsets:

View file

@ -49,8 +49,8 @@ import maya.app.renderSetup.model.renderSetup as renderSetup
import pyblish.api
from avalon import api
from openpype.lib import get_formatted_current_time
from openpype.pipeline import legacy_io
from openpype.hosts.maya.api.lib_renderproducts import get as get_layer_render_products # noqa: E501
from openpype.hosts.maya.api import lib
@ -93,7 +93,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
render_globals = render_instance
collected_render_layers = render_instance.data["setMembers"]
filepath = context.data["currentFile"].replace("\\", "/")
asset = api.Session["AVALON_ASSET"]
asset = legacy_io.Session["AVALON_ASSET"]
workspace = context.data["workspaceDir"]
deadline_settings = (
@ -194,13 +194,11 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
assert render_products, "no render products generated"
exp_files = []
multipart = False
render_cameras = []
for product in render_products:
if product.multipart:
multipart = True
product_name = product.productName
if product.camera and layer_render_products.has_camera_token():
render_cameras.append(product.camera)
product_name = "{}{}".format(
product.camera,
"_" + product_name if product_name else "")
@ -210,7 +208,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
product)
})
assert render_cameras, "No render cameras found."
has_cameras = any(product.camera for product in render_products)
assert has_cameras, "No render cameras found."
self.log.info("multipart: {}".format(
multipart))

View file

@ -2,7 +2,8 @@ from maya import cmds, mel
import pymel.core as pm
import pyblish.api
import avalon.api
from openpype.pipeline import legacy_io
class CollectReview(pyblish.api.InstancePlugin):
@ -19,7 +20,7 @@ class CollectReview(pyblish.api.InstancePlugin):
self.log.debug('instance: {}'.format(instance))
task = avalon.api.Session["AVALON_TASK"]
task = legacy_io.Session["AVALON_TASK"]
# get cameras
members = instance.data['setMembers']

Some files were not shown because too many files have changed in this diff Show more