Merge branch 'develop' into bugfix/napi_use_inpanel

Commit bb86c4fd05 by Zipodod, 2023-06-30 10:18:49 -04:00 (committed by GitHub)
265 changed files with 8260 additions and 2512 deletions

View file

@ -1,6 +1,6 @@
{
"projectName": "OpenPype",
"projectOwner": "pypeclub",
"projectOwner": "ynput",
"repoType": "github",
"repoHost": "https://github.com",
"files": [
@ -319,8 +319,18 @@
"code",
"doc"
]
},
{
"login": "movalex",
"name": "Alexey Bogomolov",
"avatar_url": "https://avatars.githubusercontent.com/u/11698866?v=4",
"profile": "http://abogomolov.com",
"contributions": [
"code"
]
}
],
"contributorsPerLine": 7,
"skipCi": true
"skipCi": true,
"commitType": "docs"
}

View file

@ -35,6 +35,18 @@ body:
label: Version
description: What version are you running? Look to OpenPype Tray
options:
- 3.15.12-nightly.1
- 3.15.11
- 3.15.11-nightly.5
- 3.15.11-nightly.4
- 3.15.11-nightly.3
- 3.15.11-nightly.2
- 3.15.11-nightly.1
- 3.15.10
- 3.15.10-nightly.2
- 3.15.10-nightly.1
- 3.15.9
- 3.15.9-nightly.2
- 3.15.9-nightly.1
- 3.15.8
- 3.15.8-nightly.3
@ -123,18 +135,6 @@ body:
- 3.14.4-nightly.3
- 3.14.4-nightly.2
- 3.14.4-nightly.1
- 3.14.3
- 3.14.3-nightly.7
- 3.14.3-nightly.6
- 3.14.3-nightly.5
- 3.14.3-nightly.4
- 3.14.3-nightly.3
- 3.14.3-nightly.2
- 3.14.3-nightly.1
- 3.14.2
- 3.14.2-nightly.5
- 3.14.2-nightly.4
- 3.14.2-nightly.3
validations:
required: true
- type: dropdown

File diff suppressed because it is too large.

View file

@ -1,6 +1,6 @@
<!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section -->
[![All Contributors](https://img.shields.io/badge/all_contributors-27-orange.svg?style=flat-square)](#contributors-)
[![All Contributors](https://img.shields.io/badge/all_contributors-28-orange.svg?style=flat-square)](#contributors-)
<!-- ALL-CONTRIBUTORS-BADGE:END -->
OpenPype
====
@ -303,41 +303,44 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
<!-- prettier-ignore-start -->
<!-- markdownlint-disable -->
<table>
<tr>
<td align="center"><a href="http://pype.club/"><img src="https://avatars.githubusercontent.com/u/3333008?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Milan Kolar</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=mkolar" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=mkolar" title="Documentation">📖</a> <a href="#infra-mkolar" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#business-mkolar" title="Business development">💼</a> <a href="#content-mkolar" title="Content">🖋</a> <a href="#fundingFinding-mkolar" title="Funding Finding">🔍</a> <a href="#maintenance-mkolar" title="Maintenance">🚧</a> <a href="#projectManagement-mkolar" title="Project Management">📆</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Amkolar" title="Reviewed Pull Requests">👀</a> <a href="#mentoring-mkolar" title="Mentoring">🧑‍🏫</a> <a href="#question-mkolar" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://www.linkedin.com/in/jakubjezek79"><img src="https://avatars.githubusercontent.com/u/40640033?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Jakub Ježek</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=jakubjezek001" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=jakubjezek001" title="Documentation">📖</a> <a href="#infra-jakubjezek001" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#content-jakubjezek001" title="Content">🖋</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Ajakubjezek001" title="Reviewed Pull Requests">👀</a> <a href="#maintenance-jakubjezek001" title="Maintenance">🚧</a> <a href="#mentoring-jakubjezek001" title="Mentoring">🧑‍🏫</a> <a href="#projectManagement-jakubjezek001" title="Project Management">📆</a> <a href="#question-jakubjezek001" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://github.com/antirotor"><img src="https://avatars.githubusercontent.com/u/33513211?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Ondřej Samohel</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=antirotor" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=antirotor" title="Documentation">📖</a> <a href="#infra-antirotor" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#content-antirotor" title="Content">🖋</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Aantirotor" title="Reviewed Pull Requests">👀</a> <a href="#maintenance-antirotor" title="Maintenance">🚧</a> <a href="#mentoring-antirotor" title="Mentoring">🧑‍🏫</a> <a href="#projectManagement-antirotor" title="Project Management">📆</a> <a href="#question-antirotor" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://github.com/iLLiCiTiT"><img src="https://avatars.githubusercontent.com/u/43494761?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Jakub Trllo</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=iLLiCiTiT" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=iLLiCiTiT" title="Documentation">📖</a> <a href="#infra-iLLiCiTiT" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3AiLLiCiTiT" title="Reviewed Pull Requests">👀</a> <a href="#maintenance-iLLiCiTiT" title="Maintenance">🚧</a> <a href="#question-iLLiCiTiT" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://github.com/kalisp"><img src="https://avatars.githubusercontent.com/u/4457962?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Petr Kalis</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=kalisp" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=kalisp" title="Documentation">📖</a> <a href="#infra-kalisp" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Akalisp" title="Reviewed Pull Requests">👀</a> <a href="#maintenance-kalisp" title="Maintenance">🚧</a> <a href="#question-kalisp" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://github.com/64qam"><img src="https://avatars.githubusercontent.com/u/26925793?v=4?s=100" width="100px;" alt=""/><br /><sub><b>64qam</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=64qam" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3A64qam" title="Reviewed Pull Requests">👀</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=64qam" title="Documentation">📖</a> <a href="#infra-64qam" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#projectManagement-64qam" title="Project Management">📆</a> <a href="#maintenance-64qam" title="Maintenance">🚧</a> <a href="#content-64qam" title="Content">🖋</a> <a href="#userTesting-64qam" title="User Testing">📓</a></td>
<td align="center"><a href="http://www.colorbleed.nl/"><img src="https://avatars.githubusercontent.com/u/2439881?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Roy Nieterau</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=BigRoy" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=BigRoy" title="Documentation">📖</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3ABigRoy" title="Reviewed Pull Requests">👀</a> <a href="#mentoring-BigRoy" title="Mentoring">🧑‍🏫</a> <a href="#question-BigRoy" title="Answering Questions">💬</a></td>
</tr>
<tr>
<td align="center"><a href="https://github.com/tokejepsen"><img src="https://avatars.githubusercontent.com/u/1860085?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Toke Jepsen</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=tokejepsen" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=tokejepsen" title="Documentation">📖</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Atokejepsen" title="Reviewed Pull Requests">👀</a> <a href="#mentoring-tokejepsen" title="Mentoring">🧑‍🏫</a> <a href="#question-tokejepsen" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://github.com/jrsndl"><img src="https://avatars.githubusercontent.com/u/45896205?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Jiri Sindelar</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=jrsndl" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Ajrsndl" title="Reviewed Pull Requests">👀</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=jrsndl" title="Documentation">📖</a> <a href="#content-jrsndl" title="Content">🖋</a> <a href="#tutorial-jrsndl" title="Tutorials"></a> <a href="#userTesting-jrsndl" title="User Testing">📓</a></td>
<td align="center"><a href="https://barbierisimone.com/"><img src="https://avatars.githubusercontent.com/u/1087869?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Simone Barbieri</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=simonebarbieri" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=simonebarbieri" title="Documentation">📖</a></td>
<td align="center"><a href="http://karimmozilla.xyz/"><img src="https://avatars.githubusercontent.com/u/82811760?v=4?s=100" width="100px;" alt=""/><br /><sub><b>karimmozilla</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=karimmozilla" title="Code">💻</a></td>
<td align="center"><a href="https://github.com/Allan-I"><img src="https://avatars.githubusercontent.com/u/76656700?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Allan I. A.</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=Allan-I" title="Code">💻</a></td>
<td align="center"><a href="https://www.linkedin.com/in/mmuurrpphhyy/"><img src="https://avatars.githubusercontent.com/u/352795?v=4?s=100" width="100px;" alt=""/><br /><sub><b>murphy</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=m-u-r-p-h-y" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Am-u-r-p-h-y" title="Reviewed Pull Requests">👀</a> <a href="#userTesting-m-u-r-p-h-y" title="User Testing">📓</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=m-u-r-p-h-y" title="Documentation">📖</a> <a href="#projectManagement-m-u-r-p-h-y" title="Project Management">📆</a></td>
<td align="center"><a href="https://github.com/aardschok"><img src="https://avatars.githubusercontent.com/u/26920875?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Wijnand Koreman</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=aardschok" title="Code">💻</a></td>
</tr>
<tr>
<td align="center"><a href="http://jedimaster.cnblogs.com/"><img src="https://avatars.githubusercontent.com/u/1798206?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Bo Zhou</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=zhoub" title="Code">💻</a></td>
<td align="center"><a href="https://www.linkedin.com/in/clementhector/"><img src="https://avatars.githubusercontent.com/u/7068597?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Clément Hector</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=ClementHector" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3AClementHector" title="Reviewed Pull Requests">👀</a></td>
<td align="center"><a href="https://twitter.com/davidlatwe"><img src="https://avatars.githubusercontent.com/u/3357009?v=4?s=100" width="100px;" alt=""/><br /><sub><b>David Lai</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=davidlatwe" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/pulls?q=is%3Apr+reviewed-by%3Adavidlatwe" title="Reviewed Pull Requests">👀</a></td>
<td align="center"><a href="https://github.com/2-REC"><img src="https://avatars.githubusercontent.com/u/42170307?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Derek </b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=2-REC" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=2-REC" title="Documentation">📖</a></td>
<td align="center"><a href="https://github.com/gabormarinov"><img src="https://avatars.githubusercontent.com/u/8620515?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Gábor Marinov</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=gabormarinov" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=gabormarinov" title="Documentation">📖</a></td>
<td align="center"><a href="https://github.com/icyvapor"><img src="https://avatars.githubusercontent.com/u/1195278?v=4?s=100" width="100px;" alt=""/><br /><sub><b>icyvapor</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=icyvapor" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=icyvapor" title="Documentation">📖</a></td>
<td align="center"><a href="https://github.com/jlorrain"><img src="https://avatars.githubusercontent.com/u/7955673?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Jérôme LORRAIN</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=jlorrain" title="Code">💻</a></td>
</tr>
<tr>
<td align="center"><a href="https://github.com/dmo-j-cube"><img src="https://avatars.githubusercontent.com/u/89823400?v=4?s=100" width="100px;" alt=""/><br /><sub><b>David Morris-Oliveros</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=dmo-j-cube" title="Code">💻</a></td>
<td align="center"><a href="https://github.com/BenoitConnan"><img src="https://avatars.githubusercontent.com/u/82808268?v=4?s=100" width="100px;" alt=""/><br /><sub><b>BenoitConnan</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=BenoitConnan" title="Code">💻</a></td>
<td align="center"><a href="https://github.com/Malthaldar"><img src="https://avatars.githubusercontent.com/u/33671694?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Malthaldar</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=Malthaldar" title="Code">💻</a></td>
<td align="center"><a href="http://www.svenneve.com/"><img src="https://avatars.githubusercontent.com/u/2472863?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Sven Neve</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=svenneve" title="Code">💻</a></td>
<td align="center"><a href="https://github.com/zafrs"><img src="https://avatars.githubusercontent.com/u/26890002?v=4?s=100" width="100px;" alt=""/><br /><sub><b>zafrs</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=zafrs" title="Code">💻</a></td>
<td align="center"><a href="http://felixdavid.com/"><img src="https://avatars.githubusercontent.com/u/22875539?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Félix David</b></sub></a><br /><a href="https://github.com/pypeclub/OpenPype/commits?author=Tilix4" title="Code">💻</a> <a href="https://github.com/pypeclub/OpenPype/commits?author=Tilix4" title="Documentation">📖</a></td>
</tr>
<tbody>
<tr>
<td align="center" valign="top" width="14.28%"><a href="http://pype.club/"><img src="https://avatars.githubusercontent.com/u/3333008?v=4?s=100" width="100px;" alt="Milan Kolar"/><br /><sub><b>Milan Kolar</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=mkolar" title="Code">💻</a> <a href="https://github.com/ynput/OpenPype/commits?author=mkolar" title="Documentation">📖</a> <a href="#infra-mkolar" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#business-mkolar" title="Business development">💼</a> <a href="#content-mkolar" title="Content">🖋</a> <a href="#fundingFinding-mkolar" title="Funding Finding">🔍</a> <a href="#maintenance-mkolar" title="Maintenance">🚧</a> <a href="#projectManagement-mkolar" title="Project Management">📆</a> <a href="https://github.com/ynput/OpenPype/pulls?q=is%3Apr+reviewed-by%3Amkolar" title="Reviewed Pull Requests">👀</a> <a href="#mentoring-mkolar" title="Mentoring">🧑‍🏫</a> <a href="#question-mkolar" title="Answering Questions">💬</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://www.linkedin.com/in/jakubjezek79"><img src="https://avatars.githubusercontent.com/u/40640033?v=4?s=100" width="100px;" alt="Jakub Ježek"/><br /><sub><b>Jakub Ježek</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=jakubjezek001" title="Code">💻</a> <a href="https://github.com/ynput/OpenPype/commits?author=jakubjezek001" title="Documentation">📖</a> <a href="#infra-jakubjezek001" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#content-jakubjezek001" title="Content">🖋</a> <a href="https://github.com/ynput/OpenPype/pulls?q=is%3Apr+reviewed-by%3Ajakubjezek001" title="Reviewed Pull Requests">👀</a> <a href="#maintenance-jakubjezek001" title="Maintenance">🚧</a> <a href="#mentoring-jakubjezek001" title="Mentoring">🧑‍🏫</a> <a href="#projectManagement-jakubjezek001" title="Project Management">📆</a> <a href="#question-jakubjezek001" title="Answering Questions">💬</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/antirotor"><img src="https://avatars.githubusercontent.com/u/33513211?v=4?s=100" width="100px;" alt="Ondřej Samohel"/><br /><sub><b>Ondřej Samohel</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=antirotor" title="Code">💻</a> <a href="https://github.com/ynput/OpenPype/commits?author=antirotor" title="Documentation">📖</a> <a href="#infra-antirotor" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#content-antirotor" title="Content">🖋</a> <a href="https://github.com/ynput/OpenPype/pulls?q=is%3Apr+reviewed-by%3Aantirotor" title="Reviewed Pull Requests">👀</a> <a href="#maintenance-antirotor" title="Maintenance">🚧</a> <a href="#mentoring-antirotor" title="Mentoring">🧑‍🏫</a> <a href="#projectManagement-antirotor" title="Project Management">📆</a> <a href="#question-antirotor" title="Answering Questions">💬</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/iLLiCiTiT"><img src="https://avatars.githubusercontent.com/u/43494761?v=4?s=100" width="100px;" alt="Jakub Trllo"/><br /><sub><b>Jakub Trllo</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=iLLiCiTiT" title="Code">💻</a> <a href="https://github.com/ynput/OpenPype/commits?author=iLLiCiTiT" title="Documentation">📖</a> <a href="#infra-iLLiCiTiT" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="https://github.com/ynput/OpenPype/pulls?q=is%3Apr+reviewed-by%3AiLLiCiTiT" title="Reviewed Pull Requests">👀</a> <a href="#maintenance-iLLiCiTiT" title="Maintenance">🚧</a> <a href="#question-iLLiCiTiT" title="Answering Questions">💬</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/kalisp"><img src="https://avatars.githubusercontent.com/u/4457962?v=4?s=100" width="100px;" alt="Petr Kalis"/><br /><sub><b>Petr Kalis</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=kalisp" title="Code">💻</a> <a href="https://github.com/ynput/OpenPype/commits?author=kalisp" title="Documentation">📖</a> <a href="#infra-kalisp" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="https://github.com/ynput/OpenPype/pulls?q=is%3Apr+reviewed-by%3Akalisp" title="Reviewed Pull Requests">👀</a> <a href="#maintenance-kalisp" title="Maintenance">🚧</a> <a href="#question-kalisp" title="Answering Questions">💬</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/64qam"><img src="https://avatars.githubusercontent.com/u/26925793?v=4?s=100" width="100px;" alt="64qam"/><br /><sub><b>64qam</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=64qam" title="Code">💻</a> <a href="https://github.com/ynput/OpenPype/pulls?q=is%3Apr+reviewed-by%3A64qam" title="Reviewed Pull Requests">👀</a> <a href="https://github.com/ynput/OpenPype/commits?author=64qam" title="Documentation">📖</a> <a href="#infra-64qam" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#projectManagement-64qam" title="Project Management">📆</a> <a href="#maintenance-64qam" title="Maintenance">🚧</a> <a href="#content-64qam" title="Content">🖋</a> <a href="#userTesting-64qam" title="User Testing">📓</a></td>
<td align="center" valign="top" width="14.28%"><a href="http://www.colorbleed.nl/"><img src="https://avatars.githubusercontent.com/u/2439881?v=4?s=100" width="100px;" alt="Roy Nieterau"/><br /><sub><b>Roy Nieterau</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=BigRoy" title="Code">💻</a> <a href="https://github.com/ynput/OpenPype/commits?author=BigRoy" title="Documentation">📖</a> <a href="https://github.com/ynput/OpenPype/pulls?q=is%3Apr+reviewed-by%3ABigRoy" title="Reviewed Pull Requests">👀</a> <a href="#mentoring-BigRoy" title="Mentoring">🧑‍🏫</a> <a href="#question-BigRoy" title="Answering Questions">💬</a></td>
</tr>
<tr>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/tokejepsen"><img src="https://avatars.githubusercontent.com/u/1860085?v=4?s=100" width="100px;" alt="Toke Jepsen"/><br /><sub><b>Toke Jepsen</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=tokejepsen" title="Code">💻</a> <a href="https://github.com/ynput/OpenPype/commits?author=tokejepsen" title="Documentation">📖</a> <a href="https://github.com/ynput/OpenPype/pulls?q=is%3Apr+reviewed-by%3Atokejepsen" title="Reviewed Pull Requests">👀</a> <a href="#mentoring-tokejepsen" title="Mentoring">🧑‍🏫</a> <a href="#question-tokejepsen" title="Answering Questions">💬</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/jrsndl"><img src="https://avatars.githubusercontent.com/u/45896205?v=4?s=100" width="100px;" alt="Jiri Sindelar"/><br /><sub><b>Jiri Sindelar</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=jrsndl" title="Code">💻</a> <a href="https://github.com/ynput/OpenPype/pulls?q=is%3Apr+reviewed-by%3Ajrsndl" title="Reviewed Pull Requests">👀</a> <a href="https://github.com/ynput/OpenPype/commits?author=jrsndl" title="Documentation">📖</a> <a href="#content-jrsndl" title="Content">🖋</a> <a href="#tutorial-jrsndl" title="Tutorials"></a> <a href="#userTesting-jrsndl" title="User Testing">📓</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://barbierisimone.com/"><img src="https://avatars.githubusercontent.com/u/1087869?v=4?s=100" width="100px;" alt="Simone Barbieri"/><br /><sub><b>Simone Barbieri</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=simonebarbieri" title="Code">💻</a> <a href="https://github.com/ynput/OpenPype/commits?author=simonebarbieri" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="http://karimmozilla.xyz/"><img src="https://avatars.githubusercontent.com/u/82811760?v=4?s=100" width="100px;" alt="karimmozilla"/><br /><sub><b>karimmozilla</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=karimmozilla" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/Allan-I"><img src="https://avatars.githubusercontent.com/u/76656700?v=4?s=100" width="100px;" alt="Allan I. A."/><br /><sub><b>Allan I. A.</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=Allan-I" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://www.linkedin.com/in/mmuurrpphhyy/"><img src="https://avatars.githubusercontent.com/u/352795?v=4?s=100" width="100px;" alt="murphy"/><br /><sub><b>murphy</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=m-u-r-p-h-y" title="Code">💻</a> <a href="https://github.com/ynput/OpenPype/pulls?q=is%3Apr+reviewed-by%3Am-u-r-p-h-y" title="Reviewed Pull Requests">👀</a> <a href="#userTesting-m-u-r-p-h-y" title="User Testing">📓</a> <a href="https://github.com/ynput/OpenPype/commits?author=m-u-r-p-h-y" title="Documentation">📖</a> <a href="#projectManagement-m-u-r-p-h-y" title="Project Management">📆</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/aardschok"><img src="https://avatars.githubusercontent.com/u/26920875?v=4?s=100" width="100px;" alt="Wijnand Koreman"/><br /><sub><b>Wijnand Koreman</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=aardschok" title="Code">💻</a></td>
</tr>
<tr>
<td align="center" valign="top" width="14.28%"><a href="http://jedimaster.cnblogs.com/"><img src="https://avatars.githubusercontent.com/u/1798206?v=4?s=100" width="100px;" alt="Bo Zhou"/><br /><sub><b>Bo Zhou</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=zhoub" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://www.linkedin.com/in/clementhector/"><img src="https://avatars.githubusercontent.com/u/7068597?v=4?s=100" width="100px;" alt="Clément Hector"/><br /><sub><b>Clément Hector</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=ClementHector" title="Code">💻</a> <a href="https://github.com/ynput/OpenPype/pulls?q=is%3Apr+reviewed-by%3AClementHector" title="Reviewed Pull Requests">👀</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://twitter.com/davidlatwe"><img src="https://avatars.githubusercontent.com/u/3357009?v=4?s=100" width="100px;" alt="David Lai"/><br /><sub><b>David Lai</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=davidlatwe" title="Code">💻</a> <a href="https://github.com/ynput/OpenPype/pulls?q=is%3Apr+reviewed-by%3Adavidlatwe" title="Reviewed Pull Requests">👀</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/2-REC"><img src="https://avatars.githubusercontent.com/u/42170307?v=4?s=100" width="100px;" alt="Derek "/><br /><sub><b>Derek </b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=2-REC" title="Code">💻</a> <a href="https://github.com/ynput/OpenPype/commits?author=2-REC" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/gabormarinov"><img src="https://avatars.githubusercontent.com/u/8620515?v=4?s=100" width="100px;" alt="Gábor Marinov"/><br /><sub><b>Gábor Marinov</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=gabormarinov" title="Code">💻</a> <a href="https://github.com/ynput/OpenPype/commits?author=gabormarinov" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/icyvapor"><img src="https://avatars.githubusercontent.com/u/1195278?v=4?s=100" width="100px;" alt="icyvapor"/><br /><sub><b>icyvapor</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=icyvapor" title="Code">💻</a> <a href="https://github.com/ynput/OpenPype/commits?author=icyvapor" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/jlorrain"><img src="https://avatars.githubusercontent.com/u/7955673?v=4?s=100" width="100px;" alt="Jérôme LORRAIN"/><br /><sub><b>Jérôme LORRAIN</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=jlorrain" title="Code">💻</a></td>
</tr>
<tr>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/dmo-j-cube"><img src="https://avatars.githubusercontent.com/u/89823400?v=4?s=100" width="100px;" alt="David Morris-Oliveros"/><br /><sub><b>David Morris-Oliveros</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=dmo-j-cube" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/BenoitConnan"><img src="https://avatars.githubusercontent.com/u/82808268?v=4?s=100" width="100px;" alt="BenoitConnan"/><br /><sub><b>BenoitConnan</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=BenoitConnan" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/Malthaldar"><img src="https://avatars.githubusercontent.com/u/33671694?v=4?s=100" width="100px;" alt="Malthaldar"/><br /><sub><b>Malthaldar</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=Malthaldar" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="http://www.svenneve.com/"><img src="https://avatars.githubusercontent.com/u/2472863?v=4?s=100" width="100px;" alt="Sven Neve"/><br /><sub><b>Sven Neve</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=svenneve" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/zafrs"><img src="https://avatars.githubusercontent.com/u/26890002?v=4?s=100" width="100px;" alt="zafrs"/><br /><sub><b>zafrs</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=zafrs" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="http://felixdavid.com/"><img src="https://avatars.githubusercontent.com/u/22875539?v=4?s=100" width="100px;" alt="Félix David"/><br /><sub><b>Félix David</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=Tilix4" title="Code">💻</a> <a href="https://github.com/ynput/OpenPype/commits?author=Tilix4" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="http://abogomolov.com"><img src="https://avatars.githubusercontent.com/u/11698866?v=4?s=100" width="100px;" alt="Alexey Bogomolov"/><br /><sub><b>Alexey Bogomolov</b></sub></a><br /><a href="https://github.com/ynput/OpenPype/commits?author=movalex" title="Code">💻</a></td>
</tr>
</tbody>
</table>
<!-- markdownlint-restore -->

View file

@ -855,12 +855,13 @@ def get_output_link_versions(project_name, version_id, fields=None):
return conn.find(query_filter, _prepare_fields(fields))
def get_last_versions(project_name, subset_ids, fields=None):
def get_last_versions(project_name, subset_ids, active=None, fields=None):
"""Latest versions for entered subset_ids.
Args:
project_name (str): Name of project where to look for queried entities.
subset_ids (Iterable[Union[str, ObjectId]]): List of subset ids.
active (Optional[bool]): If True, return active versions (a missing 'data.active' flag counts as active); if False, only inactive versions; if None, all versions.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
@ -899,12 +900,21 @@ def get_last_versions(project_name, subset_ids, fields=None):
if name_needed:
group_item["name"] = {"$last": "$name"}
aggregate_filter = {
"type": "version",
"parent": {"$in": subset_ids}
}
if active is False:
aggregate_filter["data.active"] = active
elif active is True:
aggregate_filter["$or"] = [
{"data.active": {"$exists": 0}},
{"data.active": active},
]
aggregation_pipeline = [
# Find all versions of those subsets
{"$match": {
"type": "version",
"parent": {"$in": subset_ids}
}},
{"$match": aggregate_filter},
# Sorting versions all together
{"$sort": {"name": 1}},
# Group them by "parent", but only take the last
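A hedged usage sketch of the extended signature, for orientation only. The import path and the shape of the return value are assumptions not shown in this hunk; `active=True` matches versions whose `data.active` flag is missing or true, as in the `$or` branch above.

from openpype.client import get_last_versions  # import path assumed

last_versions = get_last_versions(
    project_name="demo_project",                 # illustrative values
    subset_ids=["633d0f5fb0c5a2f8e3a1c2d4"],
    active=True,   # data.active missing or True (see the $or filter above)
    fields=["name", "parent"],
)
# The exact return shape is not shown in this hunk.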

View file

@ -220,7 +220,6 @@ def new_representation_doc(
"parent": version_id,
"name": name,
"data": data,
# Imprint shortcut to context for performance reasons.
"context": context
}
@ -708,7 +707,11 @@ class OperationsSession(object):
return operation
def create_project(project_name, project_code, library_project=False):
def create_project(
project_name,
project_code,
library_project=False,
):
"""Create project using OpenPype settings.
This project creation function is not validating project document on
@ -752,7 +755,7 @@ def create_project(project_name, project_code, library_project=False):
"name": project_name,
"data": {
"code": project_code,
"library_project": library_project
"library_project": library_project,
},
"schema": CURRENT_PROJECT_SCHEMA
}
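A minimal call sketch of the reflowed signature. The import path is an assumption taken from context, and no return value is shown because this hunk does not show one.

from openpype.client.operations import create_project  # import path assumed

create_project(
    project_name="demo_project",   # illustrative values
    project_code="demo",
    library_project=False,
)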

View file

@ -1,37 +0,0 @@
from openpype.lib import PreLaunchHook
from openpype.pipeline.colorspace import get_imageio_config
from openpype.pipeline.template_data import get_template_data
class PreLaunchHostSetOCIO(PreLaunchHook):
"""Set OCIO environment for the host"""
order = 0
app_groups = ["substancepainter"]
def execute(self):
"""Hook entry method."""
anatomy_data = get_template_data(
project_doc=self.data["project_doc"],
asset_doc=self.data["asset_doc"],
task_name=self.data["task_name"],
host_name=self.host_name,
system_settings=self.data["system_settings"]
)
ocio_config = get_imageio_config(
project_name=self.data["project_doc"]["name"],
host_name=self.host_name,
project_settings=self.data["project_settings"],
anatomy_data=anatomy_data,
anatomy=self.data["anatomy"]
)
if ocio_config:
ocio_path = ocio_config["path"]
self.log.info(f"Setting OCIO config path: {ocio_path}")
self.launch_context.env["OCIO"] = ocio_path
else:
self.log.debug("OCIO not set or enabled")

View file

@ -1,12 +1,27 @@
from openpype.lib import PreLaunchHook
from openpype.pipeline.colorspace import get_imageio_config
from openpype.pipeline.colorspace import (
get_imageio_config
)
from openpype.pipeline.template_data import get_template_data_with_names
class FusionPreLaunchOCIO(PreLaunchHook):
"""Set OCIO environment variable for Fusion"""
app_groups = ["fusion"]
class OCIOEnvHook(PreLaunchHook):
"""Set OCIO environment variable for hosts that use OpenColorIO."""
order = 0
hosts = [
"substancepainter",
"fusion",
"blender",
"aftereffects",
"max",
"houdini",
"maya",
"nuke",
"hiero",
"resolve"
]
def execute(self):
"""Hook entry method."""
@ -26,7 +41,13 @@ class FusionPreLaunchOCIO(PreLaunchHook):
anatomy_data=template_data,
anatomy=self.data["anatomy"]
)
ocio_path = config_data["path"]
self.log.info(f"Setting OCIO config path: {ocio_path}")
self.launch_context.env["OCIO"] = ocio_path
if config_data:
ocio_path = config_data["path"]
self.log.info(
f"Setting OCIO environment to config path: {ocio_path}")
self.launch_context.env["OCIO"] = ocio_path
else:
self.log.debug("OCIO config not set or not enabled")
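For orientation, a minimal sketch of the new guard in isolation: the OCIO variable is only exported when get_imageio_config actually resolved a config. The dict shape mirrors the hunk above; the helper name and parameters are illustrative, not part of the commit.

def apply_ocio_env(env, config_data, log):
    # config_data mirrors get_imageio_config's return value shown above:
    # a dict with a "path" key, or a falsy value when colorspace management
    # is not configured/enabled for the project and host.
    if config_data:
        ocio_path = config_data["path"]
        log.info(f"Setting OCIO environment to config path: {ocio_path}")
        env["OCIO"] = ocio_path
    else:
        log.debug("OCIO config not set or not enabled")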

View file

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.25"
<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.26"
ExtensionBundleName="com.openpype.AE.panel" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ExtensionList>
<Extension Id="com.openpype.AE.panel" Version="1.0" />

View file

@ -104,6 +104,39 @@
});
</script>
<script type=text/javascript>
$(function() {
$("a#create-placeholder-button").bind("click", function() {
RPC.call('AfterEffects.create_placeholder_route').then(function (data) {
}, function (error) {
alert(error);
});
});
});
</script>
<script type=text/javascript>
$(function() {
$("a#update-placeholder-button").bind("click", function() {
RPC.call('AfterEffects.update_placeholder_route').then(function (data) {
}, function (error) {
alert(error);
});
});
});
</script>
<script type=text/javascript>
$(function() {
$("a#build-workfile-button").bind("click", function() {
RPC.call('AfterEffects.build_workfile_template_route').then(function (data) {
}, function (error) {
alert(error);
});
});
});
</script>
<script type=text/javascript>
$(function() {
$("a#experimental-button").bind("click", function() {
@ -127,9 +160,15 @@
<div><a href=# id=loader-button><button class="hostFontSize">Load...</button></a></div>
<div><a href=# id=publish-button><button class="hostFontSize">Publish...</button></a></div>
<div><a href=# id=sceneinventory-button><button class="hostFontSize">Manage...</button></a></div>
<div><a href=# id=separator0><button class="hostFontSize">&nbsp;</button></a></div>
<div><a href=# id=setresolution-button><button class="hostFontSize">Set Resolution</button></a></div>
<div><a href=# id=setframes-button><button class="hostFontSize">Set Frame Range</button></a></div>
<div><a href=# id=setall-button><button class="hostFontSize">Apply All Settings</button></a></div>
<div><a href=# id=separator1><button class="hostFontSize">&nbsp;</button></a></div>
<div><a href=# id=create-placeholder-button><button class="hostFontSize">Create placeholder</button></a></div>
<div><a href=# id=update-placeholder-button><button class="hostFontSize">Update placeholder</button></a></div>
<div><a href=# id=build-workfile-button><button class="hostFontSize">Build Workfile from template</button></a></div>
<div><a href=# id=separator3><button class="hostFontSize">&nbsp;</button></a></div>
<div><a href=# id=experimental-button><button class="hostFontSize">Experimental Tools...</button></a></div>
</div>

View file

@ -107,6 +107,17 @@ function main(websocket_url){
});
});
RPC.addRoute('AfterEffects.add_item', function (data) {
log.warn('Server called client route "add_item":', data);
var escapedName = EscapeStringForJSX(data.name);
return runEvalScript("addItem('" + escapedName +"', " +
"'" + data.item_type + "')")
.then(function(result){
log.warn("add_item: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.get_items', function (data) {
log.warn('Server called client route "get_items":', data);
return runEvalScript("getItems(" + data.comps + "," +
@ -118,6 +129,15 @@ function main(websocket_url){
});
});
RPC.addRoute('AfterEffects.select_items', function (data) {
log.warn('Server called client route "select_items":', data);
return runEvalScript("selectItems(" + JSON.stringify(data.items) + ")")
.then(function(result){
log.warn("select_items: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.get_selected_items', function (data) {
log.warn('Server called client route "get_selected_items":', data);
@ -280,7 +300,7 @@ function main(websocket_url){
RPC.addRoute('AfterEffects.add_item_as_layer', function (data) {
log.warn('Server called client route "add_item_as_layer":', data);
return runEvalScript("addItemAsLayerToComp(" + data.comp_id + ", " +
data.item_id + "," +
data.item_id + "," +
" null )")
.then(function(result){
log.warn("addItemAsLayerToComp: " + result);
@ -288,6 +308,16 @@ function main(websocket_url){
});
});
RPC.addRoute('AfterEffects.add_item_instead_placeholder', function (data) {
log.warn('Server called client route "add_item_instead_placeholder":', data);
return runEvalScript("addItemInstead(" + data.placeholder_item_id + ", " +
data.item_id + ")")
.then(function(result){
log.warn("add_item_instead_placeholder: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.render', function (data) {
log.warn('Server called client route "render":', data);
var escapedPath = EscapeStringForJSX(data.folder_url);
@ -312,6 +342,20 @@ function main(websocket_url){
});
});
RPC.addRoute('AfterEffects.add_placeholder', function (data) {
log.warn('Server called client route "add_placeholder":', data);
var escapedName = EscapeStringForJSX(data.name);
return runEvalScript("addPlaceholder('" + escapedName +"',"+
data.width + ',' +
data.height + ',' +
data.fps + ',' +
data.duration + ")")
.then(function(result){
log.warn("add_placeholder: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.close', function (data) {
log.warn('Server called client route "close":', data);
return runEvalScript("close()");

View file

@ -112,6 +112,32 @@ function getActiveDocumentFullName(){
return _prepareError("No file open currently");
}
function addItem(name, item_type){
/**
* Adds comp or folder to project items.
*
* Could be called when creating a publishable instance to prepare a
* composition (and render queue).
*
* Args:
* name (str): composition name
* item_type (str): COMP|FOLDER
* Returns:
* SingleItemValue: e.g. {"result": VALUE}
*/
if (item_type == "COMP"){
// dummy values, will be rewritten later
item = app.project.items.addComp(name, 1920, 1060, 1, 10, 25);
}else if (item_type == "FOLDER"){
item = app.project.items.addFolder(name);
}else{
return _prepareError("Only 'COMP' or 'FOLDER' can be created");
}
return _prepareSingleValue(item.id);
}
function getItems(comps, folders, footages){
/**
* Returns JSON representation of compositions and
@ -139,6 +165,24 @@ function getItems(comps, folders, footages){
}
function selectItems(items){
/**
* Select all items from `items`, deselect other.
*
* Args:
* items (list)
*/
for (i = 1; i <= app.project.items.length; ++i){
item = app.project.items[i];
if (items.indexOf(item.id) > -1){
item.selected = true;
}else{
item.selected = false;
}
}
}
function getSelectedItems(comps, folders, footages){
/**
* Returns list of selected items from Project menu
@ -280,12 +324,12 @@ function setLabelColor(comp_id, color_idx){
}
}
function replaceItem(comp_id, path, item_name){
function replaceItem(item_id, path, item_name){
/**
* Replaces loaded file with new file and updates name
*
* Args:
* comp_id (int): id of composition, not a index!
* item_id (int): id of the item, not an index!
* path (string): absolute path to new file
* item_name (string): new composition name
*/
@ -295,7 +339,7 @@ function replaceItem(comp_id, path, item_name){
if (!fp.exists){
return _prepareError("File " + path + " not found.");
}
var item = app.project.itemByID(comp_id);
var item = app.project.itemByID(item_id);
if (item){
try{
if (isFileSequence(item)) {
@ -311,7 +355,7 @@ function replaceItem(comp_id, path, item_name){
fp.close();
}
}else{
return _prepareError("There is no composition with "+ comp_id);
return _prepareError("There is no item with "+ item_id);
}
app.endUndoGroup();
}
@ -821,6 +865,67 @@ function printMsg(msg){
alert(msg);
}
function addPlaceholder(name, width, height, fps, duration){
/** Add AE PlaceholderItem to Project list.
*
* PlaceholderItem was chosen as it doesn't require an existing file and
* might allow useful functionality in the future.
*
*/
app.beginUndoGroup('change comp properties');
try{
item = app.project.importPlaceholder(name, width, height,
fps, duration);
return _prepareSingleValue(item.id);
}catch (error) {
writeLn(_prepareError("Cannot add placeholder " + error.toString()));
}
app.endUndoGroup();
}
function addItemInstead(placeholder_item_id, item_id){
/** Add a newly loaded item in place of a load placeholder.
*
* Each placeholder could be placed multiple times into multiple
* compositions. This loops through all compositions and
* places the loaded item under the placeholder.
* The placeholder item gets deleted later separately, according
* to the configuration in Settings.
*
* Args:
* placeholder_item_id (int)
* item_id (int)
*/
var item = app.project.itemByID(item_id);
if (!item){
return _prepareError("There is no item with "+ item_id);
}
app.beginUndoGroup('Add loaded items');
for (i = 1; i <= app.project.items.length; ++i){
var comp = app.project.items[i];
if (!(comp instanceof CompItem)){
continue
}
// use a separate counter so the outer project-items loop isn't clobbered
var j = 1;
while (j <= comp.numLayers) {
var layer = comp.layer(j);
var layer_source = layer.source;
if (layer_source && layer_source.id == placeholder_item_id){
var new_layer = comp.layers.add(item);
new_layer.moveAfter(layer);
// copy the transform properties to the new layer
layer.property("ADBE Transform Group").copyToComp(new_layer);
// skip past the newly inserted layer
j = j + 1;
}
j = j + 1;
}
}
app.endUndoGroup();
}
function _prepareSingleValue(value){
return JSON.stringify({"result": value})
}

View file

@ -357,3 +357,33 @@ class AfterEffectsRoute(WebSocketRoute):
# Required return statement.
return "nothing"
def create_placeholder_route(self):
from openpype.hosts.aftereffects.api.workfile_template_builder import \
create_placeholder
partial_method = functools.partial(create_placeholder)
ProcessLauncher.execute_in_main_thread(partial_method)
# Required return statement.
return "nothing"
def update_placeholder_route(self):
from openpype.hosts.aftereffects.api.workfile_template_builder import \
update_placeholder
partial_method = functools.partial(update_placeholder)
ProcessLauncher.execute_in_main_thread(partial_method)
# Required return statement.
return "nothing"
def build_workfile_template_route(self):
from openpype.hosts.aftereffects.api.workfile_template_builder import \
build_workfile_template
partial_method = functools.partial(build_workfile_template)
ProcessLauncher.execute_in_main_thread(partial_method)
# Required return statement.
return "nothing"
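The three routes are structurally identical: lazily import the target function, wrap it in functools.partial and hand it to the main thread. A hedged sketch of that shared shape, illustrative only and not part of the commit; `launcher` stands in for the ProcessLauncher class used above.

import functools

def make_main_thread_route(function, launcher):
    # Build a route body that schedules `function` on the host main thread
    # and returns the placeholder value the websocket layer expects.
    def route():
        launcher.execute_in_main_thread(functools.partial(function))
        # Required return statement.
        return "nothing"
    return route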

View file

@ -10,6 +10,10 @@ from openpype.pipeline import (
register_creator_plugin_path,
AVALON_CONTAINER_ID,
)
from openpype.hosts.aftereffects.api.workfile_template_builder import (
AEPlaceholderLoadPlugin,
AEPlaceholderCreatePlugin
)
from openpype.pipeline.load import any_outdated_containers
import openpype.hosts.aftereffects
@ -116,6 +120,12 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
item["id"] = "publish_context"
self.stub.imprint(item["id"], item)
def get_workfile_build_placeholder_plugins(self):
return [
AEPlaceholderLoadPlugin,
AEPlaceholderCreatePlugin
]
# created instances section
def list_instances(self):
"""List all created instances from current workfile which

View file

@ -1,7 +1,11 @@
import six
from abc import ABCMeta
from openpype.pipeline import LoaderPlugin
from .launch_logic import get_stub
@six.add_metaclass(ABCMeta)
class AfterEffectsLoader(LoaderPlugin):
@staticmethod
def get_stub():

View file

@ -0,0 +1,271 @@
import os.path
import uuid
import shutil
from openpype.pipeline import registered_host
from openpype.tools.workfile_template_build import (
WorkfileBuildPlaceholderDialog,
)
from openpype.pipeline.workfile.workfile_template_builder import (
AbstractTemplateBuilder,
PlaceholderPlugin,
LoadPlaceholderItem,
CreatePlaceholderItem,
PlaceholderLoadMixin,
PlaceholderCreateMixin
)
from openpype.hosts.aftereffects.api import get_stub
from openpype.hosts.aftereffects.api.lib import set_settings
PLACEHOLDER_SET = "PLACEHOLDERS_SET"
PLACEHOLDER_ID = "openpype.placeholder"
class AETemplateBuilder(AbstractTemplateBuilder):
"""Concrete implementation of AbstractTemplateBuilder for AE"""
def import_template(self, path):
"""Import template into current scene.
Block if a template is already loaded.
Args:
path (str): A path to current template (usually given by
get_template_preset implementation)
Returns:
bool: Whether the template was successfully imported or not
"""
stub = get_stub()
if not os.path.exists(path):
stub.print_msg(f"Template file on {path} doesn't exist.")
return
stub.save()
workfile_path = stub.get_active_document_full_name()
shutil.copy2(path, workfile_path)
stub.open(workfile_path)
return True
class AEPlaceholderPlugin(PlaceholderPlugin):
"""Contains generic methods for all PlaceholderPlugins."""
def collect_placeholders(self):
"""Collect info from file metadata about created placeholders.
Returns:
(list): LoadPlaceholderItem or CreatePlaceholderItem objects
"""
output = []
scene_placeholders = self._collect_scene_placeholders()
for item in scene_placeholders:
if item.get("plugin_identifier") != self.identifier:
continue
if isinstance(self, AEPlaceholderLoadPlugin):
item = LoadPlaceholderItem(item["uuid"],
item["data"],
self)
elif isinstance(self, AEPlaceholderCreatePlugin):
item = CreatePlaceholderItem(item["uuid"],
item["data"],
self)
else:
raise NotImplementedError(f"Not implemented for {type(self)}")
output.append(item)
return output
def update_placeholder(self, placeholder_item, placeholder_data):
"""Resave changed properties for placeholders"""
item_id, metadata_item = self._get_item(placeholder_item)
stub = get_stub()
if not item_id:
stub.print_msg("Cannot find item for "
f"{placeholder_item.scene_identifier}")
return
metadata_item["data"] = placeholder_data
stub.imprint(item_id, metadata_item)
def _get_item(self, placeholder_item):
"""Returns item id and item metadata for placeholder from file meta"""
stub = get_stub()
placeholder_uuid = placeholder_item.scene_identifier
for metadata_item in stub.get_metadata():
if not metadata_item.get("is_placeholder"):
continue
if placeholder_uuid in metadata_item.get("uuid"):
return metadata_item["members"][0], metadata_item
return None, None
def _collect_scene_placeholders(self):
"""Cache placeholder data to shared data.
Returns:
(list) of dicts
"""
placeholder_items = self.builder.get_shared_populate_data(
"placeholder_items"
)
if not placeholder_items:
placeholder_items = []
for item in get_stub().get_metadata():
if not item.get("is_placeholder"):
continue
placeholder_items.append(item)
self.builder.set_shared_populate_data(
"placeholder_items", placeholder_items
)
return placeholder_items
def _imprint_item(self, item_id, name, placeholder_data, stub):
if not item_id:
raise ValueError("Couldn't create a placeholder")
container_data = {
"id": "openpype.placeholder",
"name": name,
"is_placeholder": True,
"plugin_identifier": self.identifier,
"uuid": str(uuid.uuid4()), # scene_identifier
"data": placeholder_data,
"members": [item_id]
}
stub.imprint(item_id, container_data)
class AEPlaceholderCreatePlugin(AEPlaceholderPlugin, PlaceholderCreateMixin):
"""Adds Create placeholder.
This adds composition and runs Create
"""
identifier = "aftereffects.create"
label = "AfterEffects create"
def create_placeholder(self, placeholder_data):
stub = get_stub()
name = "CREATEPLACEHOLDER"
item_id = stub.add_item(name, "COMP")
self._imprint_item(item_id, name, placeholder_data, stub)
def populate_placeholder(self, placeholder):
"""Replace 'placeholder' with a publishable instance.
Renames the prepared composition, creates a publishable instance and sets
frame/duration settings according to the DB.
"""
pre_create_data = {"use_selection": True}
item_id, item = self._get_item(placeholder)
get_stub().select_items([item_id])
self.populate_create_placeholder(placeholder, pre_create_data)
# apply settings for populated composition
item_id, metadata_item = self._get_item(placeholder)
set_settings(True, True, [item_id])
def get_placeholder_options(self, options=None):
return self.get_create_plugin_options(options)
class AEPlaceholderLoadPlugin(AEPlaceholderPlugin, PlaceholderLoadMixin):
identifier = "aftereffects.load"
label = "AfterEffects load"
def create_placeholder(self, placeholder_data):
"""Creates AE's Placeholder item in Project items list.
Sets dummy resolution/duration/fps settings, which will be replaced when
populated.
"""
stub = get_stub()
name = "LOADERPLACEHOLDER"
item_id = stub.add_placeholder(name, 1920, 1060, 25, 10)
self._imprint_item(item_id, name, placeholder_data, stub)
def populate_placeholder(self, placeholder):
"""Use Openpype Loader from `placeholder` to create new FootageItems
New FootageItems are created, files are imported.
"""
self.populate_load_placeholder(placeholder)
errors = placeholder.get_errors()
stub = get_stub()
if errors:
stub.print_msg("\n".join(errors))
else:
if not placeholder.data["keep_placeholder"]:
metadata = stub.get_metadata()
for item in metadata:
if not item.get("is_placeholder"):
continue
scene_identifier = item.get("uuid")
if (scene_identifier and
scene_identifier == placeholder.scene_identifier):
stub.delete_item(item["members"][0])
stub.remove_instance(placeholder.scene_identifier, metadata)
def get_placeholder_options(self, options=None):
return self.get_load_plugin_options(options)
def load_succeed(self, placeholder, container):
placeholder_item_id, _ = self._get_item(placeholder)
item_id = container.id
get_stub().add_item_instead_placeholder(placeholder_item_id, item_id)
def build_workfile_template(*args, **kwargs):
builder = AETemplateBuilder(registered_host())
builder.build_template(*args, **kwargs)
def update_workfile_template(*args):
builder = AETemplateBuilder(registered_host())
builder.rebuild_template()
def create_placeholder(*args):
"""Called when a new workfile placeholder should be created."""
host = registered_host()
builder = AETemplateBuilder(host)
window = WorkfileBuildPlaceholderDialog(host, builder)
window.exec_()
def update_placeholder(*args):
"""Called after placeholder item is selected to modify it."""
host = registered_host()
builder = AETemplateBuilder(host)
stub = get_stub()
selected_items = stub.get_selected_items(True, True, True)
if len(selected_items) != 1:
stub.print_msg("Please select just 1 placeholder")
return
selected_id = selected_items[0].id
placeholder_item = None
placeholder_items_by_id = {
placeholder_item.scene_identifier: placeholder_item
for placeholder_item in builder.get_placeholders()
}
for metadata_item in stub.get_metadata():
if not metadata_item.get("is_placeholder"):
continue
if selected_id in metadata_item.get("members"):
placeholder_item = placeholder_items_by_id.get(
metadata_item["uuid"])
break
if not placeholder_item:
stub.print_msg("Didn't find placeholder metadata. "
"Remove and re-create placeholder.")
return
window = WorkfileBuildPlaceholderDialog(host, builder)
window.set_update_mode(placeholder_item)
window.exec_()
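A hedged usage sketch of the module above, for orientation only. It assumes the AfterEffects host is installed and registered and that a workfile template is configured in project settings; all names are taken from the code shown above.

from openpype.pipeline import registered_host
from openpype.hosts.aftereffects.api.workfile_template_builder import (
    AETemplateBuilder,
    build_workfile_template,
)

def rebuild_workfile_from_template():
    # Build the current workfile from the configured template; placeholders
    # found in the scene metadata are populated by their plugins.
    build_workfile_template()

    # Or drive the builder directly to inspect the collected placeholders.
    builder = AETemplateBuilder(registered_host())
    for placeholder in builder.get_placeholders():
        print(placeholder.scene_identifier, placeholder.data)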

View file

@ -35,6 +35,8 @@ class AEItem(object):
instance_id = attr.ib(default=None) # New Publisher
width = attr.ib(default=None)
height = attr.ib(default=None)
is_placeholder = attr.ib(default=False)
uuid = attr.ib(default=False)
class AfterEffectsServerStub():
@ -220,6 +222,16 @@ class AfterEffectsServerStub():
)
return self._to_records(self._handle_return(res))
def select_items(self, items):
"""
Select items in Project list
Args:
items (list): of int item ids
"""
self.websocketserver.call(
self.client.call('AfterEffects.select_items', items=items))
def get_selected_items(self, comps, folders=False, footages=False):
"""
Same as get_items but using selected items only
@ -240,6 +252,21 @@ class AfterEffectsServerStub():
)
return self._to_records(self._handle_return(res))
def add_item(self, name, item_type):
"""
Adds either composition or folder to project item list.
Args:
name (str)
item_type (str): COMP|FOLDER
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.add_item',
name=name,
item_type=item_type))
return self._handle_return(res)
def get_item(self, item_id):
"""
Returns metadata for particular 'item_id' or None
@ -316,7 +343,7 @@ class AfterEffectsServerStub():
return self._handle_return(res)
def remove_instance(self, instance_id):
def remove_instance(self, instance_id, metadata=None):
"""
Removes instance with 'instance_id' from file's metadata and
saves them.
@ -328,7 +355,10 @@ class AfterEffectsServerStub():
"""
cleaned_data = []
for instance in self.get_metadata():
if metadata is None:
metadata = self.get_metadata()
for instance in metadata:
inst_id = instance.get("instance_id") or instance.get("uuid")
if inst_id != instance_id:
cleaned_data.append(instance)
@ -534,6 +564,47 @@ class AfterEffectsServerStub():
if records:
return records.pop()
def add_item_instead_placeholder(self, placeholder_item_id, item_id):
"""
Adds item_id to layers where placeholder_item_id is present.
One placeholder could result in multiple loaded containers (e.g. items).
Args:
placeholder_item_id (int): id of placeholder item
item_id (int): loaded FootageItem id
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.add_item_instead_placeholder', # noqa
placeholder_item_id=placeholder_item_id, # noqa
item_id=item_id))
return self._handle_return(res)
def add_placeholder(self, name, width, height, fps, duration):
"""
Adds a new FootageItem as a placeholder for the workfile builder.
The placeholder requires width etc.; currently these are probably
only hardcoded values.
Args:
name (str)
width (int)
height (int)
fps (float)
duration (int)
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.add_placeholder',
name=name,
width=width,
height=height,
fps=fps,
duration=duration))
return self._handle_return(res)
def render(self, folder_url, comp_id):
"""
Render all renderqueueitem to 'folder_url'
@ -632,7 +703,8 @@ class AfterEffectsServerStub():
d.get('file_name'),
d.get("instance_id"),
d.get("width"),
d.get("height"))
d.get("height"),
d.get("is_placeholder"))
ret.append(item)
return ret
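A hedged usage sketch of the new stub calls. It assumes a live After Effects session with the OpenPype extension connected, and reuses the dummy values from the code above; the footage id at the end is a stand-in.

from openpype.hosts.aftereffects.api import get_stub

stub = get_stub()

# Create a composition in the Project panel and select it.
comp_id = stub.add_item("CREATEPLACEHOLDER", "COMP")
stub.select_items([comp_id])

# Create an AE PlaceholderItem with dummy footage settings
# (name, width, height, fps, duration).
placeholder_id = stub.add_placeholder("LOADERPLACEHOLDER", 1920, 1060, 25, 10)

# Once real footage is loaded, place it in every comp where the
# placeholder is used (footage_item_id is illustrative).
footage_item_id = comp_id  # stand-in for a loaded FootageItem id
stub.add_item_instead_placeholder(placeholder_id, footage_item_id)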

View file

@ -1,17 +1,15 @@
import re
from openpype.pipeline import get_representation_path
from openpype.hosts.aftereffects.api import (
AfterEffectsLoader,
containerise
)
from openpype.hosts.aftereffects import api
from openpype.hosts.aftereffects.api.lib import (
get_background_layers,
get_unique_layer_name,
)
class BackgroundLoader(AfterEffectsLoader):
class BackgroundLoader(api.AfterEffectsLoader):
"""
Load images from Background family
Creates for each background separate folder with all imported images
@ -21,6 +19,7 @@ class BackgroundLoader(AfterEffectsLoader):
For each load container is created and stored in project (.aep)
metadata
"""
label = "Load JSON Background"
families = ["background"]
representations = ["json"]
@ -48,7 +47,7 @@ class BackgroundLoader(AfterEffectsLoader):
self[:] = [comp]
namespace = namespace or comp_name
return containerise(
return api.containerise(
name,
namespace,
comp,

View file

@ -1,14 +1,11 @@
import re
from openpype.pipeline import get_representation_path
from openpype.hosts.aftereffects.api import (
AfterEffectsLoader,
containerise
)
from openpype.hosts.aftereffects import api
from openpype.hosts.aftereffects.api.lib import get_unique_layer_name
class FileLoader(AfterEffectsLoader):
class FileLoader(api.AfterEffectsLoader):
"""Load images
Stores the imported asset in a container named after the asset.
@ -64,7 +61,7 @@ class FileLoader(AfterEffectsLoader):
self[:] = [comp]
namespace = namespace or comp_name
return containerise(
return api.containerise(
name,
namespace,
comp,

View file

@ -134,6 +134,27 @@ def append_user_scripts():
traceback.print_exc()
def set_app_templates_path():
# Blender requires the app templates to be in `BLENDER_USER_SCRIPTS`.
# After running Blender, we set that variable to our custom path, so
# that the user can use their custom app templates.
# We look among the scripts paths for one of the paths that contains
# the app templates. The path must contain the subfolder
# `startup/bl_app_templates_user`.
paths = os.environ.get("OPENPYPE_BLENDER_USER_SCRIPTS").split(os.pathsep)
app_templates_path = None
for path in paths:
if os.path.isdir(
os.path.join(path, "startup", "bl_app_templates_user")):
app_templates_path = path
break
if app_templates_path and os.path.isdir(app_templates_path):
os.environ["BLENDER_USER_SCRIPTS"] = app_templates_path
def imprint(node: bpy.types.bpy_struct_meta_idprop, data: Dict):
r"""Write `data` to `node` as userDefined attributes

View file

@ -60,6 +60,7 @@ def install():
register_creator_plugin_path(str(CREATE_PATH))
lib.append_user_scripts()
lib.set_app_templates_path()
register_event_callback("new", on_new)
register_event_callback("open", on_open)

View file

@ -0,0 +1,55 @@
from pathlib import Path
from openpype.lib import PreLaunchHook
class AddPythonScriptToLaunchArgs(PreLaunchHook):
"""Add python script to be executed before Blender launch."""
# Append after file argument
order = 15
app_groups = [
"blender",
]
def execute(self):
if not self.launch_context.data.get("python_scripts"):
return
# Add path to workfile to arguments
for python_script_path in self.launch_context.data["python_scripts"]:
self.log.info(
f"Adding python script {python_script_path} to launch"
)
# Test script path exists
python_script_path = Path(python_script_path)
if not python_script_path.exists():
self.log.warning(
f"Python script {python_script_path} doesn't exist. "
"Skipped..."
)
continue
if "--" in self.launch_context.launch_args:
# Insert before separator
separator_index = self.launch_context.launch_args.index("--")
self.launch_context.launch_args.insert(
separator_index,
"-P",
)
self.launch_context.launch_args.insert(
separator_index + 1,
python_script_path.as_posix(),
)
else:
self.launch_context.launch_args.extend(
["-P", python_script_path.as_posix()]
)
# Ensure separator
if "--" not in self.launch_context.launch_args:
self.launch_context.launch_args.append("--")
self.launch_context.launch_args.extend(
[*self.launch_context.data.get("script_args", [])]
)
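# Illustrative result (assumed values, not part of this diff): with
#   data["python_scripts"] = ["/studio/hooks/setup_scene.py"]
#   data["script_args"]    = ["--project", "demo"]
# the launch arguments end up roughly as
#   blender ... workfile.blend -P /studio/hooks/setup_scene.py -- --project demo
# i.e. "-P <script>" is placed before the "--" separator and the script_args
# follow after it, where Blender passes them through to the scripts.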

View file

@ -10,6 +10,7 @@ from qtpy import QtCore, QtWidgets
from openpype import style
from openpype.lib import Logger, StringTemplate
from openpype.pipeline import LegacyCreator, LoaderPlugin
from openpype.pipeline.colorspace import get_remapped_colorspace_to_native
from openpype.settings import get_current_project_settings
from . import constants
@ -701,6 +702,7 @@ class ClipLoader(LoaderPlugin):
]
_mapping = None
_host_settings = None
def apply_settings(cls, project_settings, system_settings):
@ -769,15 +771,26 @@ class ClipLoader(LoaderPlugin):
Returns:
str: native colorspace name defined in mapping or None
"""
# TODO: rewrite to support only pipeline's remapping
if not cls._host_settings:
cls._host_settings = get_current_project_settings()["flame"]
# [Deprecated] way of remapping
if not cls._mapping:
settings = get_current_project_settings()["flame"]
mapping = settings["imageio"]["profilesMapping"]["inputs"]
mapping = (
cls._host_settings["imageio"]["profilesMapping"]["inputs"])
cls._mapping = {
input["ocioName"]: input["flameName"]
for input in mapping
}
return cls._mapping.get(input_colorspace)
native_name = cls._mapping.get(input_colorspace)
if not native_name:
native_name = get_remapped_colorspace_to_native(
input_colorspace, "flame", cls._host_settings["imageio"])
return native_name
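# Sketch of the lookup order above (setting values are assumptions):
# 1) the deprecated "profilesMapping" inputs, e.g.
#      [{"ocioName": "ACES - ACEScg", "flameName": "ACEScg"}]
#    are cached as cls._mapping == {"ACES - ACEScg": "ACEScg"}
# 2) only when that lookup misses is get_remapped_colorspace_to_native()
#    asked for a pipeline-level remapping from the imageio settings.
#   _get_native_colorspace("ACES - ACEScg")    -> "ACEScg"  (old mapping)
#   _get_native_colorspace("Output - Rec.709") -> result of the new remapping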
class OpenClipSolver(flib.MediaInfoFile):

View file

@ -47,6 +47,17 @@ class FlamePrelaunch(PreLaunchHook):
imageio_flame = project_settings["flame"]["imageio"]
# Check whether the 'enabled' key from the host imageio settings exists
# so we can tell if the host is using the new color management framework.
# If the 'enabled' key isn't found we want 'colormanaged' set to True,
# because prior to the key existing we always did color management for
# Flame.
colormanaged = imageio_flame.get("enabled")
# if key was not found, set to True
# ensuring backward compatibility
if colormanaged is None:
colormanaged = True
# get user name and host name
user_name = get_openpype_username()
user_name = user_name.replace(".", "_")
@ -68,9 +79,7 @@ class FlamePrelaunch(PreLaunchHook):
"FrameWidth": int(width),
"FrameHeight": int(height),
"AspectRatio": float((width / height) * _db_p_data["pixelAspect"]),
"FrameRate": self._get_flame_fps(fps),
"FrameDepth": str(imageio_flame["project"]["frameDepth"]),
"FieldDominance": str(imageio_flame["project"]["fieldDominance"])
"FrameRate": self._get_flame_fps(fps)
}
data_to_script = {
@ -78,7 +87,6 @@ class FlamePrelaunch(PreLaunchHook):
"host_name": _env.get("FLAME_WIRETAP_HOSTNAME") or hostname,
"volume_name": volume_name,
"group_name": _env.get("FLAME_WIRETAP_GROUP"),
"color_policy": str(imageio_flame["project"]["colourPolicy"]),
# from project
"project_name": project_name,
@ -86,6 +94,16 @@ class FlamePrelaunch(PreLaunchHook):
"project_data": project_data
}
# add color management data
if colormanaged:
project_data.update({
"FrameDepth": str(imageio_flame["project"]["frameDepth"]),
"FieldDominance": str(
imageio_flame["project"]["fieldDominance"])
})
data_to_script["color_policy"] = str(
imageio_flame["project"]["colourPolicy"])
self.log.info(pformat(dict(_env)))
self.log.info(pformat(data_to_script))
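# Illustrative outcomes of the backward-compatible check above
# (assumed settings shapes, not part of this diff):
#   imageio_flame = {"project": {...}}                   -> colormanaged = True
#   imageio_flame = {"enabled": True, "project": {...}}  -> colormanaged = True
#   imageio_flame = {"enabled": False, "project": {...}} -> colormanaged = False
# Only in the first two cases are FrameDepth, FieldDominance and the colour
# policy written into the wiretap project data sent to the script.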

View file

@ -21,8 +21,13 @@ from .pipeline import (
reset_selection
)
from .constants import (
OPENPYPE_TAG_NAME,
DEFAULT_SEQUENCE_NAME,
DEFAULT_BIN_NAME
)
from .lib import (
pype_tag_name,
flatten,
get_track_items,
get_current_project,
@ -82,8 +87,12 @@ __all__ = [
"file_extensions",
"work_root",
# Constants
"OPENPYPE_TAG_NAME",
"DEFAULT_SEQUENCE_NAME",
"DEFAULT_BIN_NAME",
# Lib functions
"pype_tag_name",
"flatten",
"get_track_items",
"get_current_project",

View file

@ -0,0 +1,3 @@
OPENPYPE_TAG_NAME = "openpypeData"
DEFAULT_SEQUENCE_NAME = "openpypeSequence"
DEFAULT_BIN_NAME = "openpypeBin"

View file

@ -5,7 +5,6 @@ Host specific functions where host api is connected
from copy import deepcopy
import os
import re
import sys
import platform
import functools
import warnings
@ -23,10 +22,26 @@ except ImportError:
from openpype.client import get_project
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io, Anatomy
from openpype.pipeline import (
get_current_project_name, legacy_io, Anatomy
)
from openpype.pipeline.load import filter_containers
from openpype.lib import Logger
from . import tags
from .constants import (
OPENPYPE_TAG_NAME,
DEFAULT_SEQUENCE_NAME,
DEFAULT_BIN_NAME
)
from openpype.pipeline.colorspace import (
get_imageio_config
)
class _CTX:
has_been_setup = False
has_menu = False
parent_gui = None
class DeprecatedWarning(DeprecationWarning):
@ -76,23 +91,14 @@ def deprecated(new_destination):
log = Logger.get_logger(__name__)
self = sys.modules[__name__]
self._has_been_setup = False
self._has_menu = False
self._registered_gui = None
self._parent = None
self.pype_tag_name = "openpypeData"
self.default_sequence_name = "openpypeSequence"
self.default_bin_name = "openpypeBin"
def flatten(_list):
for item in _list:
if isinstance(item, (list, tuple)):
for sub_item in flatten(item):
def flatten(list_):
for item_ in list_:
if isinstance(item_, (list, tuple)):
for sub_item in flatten(item_):
yield sub_item
else:
yield item
yield item_
def get_current_project(remove_untitled=False):
@ -125,7 +131,7 @@ def get_current_sequence(name=None, new=False):
if new:
# create new
name = name or self.default_sequence_name
name = name or DEFAULT_SEQUENCE_NAME
sequence = hiero.core.Sequence(name)
root_bin.addItem(hiero.core.BinItem(sequence))
elif name:
@ -339,7 +345,7 @@ def get_track_item_tags(track_item):
# collect all tags which are not openpype tag
returning_tag_data.extend(
tag for tag in _tags
if tag.name() != self.pype_tag_name
if tag.name() != OPENPYPE_TAG_NAME
)
return returning_tag_data
@ -379,7 +385,7 @@ def set_track_openpype_tag(track, data=None):
# if pype tag available then update with input data
tag = tags.create_tag(
"{}_{}".format(
self.pype_tag_name,
OPENPYPE_TAG_NAME,
_get_tag_unique_hash()
),
tag_data
@ -406,7 +412,7 @@ def get_track_openpype_tag(track):
return None
for tag in _tags:
# return only correct tag defined by global name
if self.pype_tag_name in tag.name():
if OPENPYPE_TAG_NAME in tag.name():
return tag
@ -478,7 +484,7 @@ def get_trackitem_openpype_tag(track_item):
return None
for tag in _tags:
# return only correct tag defined by global name
if self.pype_tag_name in tag.name():
if OPENPYPE_TAG_NAME in tag.name():
return tag
@ -510,7 +516,7 @@ def set_trackitem_openpype_tag(track_item, data=None):
# if pype tag available then update with input data
tag = tags.create_tag(
"{}_{}".format(
self.pype_tag_name,
OPENPYPE_TAG_NAME,
_get_tag_unique_hash()
),
tag_data
@ -692,29 +698,29 @@ def setup(console=False, port=None, menu=True):
menu (bool, optional): Display file menu in Hiero.
"""
if self._has_been_setup:
if _CTX.has_been_setup:
teardown()
add_submission()
if menu:
add_to_filemenu()
self._has_menu = True
_CTX.has_menu = True
self._has_been_setup = True
_CTX.has_been_setup = True
log.debug("pyblish: Loaded successfully.")
def teardown():
"""Remove integration"""
if not self._has_been_setup:
if not _CTX.has_been_setup:
return
if self._has_menu:
if _CTX.has_menu:
remove_from_filemenu()
self._has_menu = False
_CTX.has_menu = False
self._has_been_setup = False
_CTX.has_been_setup = False
log.debug("pyblish: Integration torn down successfully")
@ -922,7 +928,7 @@ def create_bin(path=None, project=None):
# get the first loaded project
project = project or get_current_project()
path = path or self.default_bin_name
path = path or DEFAULT_BIN_NAME
path = path.replace("\\", "/").split("/")
@ -1047,6 +1053,18 @@ def apply_colorspace_project():
imageio = get_project_settings(project_name)["hiero"]["imageio"]
presets = imageio.get("workfile")
# backward compatibility layer
# TODO: remove this after some time
config_data = get_imageio_config(
project_name=get_current_project_name(),
host_name="hiero"
)
if config_data:
presets.update({
"ocioConfigName": "custom"
})
# save the workfile as subversion "comment:_colorspaceChange"
split_current_file = os.path.splitext(current_file)
copy_current_file = current_file
@ -1293,11 +1311,11 @@ def before_project_save(event):
def get_main_window():
"""Acquire Nuke's main window"""
if self._parent is None:
if _CTX.parent_gui is None:
top_widgets = QtWidgets.QApplication.topLevelWidgets()
name = "Foundry::UI::DockMainWindow"
main_window = next(widget for widget in top_widgets if
widget.inherits("QMainWindow") and
widget.metaObject().className() == name)
self._parent = main_window
return self._parent
_CTX.parent_gui = main_window
return _CTX.parent_gui

View file

@ -3,20 +3,18 @@
import os
import re
import sys
import ast
import opentimelineio as otio
from . import utils
import hiero.core
import hiero.ui
self = sys.modules[__name__]
self.track_types = {
TRACK_TYPE_MAP = {
hiero.core.VideoTrack: otio.schema.TrackKind.Video,
hiero.core.AudioTrack: otio.schema.TrackKind.Audio
}
self.project_fps = None
self.marker_color_map = {
MARKER_COLOR_MAP = {
"magenta": otio.schema.MarkerColor.MAGENTA,
"red": otio.schema.MarkerColor.RED,
"yellow": otio.schema.MarkerColor.YELLOW,
@ -24,30 +22,21 @@ self.marker_color_map = {
"cyan": otio.schema.MarkerColor.CYAN,
"blue": otio.schema.MarkerColor.BLUE,
}
self.timeline = None
self.include_tags = True
def flatten(_list):
for item in _list:
if isinstance(item, (list, tuple)):
for sub_item in flatten(item):
class CTX:
project_fps = None
timeline = None
include_tags = True
def flatten(list_):
for item_ in list_:
if isinstance(item_, (list, tuple)):
for sub_item in flatten(item_):
yield sub_item
else:
yield item
def get_current_hiero_project(remove_untitled=False):
projects = flatten(hiero.core.projects())
if not remove_untitled:
return next(iter(projects))
# if remove_untitled
for proj in projects:
if "Untitled" in proj.name():
proj.close()
else:
return proj
yield item_
def create_otio_rational_time(frame, fps):
@ -152,7 +141,7 @@ def create_otio_reference(clip):
file_head = media_source.filenameHead()
is_sequence = not media_source.singleFile()
frame_duration = media_source.duration()
fps = utils.get_rate(clip) or self.project_fps
fps = utils.get_rate(clip) or CTX.project_fps
extension = os.path.splitext(path)[-1]
if is_sequence:
@ -217,8 +206,8 @@ def get_marker_color(tag):
res = re.search(pat, icon)
if res:
color = res.groupdict().get('color')
if color.lower() in self.marker_color_map:
return self.marker_color_map[color.lower()]
if color.lower() in MARKER_COLOR_MAP:
return MARKER_COLOR_MAP[color.lower()]
return otio.schema.MarkerColor.RED
@ -232,7 +221,7 @@ def create_otio_markers(otio_item, item):
# Hiero adds this tag to a lot of clips
continue
frame_rate = utils.get_rate(item) or self.project_fps
frame_rate = utils.get_rate(item) or CTX.project_fps
marked_range = otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(
@ -279,7 +268,7 @@ def create_otio_clip(track_item):
duration = int(track_item.duration())
fps = utils.get_rate(track_item) or self.project_fps
fps = utils.get_rate(track_item) or CTX.project_fps
name = track_item.name()
media_reference = create_otio_reference(clip)
@ -296,7 +285,7 @@ def create_otio_clip(track_item):
)
# Add tags as markers
if self.include_tags:
if CTX.include_tags:
create_otio_markers(otio_clip, track_item)
create_otio_markers(otio_clip, track_item.source())
@ -319,13 +308,13 @@ def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):
def _create_otio_timeline():
project = get_current_hiero_project(remove_untitled=False)
metadata = _get_metadata(self.timeline)
project = CTX.timeline.project()
metadata = _get_metadata(CTX.timeline)
metadata.update({
"openpype.timeline.width": int(self.timeline.format().width()),
"openpype.timeline.height": int(self.timeline.format().height()),
"openpype.timeline.pixelAspect": int(self.timeline.format().pixelAspect()), # noqa
"openpype.timeline.width": int(CTX.timeline.format().width()),
"openpype.timeline.height": int(CTX.timeline.format().height()),
"openpype.timeline.pixelAspect": int(CTX.timeline.format().pixelAspect()), # noqa
"openpype.project.useOCIOEnvironmentOverride": project.useOCIOEnvironmentOverride(), # noqa
"openpype.project.lutSetting16Bit": project.lutSetting16Bit(),
"openpype.project.lutSetting8Bit": project.lutSetting8Bit(),
@ -339,10 +328,10 @@ def _create_otio_timeline():
})
start_time = create_otio_rational_time(
self.timeline.timecodeStart(), self.project_fps)
CTX.timeline.timecodeStart(), CTX.project_fps)
return otio.schema.Timeline(
name=self.timeline.name(),
name=CTX.timeline.name(),
global_start_time=start_time,
metadata=metadata
)
@ -351,7 +340,7 @@ def _create_otio_timeline():
def create_otio_track(track_type, track_name):
return otio.schema.Track(
name=track_name,
kind=self.track_types[track_type]
kind=TRACK_TYPE_MAP[track_type]
)
@ -363,7 +352,7 @@ def add_otio_gap(track_item, otio_track, prev_out):
gap = otio.opentime.TimeRange(
duration=otio.opentime.RationalTime(
gap_length,
self.project_fps
CTX.project_fps
)
)
otio_gap = otio.schema.Gap(source_range=gap)
@ -396,14 +385,14 @@ def create_otio_timeline():
return track_item.parent().items()[itemindex - 1]
# get current timeline
self.timeline = hiero.ui.activeSequence()
self.project_fps = self.timeline.framerate().toFloat()
CTX.timeline = hiero.ui.activeSequence()
CTX.project_fps = CTX.timeline.framerate().toFloat()
# convert timeline to otio
otio_timeline = _create_otio_timeline()
# loop all defined track types
for track in self.timeline.items():
for track in CTX.timeline.items():
# skip if track is disabled
if not track.isEnabled():
continue
@ -441,7 +430,7 @@ def create_otio_timeline():
otio_track.append(otio_clip)
# Add tags as markers
if self.include_tags:
if CTX.include_tags:
create_otio_markers(otio_track, track)
# add track to otio timeline

View file

@ -41,8 +41,8 @@ class LoadClip(phiero.SequenceLoader):
clip_name_template = "{asset}_{subset}_{representation}"
@classmethod
def apply_settings(cls, project_settings, system_settings):
plugin_type_settings = (
project_settings
.get("hiero", {})

View file

@ -310,7 +310,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
# add pypedata marker to otio_clip metadata
for marker in otio_clip.markers:
if phiero.pype_tag_name in marker.name:
if phiero.OPENPYPE_TAG_NAME in marker.name:
otio_clip.metadata.update(marker.metadata)
return {"otioClip": otio_clip}

View file

@ -8,7 +8,6 @@ from qtpy.QtGui import QPixmap
import hiero.ui
from openpype.pipeline import legacy_io
from openpype.hosts.hiero import api as phiero
from openpype.hosts.hiero.api.otio import hiero_export
@ -22,8 +21,8 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
asset = legacy_io.Session["AVALON_ASSET"]
subset = "workfile"
project = phiero.get_current_project()
active_timeline = hiero.ui.activeSequence()
project = active_timeline.project()
fps = active_timeline.framerate().toFloat()
# adding otio timeline to context

View file

@ -0,0 +1,56 @@
import attr
import hou
from openpype.hosts.houdini.api.lib import get_color_management_preferences
@attr.s
class LayerMetadata(object):
"""Data class for Render Layer metadata."""
frameStart = attr.ib()
frameEnd = attr.ib()
@attr.s
class RenderProduct(object):
"""Getting Colorspace as
Specific Render Product Parameter for submitting
publish job.
"""
colorspace = attr.ib() # colorspace
view = attr.ib()
productName = attr.ib(default=None)
class ARenderProduct(object):
def __init__(self):
"""Constructor."""
# Initialize
self.layer_data = self._get_layer_data()
self.layer_data.products = self.get_colorspace_data()
def _get_layer_data(self):
return LayerMetadata(
frameStart=int(hou.playbar.frameRange()[0]),
frameEnd=int(hou.playbar.frameRange()[1]),
)
def get_colorspace_data(self):
"""To be implemented by renderer class.
This should return a list of RenderProducts.
Returns:
list: List of RenderProduct
"""
data = get_color_management_preferences()
colorspace_data = [
RenderProduct(
colorspace=data["display"],
view=data["view"],
productName=""
)
]
return colorspace_data
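# Minimal usage sketch (display/view values are assumptions):
#   products = ARenderProduct()
#   products.layer_data.frameStart  -> e.g. 1001 (current playbar start)
#   products.layer_data.products    -> [RenderProduct(colorspace="ACES",
#                                                     view="sRGB",
#                                                     productName="")]
# The publish job submission later reads this to tag the rendered frames.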

View file

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import sys
import os
import re
import uuid
import logging
from contextlib import contextmanager
@ -581,3 +582,74 @@ def splitext(name, allowed_multidot_extensions):
return name[:-len(ext)], ext
return os.path.splitext(name)
def get_top_referenced_parm(parm):
processed = set() # disallow infinite loop
while True:
if parm.path() in processed:
raise RuntimeError("Parameter references result in cycle.")
processed.add(parm.path())
ref = parm.getReferencedParm()
if ref.path() == parm.path():
# It returns itself when it doesn't reference
# another parameter
return ref
else:
parm = ref
def evalParmNoFrame(node, parm, pad_character="#"):
parameter = node.parm(parm)
assert parameter, "Parameter does not exist: %s.%s" % (node, parm)
# If the parameter has a parameter reference, then get that
# parameter instead as otherwise `unexpandedString()` fails.
parameter = get_top_referenced_parm(parameter)
# Substitute out the frame numbering with padded characters
try:
raw = parameter.unexpandedString()
except hou.Error as exc:
print("Failed: %s" % parameter)
raise RuntimeError(exc)
def replace(match):
padding = 1
n = match.group(2)
if n and int(n):
padding = int(n)
return pad_character * padding
expression = re.sub(r"(\$F([0-9]*))", replace, raw)
with hou.ScriptEvalContext(parameter):
return hou.expandStringAtFrame(expression, 0)
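# Worked example (the parameter value is an assumption): if rop.parm("vm_picture")
# holds the unexpanded string
#     "$HIP/render/$HIPNAME.$F4.exr"
# then evalParmNoFrame(rop, "vm_picture") substitutes "$F4" with "####" before
# expansion and returns something like
#     "/path/to/hip/render/shot_v001.####.exr"
# keeping the frame token as padding characters instead of a frame number.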
def get_color_management_preferences():
"""Get default OCIO preferences"""
data = {
"config": hou.Color.ocio_configPath()
}
# Get default display and view from OCIO
display = hou.Color.ocio_defaultDisplay()
disp_regex = re.compile(r"^(?P<name>.+-)(?P<display>.+)$")
disp_match = disp_regex.match(display)
view = hou.Color.ocio_defaultView()
view_regex = re.compile(r"^(?P<name>.+- )(?P<view>.+)$")
view_match = view_regex.match(view)
data.update({
"display": disp_match.group("display"),
"view": view_match.group("view")
})
return data
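# Worked example of the regex parsing above (input strings are assumptions):
#   a display reported as "<config>-sRGB"  yields data["display"] == "sRGB"
#   a view reported as    "<config>- ACES" yields data["view"]    == "ACES"
# i.e. everything up to the last "-" (display) or "- " (view) is treated as a
# prefix and only the trailing token is kept.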

View file

@ -0,0 +1,71 @@
from openpype.hosts.houdini.api import plugin
from openpype.lib import EnumDef
class CreateArnoldRop(plugin.HoudiniCreator):
"""Arnold ROP"""
identifier = "io.openpype.creators.houdini.arnold_rop"
label = "Arnold ROP"
family = "arnold_rop"
icon = "magic"
defaults = ["master"]
# Default extension
ext = "exr"
def create(self, subset_name, instance_data, pre_create_data):
import hou
# Remove the 'active' key; we check the bypass flag of the nodes instead
instance_data.pop("active", None)
instance_data.update({"node_type": "arnold"})
# Add chunk size attribute
instance_data["chunkSize"] = 1
# Submit for job publishing
instance_data["farm"] = True
instance = super(CreateArnoldRop, self).create(
subset_name,
instance_data,
pre_create_data) # type: plugin.CreatedInstance
instance_node = hou.node(instance.get("instance_node"))
ext = pre_create_data.get("image_format")
filepath = "{renders_dir}{subset_name}/{subset_name}.$F4.{ext}".format(
renders_dir=hou.text.expandString("$HIP/pyblish/renders/"),
subset_name=subset_name,
ext=ext,
)
parms = {
# Render frame range
"trange": 1,
# Arnold ROP settings
"ar_picture": filepath,
"ar_exr_half_precision": 1 # half precision
}
instance_node.setParms(parms)
# Lock any parameters in this list
to_lock = ["family", "id"]
self.lock_parameters(instance_node, to_lock)
def get_pre_create_attr_defs(self):
attrs = super(CreateArnoldRop, self).get_pre_create_attr_defs()
image_format_enum = [
"bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
"rad", "rat", "rta", "sgi", "tga", "tif",
]
return attrs + [
EnumDef("image_format",
image_format_enum,
default=self.ext,
label="Image Format Options")
]
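# Illustrative result of the path template above (assumed subset name):
# with subset_name == "arnoldRopMain" and image_format == "exr",
# "$HIP/pyblish/renders/" is expanded at creation time and the ROP gets
#   ar_picture = "<hip dir>/pyblish/renders/arnoldRopMain/arnoldRopMain.$F4.exr"
# leaving "$F4" in place so one file is written per frame.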

View file

@ -0,0 +1,114 @@
# -*- coding: utf-8 -*-
"""Creator plugin to create Karma ROP."""
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.lib import BoolDef, EnumDef, NumberDef
class CreateKarmaROP(plugin.HoudiniCreator):
"""Karma ROP"""
identifier = "io.openpype.creators.houdini.karma_rop"
label = "Karma ROP"
family = "karma_rop"
icon = "magic"
defaults = ["master"]
def create(self, subset_name, instance_data, pre_create_data):
import hou # noqa
instance_data.pop("active", None)
instance_data.update({"node_type": "karma"})
# Add chunk size attribute
instance_data["chunkSize"] = 10
# Submit for job publishing
instance_data["farm"] = True
instance = super(CreateKarmaROP, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
instance_node = hou.node(instance.get("instance_node"))
ext = pre_create_data.get("image_format")
filepath = "{renders_dir}{subset_name}/{subset_name}.$F4.{ext}".format(
renders_dir=hou.text.expandString("$HIP/pyblish/renders/"),
subset_name=subset_name,
ext=ext,
)
checkpoint = "{cp_dir}{subset_name}.$F4.checkpoint".format(
cp_dir=hou.text.expandString("$HIP/pyblish/"),
subset_name=subset_name
)
usd_directory = "{usd_dir}{subset_name}_$RENDERID".format(
usd_dir=hou.text.expandString("$HIP/pyblish/renders/usd_renders/"), # noqa
subset_name=subset_name
)
parms = {
# Render Frame Range
"trange": 1,
# Karma ROP Setting
"picture": filepath,
# Karma Checkpoint Setting
"productName": checkpoint,
# USD Output Directory
"savetodirectory": usd_directory,
}
res_x = pre_create_data.get("res_x")
res_y = pre_create_data.get("res_y")
if self.selected_nodes:
# If a camera is found in the selection
# we will use it as the render camera
camera = None
for node in self.selected_nodes:
if node.type().name() == "cam":
camera = node.path()
has_camera = pre_create_data.get("cam_res")
if has_camera:
res_x = node.evalParm("resx")
res_y = node.evalParm("resy")
if not camera:
self.log.warning("No render camera found in selection")
parms.update({
"camera": camera or "",
"resolutionx": res_x,
"resolutiony": res_y,
})
instance_node.setParms(parms)
# Lock some Avalon attributes
to_lock = ["family", "id"]
self.lock_parameters(instance_node, to_lock)
def get_pre_create_attr_defs(self):
attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs()
image_format_enum = [
"bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
"rad", "rat", "rta", "sgi", "tga", "tif",
]
return attrs + [
EnumDef("image_format",
image_format_enum,
default="exr",
label="Image Format Options"),
NumberDef("res_x",
label="width",
default=1920,
decimals=0),
NumberDef("res_y",
label="height",
default=720,
decimals=0),
BoolDef("cam_res",
label="Camera Resolution",
default=False)
]

View file

@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-
"""Creator plugin to create Mantra ROP."""
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.lib import EnumDef, BoolDef
class CreateMantraROP(plugin.HoudiniCreator):
"""Mantra ROP"""
identifier = "io.openpype.creators.houdini.mantra_rop"
label = "Mantra ROP"
family = "mantra_rop"
icon = "magic"
defaults = ["master"]
def create(self, subset_name, instance_data, pre_create_data):
import hou # noqa
instance_data.pop("active", None)
instance_data.update({"node_type": "ifd"})
# Add chunk size attribute
instance_data["chunkSize"] = 10
# Submit for job publishing
instance_data["farm"] = True
instance = super(CreateMantraROP, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
instance_node = hou.node(instance.get("instance_node"))
ext = pre_create_data.get("image_format")
filepath = "{renders_dir}{subset_name}/{subset_name}.$F4.{ext}".format(
renders_dir=hou.text.expandString("$HIP/pyblish/renders/"),
subset_name=subset_name,
ext=ext,
)
parms = {
# Render Frame Range
"trange": 1,
# Mantra ROP Setting
"vm_picture": filepath,
}
if self.selected_nodes:
# If a camera is found in the selection
# we will use it as the render camera
camera = None
for node in self.selected_nodes:
if node.type().name() == "cam":
camera = node.path()
if not camera:
self.log.warning("No render camera found in selection")
parms.update({"camera": camera or ""})
custom_res = pre_create_data.get("override_resolution")
if custom_res:
parms.update({"override_camerares": 1})
instance_node.setParms(parms)
# Lock some Avalon attributes
to_lock = ["family", "id"]
self.lock_parameters(instance_node, to_lock)
def get_pre_create_attr_defs(self):
attrs = super(CreateMantraROP, self).get_pre_create_attr_defs()
image_format_enum = [
"bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
"rad", "rat", "rta", "sgi", "tga", "tif",
]
return attrs + [
EnumDef("image_format",
image_format_enum,
default="exr",
label="Image Format Options"),
BoolDef("override_resolution",
label="Override Camera Resolution",
tooltip="Override the current camera "
"resolution, recommended for IPR.",
default=False)
]

View file

@ -1,44 +1,42 @@
# -*- coding: utf-8 -*-
"""Creator plugin to create Redshift ROP."""
import hou # noqa
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.lib import EnumDef
class CreateRedshiftROP(plugin.HoudiniCreator):
"""Redshift ROP"""
identifier = "io.openpype.creators.houdini.redshift_rop"
label = "Redshift ROP"
family = "redshift_rop"
icon = "magic"
defaults = ["master"]
ext = "exr"
def create(self, subset_name, instance_data, pre_create_data):
import hou # noqa
instance_data.pop("active", None)
instance_data.update({"node_type": "Redshift_ROP"})
# Add chunk size attribute
instance_data["chunkSize"] = 10
# Clear the family prefix from the subset
subset = subset_name
subset_no_prefix = subset[len(self.family):]
subset_no_prefix = subset_no_prefix[0].lower() + subset_no_prefix[1:]
subset_name = subset_no_prefix
# Submit for job publishing
instance_data["farm"] = True
instance = super(CreateRedshiftROP, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
pre_create_data)
instance_node = hou.node(instance.get("instance_node"))
basename = instance_node.name()
instance_node.setName(basename + "_ROP", unique_name=True)
# Also create the linked Redshift IPR Rop
try:
ipr_rop = self.parent.createNode(
ipr_rop = instance_node.parent().createNode(
"Redshift_IPR", node_name=basename + "_IPR"
)
except hou.OperationFailed:
@ -50,19 +48,60 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
ipr_rop.setPosition(instance_node.position() + hou.Vector2(0, -1))
# Set the linked rop to the Redshift ROP
ipr_rop.parm("linked_rop").set(ipr_rop.relativePathTo(instance))
ipr_rop.parm("linked_rop").set(instance_node.path())
ext = pre_create_data.get("image_format")
filepath = "{renders_dir}{subset_name}/{subset_name}.{fmt}".format(
renders_dir=hou.text.expandString("$HIP/pyblish/renders/"),
subset_name=subset_name,
fmt="${aov}.$F4.{ext}".format(aov="AOV", ext=ext)
)
ext_format_index = {"exr": 0, "tif": 1, "jpg": 2, "png": 3}
prefix = '${HIP}/render/${HIPNAME}/`chs("subset")`.${AOV}.$F4.exr'
parms = {
# Render frame range
"trange": 1,
# Redshift ROP settings
"RS_outputFileNamePrefix": prefix,
"RS_outputMultilayerMode": 0, # no multi-layered exr
"RS_outputFileNamePrefix": filepath,
"RS_outputMultilayerMode": "1", # no multi-layered exr
"RS_outputBeautyAOVSuffix": "beauty",
"RS_outputFileFormat": ext_format_index[ext],
}
if self.selected_nodes:
# set up the render camera from the selected node
camera = None
for node in self.selected_nodes:
if node.type().name() == "cam":
camera = node.path()
parms.update({
"RS_renderCamera": camera or ""})
instance_node.setParms(parms)
# Lock some Avalon attributes
to_lock = ["family", "id"]
self.lock_parameters(instance_node, to_lock)
def remove_instances(self, instances):
for instance in instances:
node = instance.data.get("instance_node")
ipr_node = hou.node(f"{node}_IPR")
if ipr_node:
ipr_node.destroy()
return super(CreateRedshiftROP, self).remove_instances(instances)
def get_pre_create_attr_defs(self):
attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs()
image_format_enum = [
"exr", "tif", "jpg", "png",
]
return attrs + [
EnumDef("image_format",
image_format_enum,
default=self.ext,
label="Image Format Options")
]

View file

@ -0,0 +1,156 @@
# -*- coding: utf-8 -*-
"""Creator plugin to create VRay ROP."""
import hou
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.lib import EnumDef, BoolDef
class CreateVrayROP(plugin.HoudiniCreator):
"""VRay ROP"""
identifier = "io.openpype.creators.houdini.vray_rop"
label = "VRay ROP"
family = "vray_rop"
icon = "magic"
defaults = ["master"]
ext = "exr"
def create(self, subset_name, instance_data, pre_create_data):
instance_data.pop("active", None)
instance_data.update({"node_type": "vray_renderer"})
# Add chunk size attribute
instance_data["chunkSize"] = 10
# Submit for job publishing
instance_data["farm"] = True
instance = super(CreateVrayROP, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
instance_node = hou.node(instance.get("instance_node"))
# Add IPR for Vray
basename = instance_node.name()
try:
ipr_rop = instance_node.parent().createNode(
"vray", node_name=basename + "_IPR"
)
except hou.OperationFailed:
raise plugin.OpenPypeCreatorError(
"Cannot create Vray render node. "
"Make sure Vray installed and enabled!"
)
ipr_rop.setPosition(instance_node.position() + hou.Vector2(0, -1))
ipr_rop.parm("rop").set(instance_node.path())
parms = {
"trange": 1,
"SettingsEXR_bits_per_channel": "16" # half precision
}
if self.selected_nodes:
# set up the render camera from the selected node
camera = None
for node in self.selected_nodes:
if node.type().name() == "cam":
camera = node.path()
parms.update({
"render_camera": camera or ""
})
# Enable render element
ext = pre_create_data.get("image_format")
instance_data["RenderElement"] = pre_create_data.get("render_element_enabled") # noqa
if pre_create_data.get("render_element_enabled", True):
# Vray has its own tag for AOV file output
filepath = "{renders_dir}{subset_name}/{subset_name}.{fmt}".format(
renders_dir=hou.text.expandString("$HIP/pyblish/renders/"),
subset_name=subset_name,
fmt="${aov}.$F4.{ext}".format(aov="AOV",
ext=ext)
)
filepath = "{}{}".format(
hou.text.expandString("$HIP/pyblish/renders/"),
"{}/{}.${}.$F4.{}".format(subset_name,
subset_name,
"AOV",
ext)
)
re_rop = instance_node.parent().createNode(
"vray_render_channels",
node_name=basename + "_render_element"
)
# move the render element node next to the vray renderer node
re_rop.setPosition(instance_node.position() + hou.Vector2(0, 1))
re_path = re_rop.path()
parms.update({
"use_render_channels": 1,
"SettingsOutput_img_file_path": filepath,
"render_network_render_channels": re_path
})
else:
filepath = "{renders_dir}{subset_name}/{subset_name}.{fmt}".format(
renders_dir=hou.text.expandString("$HIP/pyblish/renders/"),
subset_name=subset_name,
fmt="$F4.{ext}".format(ext=ext)
)
parms.update({
"use_render_channels": 0,
"SettingsOutput_img_file_path": filepath
})
custom_res = pre_create_data.get("override_resolution")
if custom_res:
parms.update({"override_camerares": 1})
instance_node.setParms(parms)
# lock parameters from AVALON
to_lock = ["family", "id"]
self.lock_parameters(instance_node, to_lock)
def remove_instances(self, instances):
for instance in instances:
node = instance.data.get("instance_node")
# remove the extra render nodes created by plugins
# such as Vray and Redshift
ipr_node = hou.node("{}{}".format(node, "_IPR"))
if ipr_node:
ipr_node.destroy()
re_node = hou.node("{}{}".format(node,
"_render_element"))
if re_node:
re_node.destroy()
return super(CreateVrayROP, self).remove_instances(instances)
def get_pre_create_attr_defs(self):
attrs = super(CreateVrayROP, self).get_pre_create_attr_defs()
image_format_enum = [
"bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
"rad", "rat", "rta", "sgi", "tga", "tif",
]
return attrs + [
EnumDef("image_format",
image_format_enum,
default=self.ext,
label="Image Format Options"),
BoolDef("override_resolution",
label="Override Camera Resolution",
tooltip="Override the current camera "
"resolution, recommended for IPR.",
default=False),
BoolDef("render_element_enabled",
label="Render Element",
tooltip="Create Render Element Node "
"if enabled",
default=False)
]

View file

@ -0,0 +1,135 @@
import os
import re
import hou
import pyblish.api
from openpype.hosts.houdini.api import colorspace
from openpype.hosts.houdini.api.lib import (
evalParmNoFrame, get_color_management_preferences)
class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
"""Collect Arnold ROP Render Products
Collects the instance.data["files"] for the render products.
Provides:
instance -> files
"""
label = "Arnold ROP Render Products"
order = pyblish.api.CollectorOrder + 0.4
hosts = ["houdini"]
families = ["arnold_rop"]
def process(self, instance):
rop = hou.node(instance.data.get("instance_node"))
# Collect chunkSize
chunk_size_parm = rop.parm("chunkSize")
if chunk_size_parm:
chunk_size = int(chunk_size_parm.eval())
instance.data["chunkSize"] = chunk_size
self.log.debug("Chunk Size: %s" % chunk_size)
default_prefix = evalParmNoFrame(rop, "ar_picture")
render_products = []
# Default beauty AOV
beauty_product = self.get_render_product_name(prefix=default_prefix,
suffix=None)
render_products.append(beauty_product)
files_by_aov = {
"": self.generate_expected_files(instance, beauty_product)
}
num_aovs = rop.evalParm("ar_aovs")
for index in range(1, num_aovs + 1):
# Skip disabled AOVs
if not rop.evalParm("ar_enable_aovP{}".format(index)):
continue
if rop.evalParm("ar_aov_exr_enable_layer_name{}".format(index)):
label = rop.evalParm("ar_aov_exr_layer_name{}".format(index))
else:
label = evalParmNoFrame(rop, "ar_aov_label{}".format(index))
aov_product = self.get_render_product_name(default_prefix,
suffix=label)
render_products.append(aov_product)
files_by_aov[label] = self.generate_expected_files(instance,
aov_product)
for product in render_products:
self.log.debug("Found render product: {}".format(product))
instance.data["files"] = list(render_products)
instance.data["renderProducts"] = colorspace.ARenderProduct()
# For now by default do NOT try to publish the rendered output
instance.data["publishJobState"] = "Suspended"
instance.data["attachTo"] = [] # stub required data
if "expectedFiles" not in instance.data:
instance.data["expectedFiles"] = list()
instance.data["expectedFiles"].append(files_by_aov)
# update the colorspace data
colorspace_data = get_color_management_preferences()
instance.data["colorspaceConfig"] = colorspace_data["config"]
instance.data["colorspaceDisplay"] = colorspace_data["display"]
instance.data["colorspaceView"] = colorspace_data["view"]
def get_render_product_name(self, prefix, suffix):
"""Return the output filename using the AOV prefix and suffix"""
# When AOV is explicitly defined in prefix we just swap it out
# directly with the AOV suffix to embed it.
# Note: ${AOV} seems to be evaluated in the parameter as %AOV%
if "%AOV%" in prefix:
# It seems that when some special separator characters are present
# before the %AOV% token, Redshift will silently remove them if
# there is no suffix for the current product, for example:
# foo_%AOV% -> foo.exr
pattern = "%AOV%" if suffix else "[._-]?%AOV%"
product_name = re.sub(pattern,
suffix,
prefix,
flags=re.IGNORECASE)
else:
if suffix:
# Add ".{suffix}" before the extension
prefix_base, ext = os.path.splitext(prefix)
product_name = prefix_base + "." + suffix + ext
else:
product_name = prefix
return product_name
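# Worked examples for the prefix handling above (paths are illustrative):
#   get_render_product_name("render/shot_%AOV%.####.exr", "diffuse")
#     -> "render/shot_diffuse.####.exr"    (token swapped in place)
#   get_render_product_name("render/shot_%AOV%.####.exr", "")
#     -> "render/shot.####.exr"            (separator and empty token stripped)
#   get_render_product_name("render/shot.####.exr", "diffuse")
#     -> "render/shot.####.diffuse.exr"    (suffix added before the extension)
#   get_render_product_name("render/shot.####.exr", None)
#     -> "render/shot.####.exr"            (beauty keeps the prefix as-is)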
def generate_expected_files(self, instance, path):
"""Create expected files in instance data"""
dir = os.path.dirname(path)
file = os.path.basename(path)
if "#" in file:
def replace(match):
return "%0{}d".format(len(match.group()))
file = re.sub("#+", replace, file)
if "%" not in file:
return path
expected_files = []
start = instance.data["frameStart"]
end = instance.data["frameEnd"]
for i in range(int(start), (int(end) + 1)):
expected_files.append(
os.path.join(dir, (file % i)).replace("\\", "/"))
return expected_files
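# Worked example (values are illustrative):
#   path = "/renders/shot/shot.####.exr", frameStart = 1001, frameEnd = 1003
# "####" becomes "%04d", which is then filled per frame:
#   ["/renders/shot/shot.1001.exr",
#    "/renders/shot/shot.1002.exr",
#    "/renders/shot/shot.1003.exr"]
# A path without "#" or "%" placeholders is returned unchanged (single file).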

View file

@ -11,15 +11,13 @@ from openpype.hosts.houdini.api import lib
class CollectFrames(pyblish.api.InstancePlugin):
"""Collect all frames which would be saved from the ROP nodes"""
order = pyblish.api.CollectorOrder
order = pyblish.api.CollectorOrder + 0.01
label = "Collect Frames"
families = ["vdbcache", "imagesequence", "ass", "redshiftproxy", "review"]
def process(self, instance):
ropnode = hou.node(instance.data["instance_node"])
frame_data = lib.get_frame_data(ropnode)
instance.data.update(frame_data)
start_frame = instance.data.get("frameStart", None)
end_frame = instance.data.get("frameEnd", None)

View file

@ -0,0 +1,56 @@
import hou
import pyblish.api
class CollectInstanceNodeFrameRange(pyblish.api.InstancePlugin):
"""Collect time range frame data for the instance node."""
order = pyblish.api.CollectorOrder + 0.001
label = "Instance Node Frame Range"
hosts = ["houdini"]
def process(self, instance):
node_path = instance.data.get("instance_node")
node = hou.node(node_path) if node_path else None
if not node_path or not node:
self.log.debug("No instance node found for instance: "
"{}".format(instance))
return
frame_data = self.get_frame_data(node)
if not frame_data:
return
self.log.info("Collected time data: {}".format(frame_data))
instance.data.update(frame_data)
def get_frame_data(self, node):
"""Get the frame data: start frame, end frame and steps
Args:
node(hou.Node)
Returns:
dict
"""
data = {}
if node.parm("trange") is None:
self.log.debug("Node has no 'trange' parameter: "
"{}".format(node.path()))
return data
if node.evalParm("trange") == 0:
# Ignore 'render current frame'
self.log.debug("Node '{}' has 'Render current frame' set. "
"Time range data ignored.".format(node.path()))
return data
data["frameStart"] = node.evalParm("f1")
data["frameEnd"] = node.evalParm("f2")
data["byFrameStep"] = node.evalParm("f3")
return data
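# Illustrative outcomes of get_frame_data() (parameter values are assumptions):
#   trange == 0                         -> {}  (render current frame, ignored)
#   trange == 1, f1=1001, f2=1100, f3=1
#       -> {"frameStart": 1001, "frameEnd": 1100, "byFrameStep": 1}
# The collected dict is merged into instance.data for later plugins.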

View file

@ -70,16 +70,10 @@ class CollectInstances(pyblish.api.ContextPlugin):
if "active" in data:
data["publish"] = data["active"]
data.update(self.get_frame_data(node))
# Create nice name if the instance has a frame range.
label = data.get("name", node.name())
label += " (%s)" % data["asset"] # include asset in name
if "frameStart" in data and "frameEnd" in data:
frames = "[{frameStart} - {frameEnd}]".format(**data)
label = "{} {}".format(label, frames)
instance = context.create_instance(label)
# Include `families` using `family` data
@ -118,6 +112,6 @@ class CollectInstances(pyblish.api.ContextPlugin):
data["frameStart"] = node.evalParm("f1")
data["frameEnd"] = node.evalParm("f2")
data["steps"] = node.evalParm("f3")
data["byFrameStep"] = node.evalParm("f3")
return data

View file

@ -0,0 +1,104 @@
import re
import os
import hou
import pyblish.api
from openpype.hosts.houdini.api.lib import (
evalParmNoFrame,
get_color_management_preferences
)
from openpype.hosts.houdini.api import (
colorspace
)
class CollectKarmaROPRenderProducts(pyblish.api.InstancePlugin):
"""Collect Karma Render Products
Collects the instance.data["files"] for the multipart render product.
Provides:
instance -> files
"""
label = "Karma ROP Render Products"
order = pyblish.api.CollectorOrder + 0.4
hosts = ["houdini"]
families = ["karma_rop"]
def process(self, instance):
rop = hou.node(instance.data.get("instance_node"))
# Collect chunkSize
chunk_size_parm = rop.parm("chunkSize")
if chunk_size_parm:
chunk_size = int(chunk_size_parm.eval())
instance.data["chunkSize"] = chunk_size
self.log.debug("Chunk Size: %s" % chunk_size)
default_prefix = evalParmNoFrame(rop, "picture")
render_products = []
# Default beauty AOV
beauty_product = self.get_render_product_name(
prefix=default_prefix, suffix=None
)
render_products.append(beauty_product)
files_by_aov = {
"beauty": self.generate_expected_files(instance,
beauty_product)
}
filenames = list(render_products)
instance.data["files"] = filenames
instance.data["renderProducts"] = colorspace.ARenderProduct()
for product in render_products:
self.log.debug("Found render product: %s" % product)
if "expectedFiles" not in instance.data:
instance.data["expectedFiles"] = list()
instance.data["expectedFiles"].append(files_by_aov)
# update the colorspace data
colorspace_data = get_color_management_preferences()
instance.data["colorspaceConfig"] = colorspace_data["config"]
instance.data["colorspaceDisplay"] = colorspace_data["display"]
instance.data["colorspaceView"] = colorspace_data["view"]
def get_render_product_name(self, prefix, suffix):
product_name = prefix
if suffix:
# Add ".{suffix}" before the extension
prefix_base, ext = os.path.splitext(prefix)
product_name = "{}.{}{}".format(prefix_base, suffix, ext)
return product_name
def generate_expected_files(self, instance, path):
"""Create expected files in instance data"""
dir = os.path.dirname(path)
file = os.path.basename(path)
if "#" in file:
def replace(match):
return "%0{}d".format(len(match.group()))
file = re.sub("#+", replace, file)
if "%" not in file:
return path
expected_files = []
start = instance.data["frameStart"]
end = instance.data["frameEnd"]
for i in range(int(start), (int(end) + 1)):
expected_files.append(
os.path.join(dir, (file % i)).replace("\\", "/"))
return expected_files

View file

@ -0,0 +1,127 @@
import re
import os
import hou
import pyblish.api
from openpype.hosts.houdini.api.lib import (
evalParmNoFrame,
get_color_management_preferences
)
from openpype.hosts.houdini.api import (
colorspace
)
class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
"""Collect Mantra Render Products
Collects the instance.data["files"] for the render products.
Provides:
instance -> files
"""
label = "Mantra ROP Render Products"
order = pyblish.api.CollectorOrder + 0.4
hosts = ["houdini"]
families = ["mantra_rop"]
def process(self, instance):
rop = hou.node(instance.data.get("instance_node"))
# Collect chunkSize
chunk_size_parm = rop.parm("chunkSize")
if chunk_size_parm:
chunk_size = int(chunk_size_parm.eval())
instance.data["chunkSize"] = chunk_size
self.log.debug("Chunk Size: %s" % chunk_size)
default_prefix = evalParmNoFrame(rop, "vm_picture")
render_products = []
# Default beauty AOV
beauty_product = self.get_render_product_name(
prefix=default_prefix, suffix=None
)
render_products.append(beauty_product)
files_by_aov = {
"beauty": self.generate_expected_files(instance,
beauty_product)
}
aov_numbers = rop.evalParm("vm_numaux")
if aov_numbers > 0:
# get the filenames of the AOVs
for i in range(1, aov_numbers + 1):
var = rop.evalParm("vm_variable_plane%d" % i)
if var:
aov_name = "vm_filename_plane%d" % i
aov_boolean = "vm_usefile_plane%d" % i
aov_enabled = rop.evalParm(aov_boolean)
has_aov_path = rop.evalParm(aov_name)
if has_aov_path and aov_enabled == 1:
aov_prefix = evalParmNoFrame(rop, aov_name)
aov_product = self.get_render_product_name(
prefix=aov_prefix, suffix=None
)
render_products.append(aov_product)
files_by_aov[var] = self.generate_expected_files(instance, aov_product) # noqa
for product in render_products:
self.log.debug("Found render product: %s" % product)
filenames = list(render_products)
instance.data["files"] = filenames
instance.data["renderProducts"] = colorspace.ARenderProduct()
# For now by default do NOT try to publish the rendered output
instance.data["publishJobState"] = "Suspended"
instance.data["attachTo"] = [] # stub required data
if "expectedFiles" not in instance.data:
instance.data["expectedFiles"] = list()
instance.data["expectedFiles"].append(files_by_aov)
# update the colorspace data
colorspace_data = get_color_management_preferences()
instance.data["colorspaceConfig"] = colorspace_data["config"]
instance.data["colorspaceDisplay"] = colorspace_data["display"]
instance.data["colorspaceView"] = colorspace_data["view"]
def get_render_product_name(self, prefix, suffix):
product_name = prefix
if suffix:
# Add ".{suffix}" before the extension
prefix_base, ext = os.path.splitext(prefix)
product_name = prefix_base + "." + suffix + ext
return product_name
def generate_expected_files(self, instance, path):
"""Create expected files in instance data"""
dir = os.path.dirname(path)
file = os.path.basename(path)
if "#" in file:
def replace(match):
return "%0{}d".format(len(match.group()))
file = re.sub("#+", replace, file)
if "%" not in file:
return path
expected_files = []
start = instance.data["frameStart"]
end = instance.data["frameEnd"]
for i in range(int(start), (int(end) + 1)):
expected_files.append(
os.path.join(dir, (file % i)).replace("\\", "/"))
return expected_files

View file

@ -4,52 +4,13 @@ import os
import hou
import pyblish.api
def get_top_referenced_parm(parm):
processed = set() # disallow infinite loop
while True:
if parm.path() in processed:
raise RuntimeError("Parameter references result in cycle.")
processed.add(parm.path())
ref = parm.getReferencedParm()
if ref.path() == parm.path():
# It returns itself when it doesn't reference
# another parameter
return ref
else:
parm = ref
def evalParmNoFrame(node, parm, pad_character="#"):
parameter = node.parm(parm)
assert parameter, "Parameter does not exist: %s.%s" % (node, parm)
# If the parameter has a parameter reference, then get that
# parameter instead as otherwise `unexpandedString()` fails.
parameter = get_top_referenced_parm(parameter)
# Substitute out the frame numbering with padded characters
try:
raw = parameter.unexpandedString()
except hou.Error as exc:
print("Failed: %s" % parameter)
raise RuntimeError(exc)
def replace(match):
padding = 1
n = match.group(2)
if n and int(n):
padding = int(n)
return pad_character * padding
expression = re.sub(r"(\$F([0-9]*))", replace, raw)
with hou.ScriptEvalContext(parameter):
return hou.expandStringAtFrame(expression, 0)
from openpype.hosts.houdini.api.lib import (
evalParmNoFrame,
get_color_management_preferences
)
from openpype.hosts.houdini.api import (
colorspace
)
class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
@ -87,6 +48,9 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
prefix=default_prefix, suffix=beauty_suffix
)
render_products.append(beauty_product)
files_by_aov = {
"_": self.generate_expected_files(instance,
beauty_product)}
num_aovs = rop.evalParm("RS_aov")
for index in range(num_aovs):
@ -104,11 +68,29 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
aov_product = self.get_render_product_name(aov_prefix, aov_suffix)
render_products.append(aov_product)
files_by_aov[aov_suffix] = self.generate_expected_files(instance,
aov_product) # noqa
for product in render_products:
self.log.debug("Found render product: %s" % product)
filenames = list(render_products)
instance.data["files"] = filenames
instance.data["renderProducts"] = colorspace.ARenderProduct()
# For now by default do NOT try to publish the rendered output
instance.data["publishJobState"] = "Suspended"
instance.data["attachTo"] = [] # stub required data
if "expectedFiles" not in instance.data:
instance.data["expectedFiles"] = list()
instance.data["expectedFiles"].append(files_by_aov)
# update the colorspace data
colorspace_data = get_color_management_preferences()
instance.data["colorspaceConfig"] = colorspace_data["config"]
instance.data["colorspaceDisplay"] = colorspace_data["display"]
instance.data["colorspaceView"] = colorspace_data["view"]
def get_render_product_name(self, prefix, suffix):
"""Return the output filename using the AOV prefix and suffix"""
@ -133,3 +115,27 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
product_name = prefix
return product_name
def generate_expected_files(self, instance, path):
"""Create expected files in instance data"""
dir = os.path.dirname(path)
file = os.path.basename(path)
if "#" in file:
def replace(match):
return "%0{}d".format(len(match.group()))
file = re.sub("#+", replace, file)
if "%" not in file:
return path
expected_files = []
start = instance.data["frameStart"]
end = instance.data["frameEnd"]
for i in range(int(start), (int(end) + 1)):
expected_files.append(
os.path.join(dir, (file % i)).replace("\\", "/"))
return expected_files

View file

@ -0,0 +1,41 @@
# -*- coding: utf-8 -*-
"""Collector plugin for frames data on ROP instances."""
import hou # noqa
import pyblish.api
from openpype.hosts.houdini.api import lib
class CollectRopFrameRange(pyblish.api.InstancePlugin):
"""Collect all frames which would be saved from the ROP nodes"""
order = pyblish.api.CollectorOrder
label = "Collect RopNode Frame Range"
def process(self, instance):
node_path = instance.data.get("instance_node")
if node_path is None:
# Instance without instance node like a workfile instance
return
ropnode = hou.node(node_path)
frame_data = lib.get_frame_data(ropnode)
if "frameStart" in frame_data and "frameEnd" in frame_data:
# Log an artist-friendly message about the collected frame range
message = (
"Frame range {0[frameStart]} - {0[frameEnd]}"
).format(frame_data)
if frame_data.get("step", 1.0) != 1.0:
message += " with step {0[step]}".format(frame_data)
self.log.info(message)
instance.data.update(frame_data)
# Add frame range to label if the instance has a frame range.
label = instance.data.get("label", instance.data["name"])
instance.data["label"] = (
"{0} [{1[frameStart]} - {1[frameEnd]}]".format(label,
frame_data)
)

View file

@ -0,0 +1,129 @@
import re
import os
import hou
import pyblish.api
from openpype.hosts.houdini.api.lib import (
evalParmNoFrame,
get_color_management_preferences
)
from openpype.hosts.houdini.api import (
colorspace
)
class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
"""Collect Vray Render Products
Collects the instance.data["files"] for the render products.
Provides:
instance -> files
"""
label = "VRay ROP Render Products"
order = pyblish.api.CollectorOrder + 0.4
hosts = ["houdini"]
families = ["vray_rop"]
def process(self, instance):
rop = hou.node(instance.data.get("instance_node"))
# Collect chunkSize
chunk_size_parm = rop.parm("chunkSize")
if chunk_size_parm:
chunk_size = int(chunk_size_parm.eval())
instance.data["chunkSize"] = chunk_size
self.log.debug("Chunk Size: %s" % chunk_size)
default_prefix = evalParmNoFrame(rop, "SettingsOutput_img_file_path")
render_products = []
# TODO: add render elements if render element is enabled
beauty_product = self.get_beauty_render_product(default_prefix)
render_products.append(beauty_product)
files_by_aov = {
"RGB Color": self.generate_expected_files(instance,
beauty_product)}
if instance.data.get("RenderElement", True):
render_element = self.get_render_element_name(rop, default_prefix)
if render_element:
for aov, renderpass in render_element.items():
render_products.append(renderpass)
files_by_aov[aov] = self.generate_expected_files(instance, renderpass) # noqa
for product in render_products:
self.log.debug("Found render product: %s" % product)
filenames = list(render_products)
instance.data["files"] = filenames
instance.data["renderProducts"] = colorspace.ARenderProduct()
# For now by default do NOT try to publish the rendered output
instance.data["publishJobState"] = "Suspended"
instance.data["attachTo"] = [] # stub required data
if "expectedFiles" not in instance.data:
instance.data["expectedFiles"] = list()
instance.data["expectedFiles"].append(files_by_aov)
self.log.debug("expectedFiles:{}".format(files_by_aov))
# update the colorspace data
colorspace_data = get_color_management_preferences()
instance.data["colorspaceConfig"] = colorspace_data["config"]
instance.data["colorspaceDisplay"] = colorspace_data["display"]
instance.data["colorspaceView"] = colorspace_data["view"]
def get_beauty_render_product(self, prefix, suffix="<reName>"):
"""Return the beauty output filename if render element enabled
"""
aov_parm = ".{}".format(suffix)
beauty_product = None
if aov_parm in prefix:
beauty_product = prefix.replace(aov_parm, "")
else:
beauty_product = prefix
return beauty_product
def get_render_element_name(self, node, prefix, suffix="<reName>"):
"""Return the output filename using the AOV prefix and suffix
"""
render_element_dict = {}
# need a rewrite
re_path = node.evalParm("render_network_render_channels")
if re_path:
node_children = hou.node(re_path).children()
for element in node_children:
if element.shaderName() != "vray:SettingsRenderChannels":
aov = str(element)
render_product = prefix.replace(suffix, aov)
render_element_dict[aov] = render_product
return render_element_dict
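# Worked example for the two helpers above (the prefix is illustrative):
#   prefix = "<renders>/vrayRopMain/vrayRopMain.<reName>.$F4.exr"
#   get_beauty_render_product(prefix)
#     -> "<renders>/vrayRopMain/vrayRopMain.$F4.exr"      (".<reName>" removed)
#   get_render_element_name(rop, prefix), with a "diffuse" channel node in the
#   render channels network, yields
#     {"diffuse": "<renders>/vrayRopMain/vrayRopMain.diffuse.$F4.exr"}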
def generate_expected_files(self, instance, path):
"""Create expected files in instance data"""
dir = os.path.dirname(path)
file = os.path.basename(path)
if "#" in file:
def replace(match):
return "%0{}d".format(len(match.group()))
file = re.sub("#+", replace, file)
if "%" not in file:
return path
expected_files = []
start = instance.data["frameStart"]
end = instance.data["frameEnd"]
for i in range(int(start), (int(end) + 1)):
expected_files.append(
os.path.join(dir, (file % i)).replace("\\", "/"))
return expected_files

View file

@ -2,7 +2,10 @@ import pyblish.api
from openpype.lib import version_up
from openpype.pipeline import registered_host
from openpype.action import get_errored_plugins_from_data
from openpype.hosts.houdini.api import HoudiniHost
from openpype.pipeline.publish import KnownPublishError
class IncrementCurrentFile(pyblish.api.ContextPlugin):
"""Increment the current file.
@ -14,17 +17,32 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin):
label = "Increment current file"
order = pyblish.api.IntegratorOrder + 9.0
hosts = ["houdini"]
families = ["workfile"]
families = ["workfile",
"redshift_rop",
"arnold_rop",
"mantra_rop",
"karma_rop",
"usdrender"]
optional = True
def process(self, context):
errored_plugins = get_errored_plugins_from_data(context)
if any(
plugin.__name__ == "HoudiniSubmitPublishDeadline"
for plugin in errored_plugins
):
raise KnownPublishError(
"Skipping incrementing current file because "
"submission to deadline failed."
)
# Filename must not have changed since collecting
host = registered_host() # type: HoudiniHost
current_file = host.current_file()
assert (
context.data["currentFile"] == current_file
), "Collected filename from current scene name."
), "Collected filename mismatches from current scene name."
new_filepath = version_up(current_file)
host.save_workfile(new_filepath)

View file

@ -0,0 +1,50 @@
import attr
from pymxs import runtime as rt
@attr.s
class LayerMetadata(object):
"""Data class for Render Layer metadata."""
frameStart = attr.ib()
frameEnd = attr.ib()
@attr.s
class RenderProduct(object):
"""Getting Colorspace as
Specific Render Product Parameter for submitting
publish job.
"""
colorspace = attr.ib() # colorspace
view = attr.ib()
productName = attr.ib(default=None)
class ARenderProduct(object):
def __init__(self):
"""Constructor."""
# Initialize
self.layer_data = self._get_layer_data()
self.layer_data.products = self.get_colorspace_data()
def _get_layer_data(self):
return LayerMetadata(
frameStart=int(rt.rendStart),
frameEnd=int(rt.rendEnd),
)
def get_colorspace_data(self):
"""To be implemented by renderer class.
This should return a list of RenderProducts.
Returns:
list: List of RenderProduct
"""
colorspace_data = [
RenderProduct(
colorspace="sRGB",
view="ACES 1.0",
productName=""
)
]
return colorspace_data
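A hedged usage sketch: inside a running 3ds Max / pymxs session a collector can instantiate the stub above and read back the hard-coded colorspace entry; the variable names are illustrative only.

    # The constructor reads rt.rendStart / rt.rendEnd, so this needs a Max session.
    products = ARenderProduct()
    print(products.layer_data.frameStart, products.layer_data.frameEnd)
    for product in products.layer_data.products:
        # Currently a single hard-coded entry: sRGB / "ACES 1.0"
        print(product.productName, product.colorspace, product.view)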

View file

@ -1,30 +1,27 @@
# -*- coding: utf-8 -*-
"""Library of functions useful for 3dsmax pipeline."""
import json
import six
from pymxs import runtime as rt
from typing import Union
import contextlib
import json
from typing import Any, Dict, Union
import six
from openpype.pipeline.context_tools import (
get_current_project_asset,
get_current_project
)
get_current_project, get_current_project_asset,)
from pymxs import runtime as rt
JSON_PREFIX = "JSON::"
def imprint(node_name: str, data: dict) -> bool:
node = rt.getNodeByName(node_name)
node = rt.GetNodeByName(node_name)
if not node:
return False
for k, v in data.items():
if isinstance(v, (dict, list)):
rt.setUserProp(node, k, f'{JSON_PREFIX}{json.dumps(v)}')
rt.SetUserProp(node, k, f"{JSON_PREFIX}{json.dumps(v)}")
else:
rt.setUserProp(node, k, v)
rt.SetUserProp(node, k, v)
return True
@ -44,7 +41,7 @@ def lsattr(
Returns:
list of nodes.
"""
root = rt.rootnode if root is None else rt.getNodeByName(root)
root = rt.RootNode if root is None else rt.GetNodeByName(root)
def output_node(node, nodes):
nodes.append(node)
@ -55,16 +52,16 @@ def lsattr(
output_node(root, nodes)
return [
n for n in nodes
if rt.getUserProp(n, attr) == value
if rt.GetUserProp(n, attr) == value
] if value else [
n for n in nodes
if rt.getUserProp(n, attr)
if rt.GetUserProp(n, attr)
]
def read(container) -> dict:
data = {}
props = rt.getUserPropBuffer(container)
props = rt.GetUserPropBuffer(container)
# this shouldn't happen but let's guard against it anyway
if not props:
return data
@ -79,29 +76,25 @@ def read(container) -> dict:
value = value.strip()
if isinstance(value.strip(), six.string_types) and \
value.startswith(JSON_PREFIX):
try:
with contextlib.suppress(json.JSONDecodeError):
value = json.loads(value[len(JSON_PREFIX):])
except json.JSONDecodeError:
# not a json
pass
data[key.strip()] = value
data["instance_node"] = container.name
data["instance_node"] = container.Name
return data
@contextlib.contextmanager
def maintained_selection():
previous_selection = rt.getCurrentSelection()
previous_selection = rt.GetCurrentSelection()
try:
yield
finally:
if previous_selection:
rt.select(previous_selection)
rt.Select(previous_selection)
else:
rt.select()
rt.Select()
def get_all_children(parent, node_type=None):
@ -123,12 +116,19 @@ def get_all_children(parent, node_type=None):
return children
child_list = list_children(parent)
return ([x for x in child_list if rt.superClassOf(x) == node_type]
return ([x for x in child_list if rt.SuperClassOf(x) == node_type]
if node_type else child_list)
def get_current_renderer():
"""get current renderer"""
"""
Notes:
Get current renderer for Max
Returns:
"{Current Renderer}:{Current Renderer}"
e.g. "Redshift_Renderer:Redshift_Renderer"
"""
return rt.renderers.production
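The value returned above is the MaxScript renderer object; elsewhere in this PR (the render products and render collector changes below) its string form is split on ":" to get the renderer name. A minimal sketch of that pattern, assuming this module is used inside Max:

    renderer_class = get_current_renderer()
    # str() of the renderer looks like "Redshift_Renderer:Redshift_Renderer",
    # so the name is the part before the colon.
    renderer_name = str(renderer_class).split(":")[0]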
@ -175,7 +175,7 @@ def set_scene_resolution(width: int, height: int):
"""
# make sure the render dialog is closed
# for the update of resolution
# Changing the Render Setup dialog settingsshould be done
# Changing the Render Setup dialog settings should be done
# with the actual Render Setup dialog in a closed state.
if rt.renderSceneDialog.isOpen():
rt.renderSceneDialog.close()
@ -183,6 +183,7 @@ def set_scene_resolution(width: int, height: int):
rt.renderWidth = width
rt.renderHeight = height
def reset_scene_resolution():
"""Apply the scene resolution from the project definition
@ -205,7 +206,7 @@ def reset_scene_resolution():
set_scene_resolution(width, height)
def get_frame_range() -> dict:
def get_frame_range() -> Union[Dict[str, Any], None]:
"""Get the current assets frame range and handles.
Returns:
@ -249,10 +250,7 @@ def reset_frame_range(fps: bool = True):
frame_range["handleStart"]
)
frame_end_handle = frame_range["frameEnd"] + int(frame_range["handleEnd"])
frange_cmd = (
f"animationRange = interval {frame_start_handle} {frame_end_handle}"
)
rt.execute(frange_cmd)
set_timeline(frame_start_handle, frame_end_handle)
set_render_frame_range(frame_start_handle, frame_end_handle)
@ -282,5 +280,12 @@ def get_max_version():
#(25000, 62, 0, 25, 0, 0, 997, 2023, "")
max_info[7] = max version date
"""
max_info = rt.maxversion()
max_info = rt.MaxVersion()
return max_info[7]
def set_timeline(frameStart, frameEnd):
"""Set frame range for timeline editor in Max
"""
rt.animationRange = rt.interval(frameStart, frameEnd)
return rt.animationRange
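A brief usage sketch, assuming a running pymxs session; the handle-padded frame values are hypothetical and mirror how reset_frame_range() above drives this helper together with set_render_frame_range().

    frame_start_handle = 1001 - 8   # frameStart minus handleStart
    frame_end_handle = 1100 + 8     # frameEnd plus handleEnd
    set_timeline(frame_start_handle, frame_end_handle)
    set_render_frame_range(frame_start_handle, frame_end_handle)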

View file

@ -3,96 +3,128 @@
# arnold
# https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_3ds_max_ax_maxscript_commands_ax_renderview_commands_html
import os
from pymxs import runtime as rt
from openpype.hosts.max.api.lib import (
get_current_renderer,
get_default_render_folder
)
from openpype.pipeline.context_tools import get_current_project_asset
from openpype.settings import get_project_settings
from openpype.hosts.max.api.lib import get_current_renderer
from openpype.pipeline import legacy_io
from openpype.settings import get_project_settings
class RenderProducts(object):
def __init__(self, project_settings=None):
self._project_settings = project_settings
if not self._project_settings:
self._project_settings = get_project_settings(
legacy_io.Session["AVALON_PROJECT"]
)
self._project_settings = project_settings or get_project_settings(
legacy_io.Session["AVALON_PROJECT"])
def get_beauty(self, container):
render_dir = os.path.dirname(rt.rendOutputFilename)
output_file = os.path.join(render_dir, container)
def render_product(self, container):
folder = rt.maxFilePath
file = rt.maxFileName
folder = folder.replace("\\", "/")
setting = self._project_settings
render_folder = get_default_render_folder(setting)
filename, ext = os.path.splitext(file)
img_fmt = setting["max"]["RenderSettings"]["image_format"] # noqa
output_file = os.path.join(folder,
render_folder,
filename,
start_frame = int(rt.rendStart)
end_frame = int(rt.rendEnd) + 1
return {
"beauty": self.get_expected_beauty(
output_file, start_frame, end_frame, img_fmt
)
}
def get_aovs(self, container):
render_dir = os.path.dirname(rt.rendOutputFilename)
output_file = os.path.join(render_dir,
container)
context = get_current_project_asset()
# TODO: change the frame range follows the current render setting
startFrame = int(rt.rendStart)
endFrame = int(rt.rendEnd) + 1
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
full_render_list = self.beauty_render_product(output_file,
startFrame,
endFrame,
img_fmt)
setting = self._project_settings
img_fmt = setting["max"]["RenderSettings"]["image_format"] # noqa
start_frame = int(rt.rendStart)
end_frame = int(rt.rendEnd) + 1
renderer_class = get_current_renderer()
renderer = str(renderer_class).split(":")[0]
if renderer == "VUE_File_Renderer":
return full_render_list
render_dict = {}
if renderer in [
"ART_Renderer",
"Redshift_Renderer",
"V_Ray_6_Hotfix_3",
"V_Ray_GPU_6_Hotfix_3",
"Default_Scanline_Renderer",
"Quicksilver_Hardware_Renderer",
]:
render_elem_list = self.render_elements_product(output_file,
startFrame,
endFrame,
img_fmt)
if render_elem_list:
full_render_list.extend(iter(render_elem_list))
return full_render_list
render_name = self.get_render_elements_name()
if render_name:
for name in render_name:
render_dict.update({
name: self.get_expected_render_elements(
output_file, name, start_frame,
end_frame, img_fmt)
})
elif renderer == "Redshift_Renderer":
render_name = self.get_render_elements_name()
if render_name:
rs_aov_files = rt.Execute("renderers.current.separateAovFiles")
# this doesn't work, always returns False
# rs_AovFiles = rt.RedShift_Renderer().separateAovFiles
if img_fmt == "exr" and not rs_aov_files:
for name in render_name:
if name == "RsCryptomatte":
render_dict.update({
name: self.get_expected_render_elements(
output_file, name, start_frame,
end_frame, img_fmt)
})
else:
for name in render_name:
render_dict.update({
name: self.get_expected_render_elements(
output_file, name, start_frame,
end_frame, img_fmt)
})
if renderer == "Arnold":
aov_list = self.arnold_render_product(output_file,
startFrame,
endFrame,
img_fmt)
if aov_list:
full_render_list.extend(iter(aov_list))
return full_render_list
elif renderer == "Arnold":
render_name = self.get_arnold_product_name()
if render_name:
for name in render_name:
render_dict.update({
name: self.get_expected_arnold_product(
output_file, name, start_frame, end_frame, img_fmt)
})
elif renderer in [
"V_Ray_6_Hotfix_3",
"V_Ray_GPU_6_Hotfix_3"
]:
if img_fmt != "exr":
render_name = self.get_render_elements_name()
if render_name:
for name in render_name:
render_dict.update({
name: self.get_expected_render_elements(
output_file, name, start_frame,
end_frame, img_fmt) # noqa
})
def beauty_render_product(self, folder, startFrame, endFrame, fmt):
return render_dict
def get_expected_beauty(self, folder, start_frame, end_frame, fmt):
beauty_frame_range = []
for f in range(startFrame, endFrame):
beauty_output = f"{folder}.{f}.{fmt}"
for f in range(start_frame, end_frame):
frame = "%04d" % f
beauty_output = f"{folder}.{frame}.{fmt}"
beauty_output = beauty_output.replace("\\", "/")
beauty_frame_range.append(beauty_output)
return beauty_frame_range
# TODO: Get the arnold render product
def arnold_render_product(self, folder, startFrame, endFrame, fmt):
"""Get all the Arnold AOVs"""
aovs = []
def get_arnold_product_name(self):
"""Get all the Arnold AOVs name"""
aov_name = []
amw = rt.MaxtoAOps.AOVsManagerWindow()
amw = rt.MaxToAOps.AOVsManagerWindow()
aov_mgr = rt.renderers.current.AOVManager
# Check if there is any aov group set in AOV manager
aov_group_num = len(aov_mgr.drivers)
@ -100,34 +132,51 @@ class RenderProducts(object):
return
for i in range(aov_group_num):
# get the specific AOV group
for aov in aov_mgr.drivers[i].aov_list:
for f in range(startFrame, endFrame):
render_element = f"{folder}_{aov.name}.{f}.{fmt}"
render_element = render_element.replace("\\", "/")
aovs.append(render_element)
aov_name.extend(aov.name for aov in aov_mgr.drivers[i].aov_list)
# close the AOVs manager window
amw.close()
return aovs
return aov_name
def render_elements_product(self, folder, startFrame, endFrame, fmt):
"""Get all the render element output files. """
render_dirname = []
def get_expected_arnold_product(self, folder, name,
start_frame, end_frame, fmt):
"""Get all the expected Arnold AOVs"""
aov_list = []
for f in range(start_frame, end_frame):
frame = "%04d" % f
render_element = f"{folder}_{name}.{frame}.{fmt}"
render_element = render_element.replace("\\", "/")
aov_list.append(render_element)
return aov_list
def get_render_elements_name(self):
"""Get all the render element names for general """
render_name = []
render_elem = rt.maxOps.GetCurRenderElementMgr()
render_elem_num = render_elem.NumRenderElements()
if render_elem_num < 1:
return
# get render elements from the renders
for i in range(render_elem_num):
renderlayer_name = render_elem.GetRenderElement(i)
target, renderpass = str(renderlayer_name).split(":")
if renderlayer_name.enabled:
for f in range(startFrame, endFrame):
render_element = f"{folder}_{renderpass}.{f}.{fmt}"
render_element = render_element.replace("\\", "/")
render_dirname.append(render_element)
target, renderpass = str(renderlayer_name).split(":")
render_name.append(renderpass)
return render_dirname
return render_name
def get_expected_render_elements(self, folder, name,
start_frame, end_frame, fmt):
"""Get all the expected render element output files. """
render_elements = []
for f in range(start_frame, end_frame):
frame = "%04d" % f
render_element = f"{folder}_{name}.{frame}.{fmt}"
render_element = render_element.replace("\\", "/")
render_elements.append(render_element)
return render_elements
def image_format(self):
return self._project_settings["max"]["RenderSettings"]["image_format"] # noqa

View file

@ -6,7 +6,7 @@ from operator import attrgetter
import json
from openpype.host import HostBase, IWorkfileHost, ILoadHost, INewPublisher
from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost
import pyblish.api
from openpype.pipeline import (
register_creator_plugin_path,
@ -28,7 +28,7 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
class MaxHost(HostBase, IWorkfileHost, ILoadHost, INewPublisher):
class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
name = "max"
menu = None

View file

@ -1,15 +1,105 @@
# -*- coding: utf-8 -*-
"""3dsmax specific Avalon/Pyblish plugin definitions."""
from pymxs import runtime as rt
import six
from abc import ABCMeta
from openpype.pipeline import (
CreatorError,
Creator,
CreatedInstance
)
import six
from pymxs import runtime as rt
from openpype.lib import BoolDef
from .lib import imprint, read, lsattr
from openpype.pipeline import CreatedInstance, Creator, CreatorError
from .lib import imprint, lsattr, read
MS_CUSTOM_ATTRIB = """attributes "openPypeData"
(
parameters main rollout:OPparams
(
all_handles type:#maxObjectTab tabSize:0 tabSizeVariable:on
)
rollout OPparams "OP Parameters"
(
listbox list_node "Node References" items:#()
button button_add "Add to Container"
button button_del "Delete from Container"
fn node_to_name the_node =
(
handle = the_node.handle
obj_name = the_node.name
handle_name = obj_name + "<" + handle as string + ">"
return handle_name
)
on button_add pressed do
(
current_selection = selectByName title:"Select Objects to add to
the Container" buttontext:"Add"
if current_selection == undefined then return False
temp_arr = #()
i_node_arr = #()
for c in current_selection do
(
handle_name = node_to_name c
node_ref = NodeTransformMonitor node:c
append temp_arr handle_name
append i_node_arr node_ref
)
all_handles = join i_node_arr all_handles
list_node.items = join temp_arr list_node.items
)
on button_del pressed do
(
current_selection = selectByName title:"Select Objects to remove
from the Container" buttontext:"Remove"
if current_selection == undefined then return False
temp_arr = #()
i_node_arr = #()
new_i_node_arr = #()
new_temp_arr = #()
for c in current_selection do
(
node_ref = NodeTransformMonitor node:c as string
handle_name = node_to_name c
tmp_all_handles = #()
for i in all_handles do
(
tmp = i as string
append tmp_all_handles tmp
)
idx = finditem tmp_all_handles node_ref
if idx do
(
new_i_node_arr = DeleteItem all_handles idx
)
idx = finditem list_node.items handle_name
if idx do
(
new_temp_arr = DeleteItem list_node.items idx
)
)
all_handles = join i_node_arr new_i_node_arr
list_node.items = join temp_arr new_temp_arr
)
on OPparams open do
(
if all_handles.count != 0 then
(
temp_arr = #()
for x in all_handles do
(
handle_name = node_to_name x.node
append temp_arr handle_name
)
list_node.items = temp_arr
)
)
)
)"""
class OpenPypeCreatorError(CreatorError):
@ -20,28 +110,40 @@ class MaxCreatorBase(object):
@staticmethod
def cache_subsets(shared_data):
if shared_data.get("max_cached_subsets") is None:
shared_data["max_cached_subsets"] = {}
cached_instances = lsattr("id", "pyblish.avalon.instance")
for i in cached_instances:
creator_id = rt.getUserProp(i, "creator_identifier")
if creator_id not in shared_data["max_cached_subsets"]:
shared_data["max_cached_subsets"][creator_id] = [i.name]
else:
shared_data[
"max_cached_subsets"][creator_id].append(i.name) # noqa
if shared_data.get("max_cached_subsets") is not None:
return shared_data
shared_data["max_cached_subsets"] = {}
cached_instances = lsattr("id", "pyblish.avalon.instance")
for i in cached_instances:
creator_id = rt.GetUserProp(i, "creator_identifier")
if creator_id not in shared_data["max_cached_subsets"]:
shared_data["max_cached_subsets"][creator_id] = [i.name]
else:
shared_data[
"max_cached_subsets"][creator_id].append(i.name)
return shared_data
@staticmethod
def create_instance_node(node_name: str, parent: str = ""):
parent_node = rt.getNodeByName(parent) if parent else rt.rootScene
if not parent_node:
raise OpenPypeCreatorError(f"Specified parent {parent} not found")
def create_instance_node(node):
"""Create instance node.
container = rt.container(name=node_name)
container.Parent = parent_node
If the supplied node is an existing node, it will be used to hold the
instance, otherwise a new container node will be created.
return container
Args:
node (rt.MXSWrapperBase, str): Node or node name to use.
Returns:
instance
"""
if isinstance(node, str):
node = rt.Container(name=node)
attrs = rt.Execute(MS_CUSTOM_ATTRIB)
rt.custAttributes.add(node.baseObject, attrs)
return node
@six.add_metaclass(ABCMeta)
@ -50,7 +152,7 @@ class MaxCreator(Creator, MaxCreatorBase):
def create(self, subset_name, instance_data, pre_create_data):
if pre_create_data.get("use_selection"):
self.selected_nodes = rt.getCurrentSelection()
self.selected_nodes = rt.GetCurrentSelection()
instance_node = self.create_instance_node(subset_name)
instance_data["instance_node"] = instance_node.name
@ -60,8 +162,16 @@ class MaxCreator(Creator, MaxCreatorBase):
instance_data,
self
)
for node in self.selected_nodes:
node.Parent = instance_node
if pre_create_data.get("use_selection"):
node_list = []
for i in self.selected_nodes:
node_ref = rt.NodeTransformMonitor(node=i)
node_list.append(node_ref)
# Setting the property
rt.setProperty(
instance_node.openPypeData, "all_handles", node_list)
self._add_instance_to_context(instance)
imprint(instance_node.name, instance.data_to_store())
@ -70,10 +180,9 @@ class MaxCreator(Creator, MaxCreatorBase):
def collect_instances(self):
self.cache_subsets(self.collection_shared_data)
for instance in self.collection_shared_data[
"max_cached_subsets"].get(self.identifier, []):
for instance in self.collection_shared_data["max_cached_subsets"].get(self.identifier, []): # noqa
created_instance = CreatedInstance.from_existing(
read(rt.getNodeByName(instance)), self
read(rt.GetNodeByName(instance)), self
)
self._add_instance_to_context(created_instance)
@ -98,12 +207,12 @@ class MaxCreator(Creator, MaxCreatorBase):
"""
for instance in instances:
instance_node = rt.getNodeByName(
instance_node = rt.GetNodeByName(
instance.data.get("instance_node"))
if instance_node:
rt.select(instance_node)
rt.execute(f'for o in selection do for c in o.children do c.parent = undefined') # noqa
rt.delete(instance_node)
count = rt.custAttributes.count(instance_node)
rt.custAttributes.delete(instance_node, count)
rt.Delete(instance_node)
self._remove_instance_from_context(instance)

View file

@ -1,26 +1,11 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating camera."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
class CreateCamera(plugin.MaxCreator):
"""Creator plugin for Camera."""
identifier = "io.openpype.creators.max.camera"
label = "Camera"
family = "camera"
icon = "gear"
def create(self, subset_name, instance_data, pre_create_data):
from pymxs import runtime as rt
sel_obj = list(rt.selection)
instance = super(CreateCamera, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
container = rt.getNodeByName(instance.data.get("instance_node"))
# TODO: Disable "Add to Containers?" Panel
# parent the selected cameras into the container
for obj in sel_obj:
obj.parent = container
# for additional work on the node:
# instance_node = rt.getNodeByName(instance.get("instance_node"))

View file

@ -1,26 +1,11 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating raw max scene."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
class CreateMaxScene(plugin.MaxCreator):
"""Creator plugin for 3ds max scenes."""
identifier = "io.openpype.creators.max.maxScene"
label = "Max Scene"
family = "maxScene"
icon = "gear"
def create(self, subset_name, instance_data, pre_create_data):
from pymxs import runtime as rt
sel_obj = list(rt.selection)
instance = super(CreateMaxScene, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
container = rt.getNodeByName(instance.data.get("instance_node"))
# TODO: Disable "Add to Containers?" Panel
# parent the selected cameras into the container
for obj in sel_obj:
obj.parent = container
# for additional work on the node:
# instance_node = rt.getNodeByName(instance.get("instance_node"))

View file

@ -1,28 +1,11 @@
# -*- coding: utf-8 -*-
"""Creator plugin for model."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
class CreateModel(plugin.MaxCreator):
"""Creator plugin for Model."""
identifier = "io.openpype.creators.max.model"
label = "Model"
family = "model"
icon = "gear"
def create(self, subset_name, instance_data, pre_create_data):
from pymxs import runtime as rt
instance = super(CreateModel, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
container = rt.getNodeByName(instance.data.get("instance_node"))
# TODO: Disable "Add to Containers?" Panel
# parent the selected cameras into the container
sel_obj = None
if self.selected_nodes:
sel_obj = list(self.selected_nodes)
for obj in sel_obj:
obj.parent = container
# for additional work on the node:
# instance_node = rt.getNodeByName(instance.get("instance_node"))

View file

@ -1,22 +1,11 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating pointcache alembics."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
class CreatePointCache(plugin.MaxCreator):
"""Creator plugin for Point caches."""
identifier = "io.openpype.creators.max.pointcache"
label = "Point Cache"
family = "pointcache"
icon = "gear"
def create(self, subset_name, instance_data, pre_create_data):
# from pymxs import runtime as rt
_ = super(CreatePointCache, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
# for additional work on the node:
# instance_node = rt.getNodeByName(instance.get("instance_node"))

View file

@ -1,26 +1,11 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating point cloud."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
class CreatePointCloud(plugin.MaxCreator):
"""Creator plugin for Point Clouds."""
identifier = "io.openpype.creators.max.pointcloud"
label = "Point Cloud"
family = "pointcloud"
icon = "gear"
def create(self, subset_name, instance_data, pre_create_data):
from pymxs import runtime as rt
sel_obj = list(rt.selection)
instance = super(CreatePointCloud, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
container = rt.getNodeByName(instance.data.get("instance_node"))
# TODO: Disable "Add to Containers?" Panel
# parent the selected cameras into the container
for obj in sel_obj:
obj.parent = container
# for additional work on the node:
# instance_node = rt.getNodeByName(instance.get("instance_node"))

View file

@ -0,0 +1,11 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating camera."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
class CreateRedshiftProxy(plugin.MaxCreator):
identifier = "io.openpype.creators.max.redshiftproxy"
label = "Redshift Proxy"
family = "redshiftproxy"
icon = "gear"

View file

@ -1,11 +1,12 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating camera."""
import os
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.hosts.max.api.lib_rendersettings import RenderSettings
class CreateRender(plugin.MaxCreator):
"""Creator plugin for Renders."""
identifier = "io.openpype.creators.max.render"
label = "Render"
family = "maxrender"
@ -14,25 +15,18 @@ class CreateRender(plugin.MaxCreator):
def create(self, subset_name, instance_data, pre_create_data):
from pymxs import runtime as rt
sel_obj = list(rt.selection)
file = rt.maxFileName
filename, _ = os.path.splitext(file)
instance_data["AssetName"] = filename
instance = super(CreateRender, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
pre_create_data)
container_name = instance.data.get("instance_node")
container = rt.getNodeByName(container_name)
# TODO: Disable "Add to Containers?" Panel
# parent the selected cameras into the container
for obj in sel_obj:
obj.parent = container
# for additional work on the node:
# instance_node = rt.getNodeByName(instance.get("instance_node"))
# make sure the render dialog is closed
# for the update of resolution
# Changing the Render Setup dialog settings should be done
# with the actual Render Setup dialog in a closed state.
# set viewport camera for rendering(mandatory for deadline)
RenderSettings().set_render_camera(sel_obj)
sel_obj = self.selected_nodes
if sel_obj:
# set viewport camera for rendering(mandatory for deadline)
RenderSettings(self.project_settings).set_render_camera(sel_obj)
# set output paths for rendering(mandatory for deadline)
RenderSettings().render_output(container_name)

View file

@ -1,14 +1,12 @@
import os
from openpype.pipeline import (
load,
get_representation_path
)
from openpype.hosts.max.api import lib, maintained_selection
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
from openpype.pipeline import get_representation_path, load
class FbxLoader(load.LoaderPlugin):
"""Fbx Loader"""
"""Fbx Loader."""
families = ["camera"]
representations = ["fbx"]
@ -24,17 +22,17 @@ class FbxLoader(load.LoaderPlugin):
rt.FBXImporterSetParam("Camera", True)
rt.FBXImporterSetParam("AxisConversionMethod", True)
rt.FBXImporterSetParam("Preserveinstances", True)
rt.importFile(
rt.ImportFile(
filepath,
rt.name("noPrompt"),
using=rt.FBXIMP)
container = rt.getNodeByName(f"{name}")
container = rt.GetNodeByName(f"{name}")
if not container:
container = rt.container()
container = rt.Container()
container.name = f"{name}"
for selection in rt.getCurrentSelection():
for selection in rt.GetCurrentSelection():
selection.Parent = container
return containerise(
@ -44,18 +42,33 @@ class FbxLoader(load.LoaderPlugin):
from pymxs import runtime as rt
path = get_representation_path(representation)
node = rt.getNodeByName(container["instance_node"])
node = rt.GetNodeByName(container["instance_node"])
rt.Select(node.Children)
fbx_reimport_cmd = (
f"""
fbx_objects = self.get_container_children(node)
for fbx_object in fbx_objects:
fbx_object.source = path
FBXImporterSetParam "Animation" true
FBXImporterSetParam "Cameras" true
FBXImporterSetParam "AxisConversionMethod" true
FbxExporterSetParam "UpAxis" "Y"
FbxExporterSetParam "Preserveinstances" true
importFile @"{path}" #noPrompt using:FBXIMP
""")
rt.Execute(fbx_reimport_cmd)
with maintained_selection():
rt.Select(node)
lib.imprint(container["instance_node"], {
"representation": str(representation["_id"])
})
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
from pymxs import runtime as rt
node = rt.getNodeByName(container["instance_node"])
rt.delete(node)
node = rt.GetNodeByName(container["instance_node"])
rt.Delete(node)

View file

@ -1,13 +1,12 @@
import os
from openpype.pipeline import (
load, get_representation_path
)
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
from openpype.hosts.max.api.pipeline import containerise
from openpype.pipeline import get_representation_path, load
class MaxSceneLoader(load.LoaderPlugin):
"""Max Scene Loader"""
"""Max Scene Loader."""
families = ["camera",
"maxScene",
@ -23,23 +22,11 @@ class MaxSceneLoader(load.LoaderPlugin):
path = os.path.normpath(self.fname)
# import the max scene by using "merge file"
path = path.replace('\\', '/')
merge_before = {
c for c in rt.rootNode.Children
if rt.classOf(c) == rt.Container
}
rt.mergeMaxFile(path)
merge_after = {
c for c in rt.rootNode.Children
if rt.classOf(c) == rt.Container
}
max_containers = merge_after.difference(merge_before)
if len(max_containers) != 1:
self.log.error("Something failed when loading.")
max_container = max_containers.pop()
rt.MergeMaxFile(path)
max_objects = rt.getLastMergedNodes()
max_container = rt.Container(name=f"{name}")
for max_object in max_objects:
max_object.Parent = max_container
return containerise(
name, [max_container], context, loader=self.__class__.__name__)
@ -48,17 +35,27 @@ class MaxSceneLoader(load.LoaderPlugin):
from pymxs import runtime as rt
path = get_representation_path(representation)
node = rt.getNodeByName(container["instance_node"])
max_objects = node.Children
node_name = container["instance_node"]
rt.MergeMaxFile(path,
rt.Name("noRedraw"),
rt.Name("deleteOldDups"),
rt.Name("useSceneMtlDups"))
max_objects = rt.getLastMergedNodes()
container_node = rt.GetNodeByName(node_name)
for max_object in max_objects:
max_object.source = path
max_object.Parent = container_node
lib.imprint(container["instance_node"], {
"representation": str(representation["_id"])
})
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
from pymxs import runtime as rt
node = rt.getNodeByName(container["instance_node"])
rt.delete(node)
node = rt.GetNodeByName(container["instance_node"])
rt.Delete(node)

View file

@ -1,8 +1,5 @@
import os
from openpype.pipeline import (
load, get_representation_path
)
from openpype.pipeline import load, get_representation_path
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
from openpype.hosts.max.api.lib import maintained_selection
@ -24,24 +21,20 @@ class ModelAbcLoader(load.LoaderPlugin):
file_path = os.path.normpath(self.fname)
abc_before = {
c for c in rt.rootNode.Children
c
for c in rt.rootNode.Children
if rt.classOf(c) == rt.AlembicContainer
}
abc_import_cmd = (f"""
AlembicImport.ImportToRoot = false
AlembicImport.CustomAttributes = true
AlembicImport.UVs = true
AlembicImport.VertexColors = true
importFile @"{file_path}" #noPrompt
""")
self.log.debug(f"Executing command: {abc_import_cmd}")
rt.execute(abc_import_cmd)
rt.AlembicImport.ImportToRoot = False
rt.AlembicImport.CustomAttributes = True
rt.AlembicImport.UVs = True
rt.AlembicImport.VertexColors = True
rt.importFile(file_path, rt.name("noPrompt"))
abc_after = {
c for c in rt.rootNode.Children
c
for c in rt.rootNode.Children
if rt.classOf(c) == rt.AlembicContainer
}
@ -54,31 +47,34 @@ importFile @"{file_path}" #noPrompt
abc_container = abc_containers.pop()
return containerise(
name, [abc_container], context, loader=self.__class__.__name__)
name, [abc_container], context, loader=self.__class__.__name__
)
def update(self, container, representation):
from pymxs import runtime as rt
path = get_representation_path(representation)
node = rt.getNodeByName(container["instance_node"])
rt.select(node.Children)
for alembic in rt.selection:
abc = rt.getNodeByName(alembic.name)
rt.select(abc.Children)
for abc_con in rt.selection:
container = rt.getNodeByName(abc_con.name)
path = get_representation_path(representation)
node = rt.GetNodeByName(container["instance_node"])
rt.Select(node.Children)
for alembic in rt.Selection:
abc = rt.GetNodeByName(alembic.name)
rt.Select(abc.Children)
for abc_con in rt.Selection:
container = rt.GetNodeByName(abc_con.name)
container.source = path
rt.select(container.Children)
for abc_obj in rt.selection:
alembic_obj = rt.getNodeByName(abc_obj.name)
rt.Select(container.Children)
for abc_obj in rt.Selection:
alembic_obj = rt.GetNodeByName(abc_obj.name)
alembic_obj.source = path
with maintained_selection():
rt.select(node)
rt.Select(node)
lib.imprint(container["instance_node"], {
"representation": str(representation["_id"])
})
lib.imprint(
container["instance_node"],
{"representation": str(representation["_id"])},
)
def switch(self, container, representation):
self.update(container, representation)
@ -86,8 +82,8 @@ importFile @"{file_path}" #noPrompt
def remove(self, container):
from pymxs import runtime as rt
node = rt.getNodeByName(container["instance_node"])
rt.delete(node)
node = rt.GetNodeByName(container["instance_node"])
rt.Delete(node)
@staticmethod
def get_container_children(parent, type_name):
@ -102,7 +98,7 @@ importFile @"{file_path}" #noPrompt
filtered = []
for child in list_children(parent):
class_type = str(rt.classOf(child.baseObject))
class_type = str(rt.ClassOf(child.baseObject))
if class_type == type_name:
filtered.append(child)

View file

@ -1,15 +1,12 @@
import os
from openpype.pipeline import (
load,
get_representation_path
)
from openpype.pipeline import load, get_representation_path
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
from openpype.hosts.max.api.lib import maintained_selection
class FbxModelLoader(load.LoaderPlugin):
"""Fbx Model Loader"""
"""Fbx Model Loader."""
families = ["model"]
representations = ["fbx"]
@ -24,46 +21,40 @@ class FbxModelLoader(load.LoaderPlugin):
rt.FBXImporterSetParam("Animation", False)
rt.FBXImporterSetParam("Cameras", False)
rt.FBXImporterSetParam("Preserveinstances", True)
rt.importFile(
filepath,
rt.name("noPrompt"),
using=rt.FBXIMP)
rt.importFile(filepath, rt.name("noPrompt"), using=rt.FBXIMP)
container = rt.getNodeByName(f"{name}")
container = rt.GetNodeByName(name)
if not container:
container = rt.container()
container.name = f"{name}"
container = rt.Container()
container.name = name
for selection in rt.getCurrentSelection():
for selection in rt.GetCurrentSelection():
selection.Parent = container
return containerise(
name, [container], context, loader=self.__class__.__name__)
name, [container], context, loader=self.__class__.__name__
)
def update(self, container, representation):
from pymxs import runtime as rt
path = get_representation_path(representation)
node = rt.getNodeByName(container["instance_node"])
rt.select(node.Children)
fbx_reimport_cmd = (
f"""
FBXImporterSetParam "Animation" false
FBXImporterSetParam "Cameras" false
FBXImporterSetParam "AxisConversionMethod" true
FbxExporterSetParam "UpAxis" "Y"
FbxExporterSetParam "Preserveinstances" true
importFile @"{path}" #noPrompt using:FBXIMP
""")
rt.execute(fbx_reimport_cmd)
rt.FBXImporterSetParam("Animation", False)
rt.FBXImporterSetParam("Cameras", False)
rt.FBXImporterSetParam("AxisConversionMethod", True)
rt.FBXImporterSetParam("UpAxis", "Y")
rt.FBXImporterSetParam("Preserveinstances", True)
rt.importFile(path, rt.name("noPrompt"), using=rt.FBXIMP)
with maintained_selection():
rt.select(node)
rt.Select(node)
lib.imprint(container["instance_node"], {
"representation": str(representation["_id"])
})
lib.imprint(
container["instance_node"],
{"representation": str(representation["_id"])},
)
def switch(self, container, representation):
self.update(container, representation)
@ -71,5 +62,5 @@ importFile @"{path}" #noPrompt using:FBXIMP
def remove(self, container):
from pymxs import runtime as rt
node = rt.getNodeByName(container["instance_node"])
rt.delete(node)
node = rt.GetNodeByName(container["instance_node"])
rt.Delete(node)

View file

@ -1,15 +1,13 @@
import os
from openpype.pipeline import (
load,
get_representation_path
)
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
from openpype.hosts.max.api.lib import maintained_selection
from openpype.hosts.max.api.pipeline import containerise
from openpype.pipeline import get_representation_path, load
class ObjLoader(load.LoaderPlugin):
"""Obj Loader"""
"""Obj Loader."""
families = ["model"]
representations = ["obj"]
@ -21,18 +19,18 @@ class ObjLoader(load.LoaderPlugin):
from pymxs import runtime as rt
filepath = os.path.normpath(self.fname)
self.log.debug(f"Executing command to import..")
self.log.debug("Executing command to import..")
rt.execute(f'importFile @"{filepath}" #noPrompt using:ObjImp')
rt.Execute(f'importFile @"{filepath}" #noPrompt using:ObjImp')
# create "missing" container for obj import
container = rt.container()
container.name = f"{name}"
container = rt.Container()
container.name = name
# get current selection
for selection in rt.getCurrentSelection():
for selection in rt.GetCurrentSelection():
selection.Parent = container
asset = rt.getNodeByName(f"{name}")
asset = rt.GetNodeByName(name)
return containerise(
name, [asset], context, loader=self.__class__.__name__)
@ -42,27 +40,30 @@ class ObjLoader(load.LoaderPlugin):
path = get_representation_path(representation)
node_name = container["instance_node"]
node = rt.getNodeByName(node_name)
node = rt.GetNodeByName(node_name)
instance_name, _ = node_name.split("_")
container = rt.getNodeByName(instance_name)
for n in container.Children:
rt.delete(n)
container = rt.GetNodeByName(instance_name)
for child in container.Children:
rt.Delete(child)
rt.execute(f'importFile @"{path}" #noPrompt using:ObjImp')
rt.Execute(f'importFile @"{path}" #noPrompt using:ObjImp')
# get current selection
for selection in rt.getCurrentSelection():
for selection in rt.GetCurrentSelection():
selection.Parent = container
with maintained_selection():
rt.select(node)
rt.Select(node)
lib.imprint(node_name, {
"representation": str(representation["_id"])
})
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
from pymxs import runtime as rt
node = rt.getNodeByName(container["instance_node"])
rt.delete(node)
node = rt.GetNodeByName(container["instance_node"])
rt.Delete(node)

View file

@ -1,10 +1,9 @@
import os
from openpype.pipeline import (
load, get_representation_path
)
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
from openpype.hosts.max.api.lib import maintained_selection
from openpype.hosts.max.api.pipeline import containerise
from openpype.pipeline import get_representation_path, load
class ModelUSDLoader(load.LoaderPlugin):
@ -19,6 +18,7 @@ class ModelUSDLoader(load.LoaderPlugin):
def load(self, context, name=None, namespace=None, data=None):
from pymxs import runtime as rt
# asset_filepath
filepath = os.path.normpath(self.fname)
import_options = rt.USDImporter.CreateOptions()
@ -27,11 +27,11 @@ class ModelUSDLoader(load.LoaderPlugin):
log_filepath = filepath.replace(ext, "txt")
rt.LogPath = log_filepath
rt.LogLevel = rt.name('info')
rt.LogLevel = rt.Name("info")
rt.USDImporter.importFile(filepath,
importOptions=import_options)
asset = rt.getNodeByName(f"{name}")
asset = rt.GetNodeByName(name)
return containerise(
name, [asset], context, loader=self.__class__.__name__)
@ -41,11 +41,11 @@ class ModelUSDLoader(load.LoaderPlugin):
path = get_representation_path(representation)
node_name = container["instance_node"]
node = rt.getNodeByName(node_name)
node = rt.GetNodeByName(node_name)
for n in node.Children:
for r in n.Children:
rt.delete(r)
rt.delete(n)
rt.Delete(r)
rt.Delete(n)
instance_name, _ = node_name.split("_")
import_options = rt.USDImporter.CreateOptions()
@ -54,15 +54,15 @@ class ModelUSDLoader(load.LoaderPlugin):
log_filepath = path.replace(ext, "txt")
rt.LogPath = log_filepath
rt.LogLevel = rt.name('info')
rt.LogLevel = rt.Name("info")
rt.USDImporter.importFile(path,
importOptions=import_options)
asset = rt.getNodeByName(f"{instance_name}")
asset = rt.GetNodeByName(instance_name)
asset.Parent = node
with maintained_selection():
rt.select(node)
rt.Select(node)
lib.imprint(node_name, {
"representation": str(representation["_id"])
@ -74,5 +74,5 @@ class ModelUSDLoader(load.LoaderPlugin):
def remove(self, container):
from pymxs import runtime as rt
node = rt.getNodeByName(container["instance_node"])
rt.delete(node)
node = rt.GetNodeByName(container["instance_node"])
rt.Delete(node)

View file

@ -5,19 +5,15 @@ Because of limited api, alembics can be only loaded, but not easily updated.
"""
import os
from openpype.pipeline import (
load, get_representation_path
)
from openpype.pipeline import load, get_representation_path
from openpype.hosts.max.api import lib, maintained_selection
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
class AbcLoader(load.LoaderPlugin):
"""Alembic loader."""
families = ["camera",
"animation",
"pointcache"]
families = ["camera", "animation", "pointcache"]
label = "Load Alembic"
representations = ["abc"]
order = -10
@ -30,21 +26,17 @@ class AbcLoader(load.LoaderPlugin):
file_path = os.path.normpath(self.fname)
abc_before = {
c for c in rt.rootNode.Children
c
for c in rt.rootNode.Children
if rt.classOf(c) == rt.AlembicContainer
}
abc_export_cmd = (f"""
AlembicImport.ImportToRoot = false
importFile @"{file_path}" #noPrompt
""")
self.log.debug(f"Executing command: {abc_export_cmd}")
rt.execute(abc_export_cmd)
rt.AlembicImport.ImportToRoot = False
rt.importFile(file_path, rt.name("noPrompt"))
abc_after = {
c for c in rt.rootNode.Children
c
for c in rt.rootNode.Children
if rt.classOf(c) == rt.AlembicContainer
}
@ -56,22 +48,42 @@ importFile @"{file_path}" #noPrompt
abc_container = abc_containers.pop()
for abc in rt.GetCurrentSelection():
for cam_shape in abc.Children:
cam_shape.playbackType = 2
return containerise(
name, [abc_container], context, loader=self.__class__.__name__)
name, [abc_container], context, loader=self.__class__.__name__
)
def update(self, container, representation):
from pymxs import runtime as rt
path = get_representation_path(representation)
node = rt.getNodeByName(container["instance_node"])
node = rt.GetNodeByName(container["instance_node"])
alembic_objects = self.get_container_children(node, "AlembicObject")
for alembic_object in alembic_objects:
alembic_object.source = path
lib.imprint(container["instance_node"], {
"representation": str(representation["_id"])
})
lib.imprint(
container["instance_node"],
{"representation": str(representation["_id"])},
)
with maintained_selection():
rt.Select(node.Children)
for alembic in rt.Selection:
abc = rt.GetNodeByName(alembic.name)
rt.Select(abc.Children)
for abc_con in rt.Selection:
container = rt.GetNodeByName(abc_con.name)
container.source = path
rt.Select(container.Children)
for abc_obj in rt.Selection:
alembic_obj = rt.GetNodeByName(abc_obj.name)
alembic_obj.source = path
def switch(self, container, representation):
self.update(container, representation)
@ -79,8 +91,8 @@ importFile @"{file_path}" #noPrompt
def remove(self, container):
from pymxs import runtime as rt
node = rt.getNodeByName(container["instance_node"])
rt.delete(node)
node = rt.GetNodeByName(container["instance_node"])
rt.Delete(node)
@staticmethod
def get_container_children(parent, type_name):

View file

@ -1,13 +1,12 @@
import os
from openpype.pipeline import (
load, get_representation_path
)
from openpype.hosts.max.api import lib, maintained_selection
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
from openpype.pipeline import get_representation_path, load
class PointCloudLoader(load.LoaderPlugin):
"""Point Cloud Loader"""
"""Point Cloud Loader."""
families = ["pointcloud"]
representations = ["prt"]
@ -23,7 +22,7 @@ class PointCloudLoader(load.LoaderPlugin):
obj = rt.tyCache()
obj.filename = filepath
prt_container = rt.getNodeByName(f"{obj.name}")
prt_container = rt.GetNodeByName(obj.name)
return containerise(
name, [prt_container], context, loader=self.__class__.__name__)
@ -33,19 +32,23 @@ class PointCloudLoader(load.LoaderPlugin):
from pymxs import runtime as rt
path = get_representation_path(representation)
node = rt.getNodeByName(container["instance_node"])
node = rt.GetNodeByName(container["instance_node"])
with maintained_selection():
rt.Select(node.Children)
for prt in rt.Selection:
prt_object = rt.GetNodeByName(prt.name)
prt_object.filename = path
prt_objects = self.get_container_children(node)
for prt_object in prt_objects:
prt_object.source = path
lib.imprint(container["instance_node"], {
"representation": str(representation["_id"])
})
lib.imprint(container["instance_node"], {
"representation": str(representation["_id"])
})
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
"""remove the container"""
from pymxs import runtime as rt
node = rt.getNodeByName(container["instance_node"])
rt.delete(node)
node = rt.GetNodeByName(container["instance_node"])
rt.Delete(node)

View file

@ -0,0 +1,63 @@
import os
import clique
from openpype.pipeline import (
load,
get_representation_path
)
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
class RedshiftProxyLoader(load.LoaderPlugin):
"""Load rs files with Redshift Proxy"""
label = "Load Redshift Proxy"
families = ["redshiftproxy"]
representations = ["rs"]
order = -9
icon = "code-fork"
color = "white"
def load(self, context, name=None, namespace=None, data=None):
from pymxs import runtime as rt
filepath = self.filepath_from_context(context)
rs_proxy = rt.RedshiftProxy()
rs_proxy.file = filepath
files_in_folder = os.listdir(os.path.dirname(filepath))
collections, remainder = clique.assemble(files_in_folder)
if collections:
rs_proxy.is_sequence = True
container = rt.container()
container.name = name
rs_proxy.Parent = container
asset = rt.getNodeByName(name)
return containerise(
name, [asset], context, loader=self.__class__.__name__)
def update(self, container, representation):
from pymxs import runtime as rt
path = get_representation_path(representation)
node = rt.getNodeByName(container["instance_node"])
for children in node.Children:
children_node = rt.getNodeByName(children.name)
for proxy in children_node.Children:
proxy.file = path
lib.imprint(container["instance_node"], {
"representation": str(representation["_id"])
})
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
from pymxs import runtime as rt
node = rt.getNodeByName(container["instance_node"])
rt.delete(node)

View file

@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
"""Collect instance members."""
import pyblish.api
from pymxs import runtime as rt
class CollectMembers(pyblish.api.InstancePlugin):
"""Collect Set Members."""
order = pyblish.api.CollectorOrder + 0.01
label = "Collect Instance Members"
hosts = ['max']
def process(self, instance):
if instance.data.get("instance_node"):
container = rt.GetNodeByName(instance.data["instance_node"])
instance.data["members"] = [
member.node for member
in container.openPypeData.all_handles
]
self.log.debug("{}".format(instance.data["members"]))

View file

@ -5,7 +5,8 @@ import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline import get_current_asset_name
from openpype.hosts.max.api.lib import get_max_version
from openpype.hosts.max.api import colorspace
from openpype.hosts.max.api.lib import get_max_version, get_current_renderer
from openpype.hosts.max.api.lib_renderproducts import RenderProducts
from openpype.client import get_last_version_by_subset_name
@ -28,8 +29,16 @@ class CollectRender(pyblish.api.InstancePlugin):
context.data['currentFile'] = current_file
asset = get_current_asset_name()
render_layer_files = RenderProducts().render_product(instance.name)
files_by_aov = RenderProducts().get_beauty(instance.name)
folder = folder.replace("\\", "/")
aovs = RenderProducts().get_aovs(instance.name)
files_by_aov.update(aovs)
if "expectedFiles" not in instance.data:
instance.data["expectedFiles"] = list()
instance.data["files"] = list()
instance.data["expectedFiles"].append(files_by_aov)
instance.data["files"].append(files_by_aov)
img_format = RenderProducts().image_format()
project_name = context.data["projectName"]
@ -38,7 +47,6 @@ class CollectRender(pyblish.api.InstancePlugin):
version_doc = get_last_version_by_subset_name(project_name,
instance.name,
asset_id)
self.log.debug("version_doc: {0}".format(version_doc))
version_int = 1
if version_doc:
@ -46,22 +54,42 @@ class CollectRender(pyblish.api.InstancePlugin):
self.log.debug(f"Setting {version_int} to context.")
context.data["version"] = version_int
# setup the plugin as 3dsmax for the internal renderer
# OCIO config is not supported in
# most of the 3dsmax renderers,
# so this is currently hard-coded
# TODO: add options for redshift/vray ocio config
instance.data["colorspaceConfig"] = ""
instance.data["colorspaceDisplay"] = "sRGB"
instance.data["colorspaceView"] = "ACES 1.0 SDR-video"
instance.data["renderProducts"] = colorspace.ARenderProduct()
instance.data["publishJobState"] = "Suspended"
instance.data["attachTo"] = []
renderer_class = get_current_renderer()
renderer = str(renderer_class).split(":")[0]
# also need to get the render dir for conversion
data = {
"subset": instance.name,
"asset": asset,
"subset": str(instance.name),
"publish": True,
"maxversion": str(get_max_version()),
"imageFormat": img_format,
"family": 'maxrender',
"families": ['maxrender'],
"renderer": renderer,
"source": filepath,
"expectedFiles": render_layer_files,
"plugin": "3dsmax",
"frameStart": int(rt.rendStart),
"frameEnd": int(rt.rendEnd),
"version": version_int,
"farm": True
}
self.log.info("data: {0}".format(data))
instance.data.update(data)
# TODO: this should be unified with maya and its "multipart" flag
# on instance.
if renderer == "Redshift_Renderer":
instance.data.update(
{"separateAovFiles": rt.Execute(
"renderers.current.separateAovFiles")})
self.log.info("data: {0}".format(data))

View file

@ -1,14 +1,14 @@
import os
import pyblish.api
from openpype.pipeline import publish, OptionalPyblishPluginMixin
from pymxs import runtime as rt
from openpype.hosts.max.api import maintained_selection, get_all_children
from openpype.hosts.max.api import maintained_selection
from openpype.pipeline import OptionalPyblishPluginMixin, publish
class ExtractCameraAlembic(publish.Extractor, OptionalPyblishPluginMixin):
"""
Extract Camera with AlembicExport
"""
"""Extract Camera with AlembicExport."""
order = pyblish.api.ExtractorOrder - 0.1
label = "Extract Alembic Camera"
@ -31,20 +31,21 @@ class ExtractCameraAlembic(publish.Extractor, OptionalPyblishPluginMixin):
path = os.path.join(stagingdir, filename)
# We run the render
self.log.info("Writing alembic '%s' to '%s'" % (filename, stagingdir))
self.log.info(f"Writing alembic '{filename}' to '{stagingdir}'")
rt.AlembicExport.ArchiveType = rt.name("ogawa")
rt.AlembicExport.CoordinateSystem = rt.name("maya")
rt.AlembicExport.ArchiveType = rt.Name("ogawa")
rt.AlembicExport.CoordinateSystem = rt.Name("maya")
rt.AlembicExport.StartFrame = start
rt.AlembicExport.EndFrame = end
rt.AlembicExport.CustomAttributes = True
with maintained_selection():
# select and export
rt.select(get_all_children(rt.getNodeByName(container)))
rt.exportFile(
node_list = instance.data["members"]
rt.Select(node_list)
rt.ExportFile(
path,
rt.name("noPrompt"),
rt.Name("noPrompt"),
selectedOnly=True,
using=rt.AlembicExport,
)
@ -58,6 +59,8 @@ class ExtractCameraAlembic(publish.Extractor, OptionalPyblishPluginMixin):
"ext": "abc",
"files": filename,
"stagingDir": stagingdir,
"frameStart": start,
"frameEnd": end,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))
self.log.info(f"Extracted instance '{instance.name}' to: {path}")

View file

@ -1,14 +1,14 @@
import os
import pyblish.api
from openpype.pipeline import publish, OptionalPyblishPluginMixin
from pymxs import runtime as rt
from openpype.hosts.max.api import maintained_selection, get_all_children
from openpype.hosts.max.api import maintained_selection
from openpype.pipeline import OptionalPyblishPluginMixin, publish
class ExtractCameraFbx(publish.Extractor, OptionalPyblishPluginMixin):
"""
Extract Camera with FbxExporter
"""
"""Extract Camera with FbxExporter."""
order = pyblish.api.ExtractorOrder - 0.2
label = "Extract Fbx Camera"
@ -26,7 +26,7 @@ class ExtractCameraFbx(publish.Extractor, OptionalPyblishPluginMixin):
filename = "{name}.fbx".format(**instance.data)
filepath = os.path.join(stagingdir, filename)
self.log.info("Writing fbx file '%s' to '%s'" % (filename, filepath))
self.log.info(f"Writing fbx file '{filename}' to '{filepath}'")
rt.FBXExporterSetParam("Animation", True)
rt.FBXExporterSetParam("Cameras", True)
@ -36,10 +36,11 @@ class ExtractCameraFbx(publish.Extractor, OptionalPyblishPluginMixin):
with maintained_selection():
# select and export
rt.select(get_all_children(rt.getNodeByName(container)))
rt.exportFile(
node_list = instance.data["members"]
rt.Select(node_list)
rt.ExportFile(
filepath,
rt.name("noPrompt"),
rt.Name("noPrompt"),
selectedOnly=True,
using=rt.FBXEXP,
)
@ -55,6 +56,4 @@ class ExtractCameraFbx(publish.Extractor, OptionalPyblishPluginMixin):
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info(
"Extracted instance '%s' to: %s" % (instance.name, filepath)
)
self.log.info(f"Extracted instance '{instance.name}' to: {filepath}")

View file

@ -2,7 +2,6 @@ import os
import pyblish.api
from openpype.pipeline import publish, OptionalPyblishPluginMixin
from pymxs import runtime as rt
from openpype.hosts.max.api import get_all_children
class ExtractMaxSceneRaw(publish.Extractor, OptionalPyblishPluginMixin):
@ -33,7 +32,7 @@ class ExtractMaxSceneRaw(publish.Extractor, OptionalPyblishPluginMixin):
if "representations" not in instance.data:
instance.data["representations"] = []
nodes = get_all_children(rt.getNodeByName(container))
nodes = instance.data["members"]
rt.saveNodes(nodes, max_path, quiet=True)
self.log.info("Performing Extraction ...")

View file

@ -2,7 +2,7 @@ import os
import pyblish.api
from openpype.pipeline import publish, OptionalPyblishPluginMixin
from pymxs import runtime as rt
from openpype.hosts.max.api import maintained_selection, get_all_children
from openpype.hosts.max.api import maintained_selection
class ExtractModel(publish.Extractor, OptionalPyblishPluginMixin):
@ -40,7 +40,8 @@ class ExtractModel(publish.Extractor, OptionalPyblishPluginMixin):
with maintained_selection():
# select and export
rt.select(get_all_children(rt.getNodeByName(container)))
node_list = instance.data["members"]
rt.Select(node_list)
rt.exportFile(
filepath,
rt.name("noPrompt"),

View file

@ -2,7 +2,7 @@ import os
import pyblish.api
from openpype.pipeline import publish, OptionalPyblishPluginMixin
from pymxs import runtime as rt
from openpype.hosts.max.api import maintained_selection, get_all_children
from openpype.hosts.max.api import maintained_selection
class ExtractModelFbx(publish.Extractor, OptionalPyblishPluginMixin):
@ -22,6 +22,7 @@ class ExtractModelFbx(publish.Extractor, OptionalPyblishPluginMixin):
container = instance.data["instance_node"]
self.log.info("Extracting Geometry ...")
stagingdir = self.staging_dir(instance)
@ -39,7 +40,8 @@ class ExtractModelFbx(publish.Extractor, OptionalPyblishPluginMixin):
with maintained_selection():
# select and export
rt.select(get_all_children(rt.getNodeByName(container)))
node_list = instance.data["members"]
rt.Select(node_list)
rt.exportFile(
filepath,
rt.name("noPrompt"),

View file

@ -2,7 +2,7 @@ import os
import pyblish.api
from openpype.pipeline import publish, OptionalPyblishPluginMixin
from pymxs import runtime as rt
from openpype.hosts.max.api import maintained_selection, get_all_children
from openpype.hosts.max.api import maintained_selection
class ExtractModelObj(publish.Extractor, OptionalPyblishPluginMixin):
@ -31,7 +31,8 @@ class ExtractModelObj(publish.Extractor, OptionalPyblishPluginMixin):
with maintained_selection():
# select and export
rt.select(get_all_children(rt.getNodeByName(container)))
node_list = instance.data["members"]
rt.Select(node_list)
rt.exportFile(
filepath,
rt.name("noPrompt"),

View file

@ -1,20 +1,15 @@
import os
import pyblish.api
from openpype.pipeline import (
publish,
OptionalPyblishPluginMixin
)
from pymxs import runtime as rt
from openpype.hosts.max.api import (
maintained_selection
)
from openpype.hosts.max.api import maintained_selection
from openpype.pipeline import OptionalPyblishPluginMixin, publish
class ExtractModelUSD(publish.Extractor,
OptionalPyblishPluginMixin):
"""
Extract Geometry in USDA Format
"""
"""Extract Geometry in USDA Format."""
order = pyblish.api.ExtractorOrder - 0.05
label = "Extract Geometry (USD)"
@ -26,31 +21,28 @@ class ExtractModelUSD(publish.Extractor,
if not self.is_active(instance.data):
return
container = instance.data["instance_node"]
self.log.info("Extracting Geometry ...")
stagingdir = self.staging_dir(instance)
asset_filename = "{name}.usda".format(**instance.data)
asset_filepath = os.path.join(stagingdir,
asset_filename)
self.log.info("Writing USD '%s' to '%s'" % (asset_filepath,
stagingdir))
self.log.info(f"Writing USD '{asset_filepath}' to '{stagingdir}'")
log_filename = "{name}.txt".format(**instance.data)
log_filepath = os.path.join(stagingdir,
log_filename)
self.log.info("Writing log '%s' to '%s'" % (log_filepath,
stagingdir))
self.log.info(f"Writing log '{log_filepath}' to '{stagingdir}'")
# get the nodes which need to be exported
export_options = self.get_export_options(log_filepath)
with maintained_selection():
# select and export
node_list = self.get_node_list(container)
node_list = instance.data["members"]
rt.Select(node_list)
rt.USDExporter.ExportFile(asset_filepath,
exportOptions=export_options,
contentSource=rt.name("selected"),
contentSource=rt.Name("selected"),
nodeList=node_list)
self.log.info("Performing Extraction ...")
@ -73,25 +65,11 @@ class ExtractModelUSD(publish.Extractor,
}
instance.data["representations"].append(log_representation)
self.log.info("Extracted instance '%s' to: %s" % (instance.name,
asset_filepath))
self.log.info(
f"Extracted instance '{instance.name}' to: {asset_filepath}")
def get_node_list(self, container):
"""
Get the target nodes which are
the children of the container
"""
node_list = []
container_node = rt.getNodeByName(container)
target_node = container_node.Children
rt.select(target_node)
for sel in rt.selection:
node_list.append(sel)
return node_list
def get_export_options(self, log_path):
@staticmethod
def get_export_options(log_path):
"""Set Export Options for USD Exporter"""
export_options = rt.USDExporter.createOptions()
@ -101,13 +79,13 @@ class ExtractModelUSD(publish.Extractor,
export_options.Lights = False
export_options.Cameras = False
export_options.Materials = False
export_options.MeshFormat = rt.name('fromScene')
export_options.FileFormat = rt.name('ascii')
export_options.UpAxis = rt.name('y')
export_options.LogLevel = rt.name('info')
export_options.MeshFormat = rt.Name('fromScene')
export_options.FileFormat = rt.Name('ascii')
export_options.UpAxis = rt.Name('y')
export_options.LogLevel = rt.Name('info')
export_options.LogPath = log_path
export_options.PreserveEdgeOrientation = True
export_options.TimeMode = rt.name('current')
export_options.TimeMode = rt.Name('current')
rt.USDexporter.UIOptions = export_options
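For reference, the scattered hunks above amount to the following end-to-end flow for the USD extractor; this sketch only uses calls that appear in ExtractModelUSD itself, with hypothetical file paths supplied by the caller:

    from pymxs import runtime as rt

    def export_members_to_usd(node_list, asset_filepath, log_filepath):
        # Build the same export options as get_export_options(): geometry
        # only, ASCII USDA, Y-up, current frame, with a log written to disk.
        options = rt.USDExporter.createOptions()
        options.Lights = False
        options.Cameras = False
        options.Materials = False
        options.MeshFormat = rt.Name('fromScene')
        options.FileFormat = rt.Name('ascii')
        options.UpAxis = rt.Name('y')
        options.LogLevel = rt.Name('info')
        options.LogPath = log_filepath
        options.PreserveEdgeOrientation = True
        options.TimeMode = rt.Name('current')

        # Select the instance members and export only the selection.
        rt.Select(node_list)
        rt.USDExporter.ExportFile(asset_filepath,
                                  exportOptions=options,
                                  contentSource=rt.Name("selected"),
                                  nodeList=node_list)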

View file

@ -41,7 +41,7 @@ import os
import pyblish.api
from openpype.pipeline import publish
from pymxs import runtime as rt
from openpype.hosts.max.api import maintained_selection, get_all_children
from openpype.hosts.max.api import maintained_selection
class ExtractAlembic(publish.Extractor):
@ -72,7 +72,8 @@ class ExtractAlembic(publish.Extractor):
with maintained_selection():
# select and export
rt.select(get_all_children(rt.getNodeByName(container)))
node_list = instance.data["members"]
rt.Select(node_list)
rt.exportFile(
path,
rt.name("noPrompt"),

View file

@ -1,42 +1,34 @@
import os
import pyblish.api
from openpype.pipeline import publish
from pymxs import runtime as rt
from openpype.hosts.max.api import (
maintained_selection
)
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
def get_setting(project_setting=None):
project_setting = get_project_settings(
legacy_io.Session["AVALON_PROJECT"]
)
return (project_setting["max"]["PointCloud"])
from openpype.hosts.max.api import maintained_selection
from openpype.pipeline import publish
class ExtractPointCloud(publish.Extractor):
"""
Extract PRT format with tyFlow operators
Extract PRT format with tyFlow operators.
Notes:
Currently only works for the default partition setting
Args:
export_particle(): sets up all job arguments for attributes
to be exported in MAXscript
self.export_particle(): sets up all job arguments for attributes
to be exported in MAXscript
get_operators(): get the export_particle operator
self.get_operators(): get the export_particle operator
get_custom_attr(): get all custom channel attributes from Openpype
setting and sets it as job arguments before exporting
self.get_custom_attr(): get all custom channel attributes from Openpype
setting and sets it as job arguments before exporting
get_files(): get the files with tyFlow naming convention
before publishing
self.get_files(): get the files with tyFlow naming convention
before publishing
partition_output_name(): get the naming with partition settings.
get_partition(): get partition value
self.partition_output_name(): get the naming with partition settings.
self.get_partition(): get partition value
"""
@ -46,9 +38,9 @@ class ExtractPointCloud(publish.Extractor):
families = ["pointcloud"]
def process(self, instance):
self.settings = self.get_setting(instance)
start = int(instance.context.data.get("frameStart"))
end = int(instance.context.data.get("frameEnd"))
container = instance.data["instance_node"]
self.log.info("Extracting PRT...")
stagingdir = self.staging_dir(instance)
@ -56,22 +48,25 @@ class ExtractPointCloud(publish.Extractor):
path = os.path.join(stagingdir, filename)
with maintained_selection():
job_args = self.export_particle(container,
job_args = self.export_particle(instance.data["members"],
start,
end,
path)
for job in job_args:
rt.execute(job)
rt.Execute(job)
self.log.info("Performing Extraction ...")
if "representations" not in instance.data:
instance.data["representations"] = []
self.log.info("Writing PRT with TyFlow Plugin...")
filenames = self.get_files(container, path, start, end)
self.log.debug("filenames: {0}".format(filenames))
filenames = self.get_files(
instance.data["members"], path, start, end)
self.log.debug(f"filenames: {filenames}")
partition = self.partition_output_name(container)
partition = self.partition_output_name(
instance.data["members"])
representation = {
'name': 'prt',
@ -81,67 +76,84 @@ class ExtractPointCloud(publish.Extractor):
"outputName": partition # partition value
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s" % (instance.name,
path))
self.log.info(f"Extracted instance '{instance.name}' to: {path}")
def export_particle(self,
container,
members,
start,
end,
filepath):
"""Sets up all job arguments for attributes.
Those attributes are to be exported in MAX Script.
Args:
members (list): Member nodes of the instance.
start (int): Start frame.
end (int): End frame.
filepath (str): Path to PRT file.
Returns:
list of arguments for MAX Script.
"""
job_args = []
opt_list = self.get_operators(container)
opt_list = self.get_operators(members)
for operator in opt_list:
start_frame = "{0}.frameStart={1}".format(operator,
start)
start_frame = f"{operator}.frameStart={start}"
job_args.append(start_frame)
end_frame = "{0}.frameEnd={1}".format(operator,
end)
end_frame = f"{operator}.frameEnd={end}"
job_args.append(end_frame)
filepath = filepath.replace("\\", "/")
prt_filename = '{0}.PRTFilename="{1}"'.format(operator,
filepath)
prt_filename = f'{operator}.PRTFilename="{filepath}"'
job_args.append(prt_filename)
# Partition
mode = "{0}.PRTPartitionsMode=2".format(operator)
mode = f"{operator}.PRTPartitionsMode=2"
job_args.append(mode)
additional_args = self.get_custom_attr(operator)
for args in additional_args:
job_args.append(args)
prt_export = "{0}.exportPRT()".format(operator)
job_args.extend(iter(additional_args))
prt_export = f"{operator}.exportPRT()"
job_args.append(prt_export)
return job_args
def get_operators(self, container):
"""Get Export Particles Operator"""
@staticmethod
def get_operators(members):
"""Get Export Particles Operator.
Args:
members (list): Instance members.
Returns:
list of particle operators
"""
opt_list = []
node = rt.getNodebyName(container)
selection_list = list(node.Children)
for sel in selection_list:
obj = sel.baseobject
# TODO: to see if it can be used maxscript instead
anim_names = rt.getsubanimnames(obj)
for member in members:
obj = member.baseobject
# TODO: see if this can be done with MAXScript instead
anim_names = rt.GetSubAnimNames(obj)
for anim_name in anim_names:
sub_anim = rt.getsubanim(obj, anim_name)
boolean = rt.isProperty(sub_anim, "Export_Particles")
event_name = sub_anim.name
sub_anim = rt.GetSubAnim(obj, anim_name)
boolean = rt.IsProperty(sub_anim, "Export_Particles")
if boolean:
opt = "${0}.{1}.export_particles".format(sel.name,
event_name)
opt_list.append(opt)
event_name = sub_anim.Name
opt = f"${member.Name}.{event_name}.export_particles"
opt_list.append(opt)
return opt_list
@staticmethod
def get_setting(instance):
project_setting = instance.context.data["project_settings"]
return project_setting["max"]["PointCloud"]
def get_custom_attr(self, operator):
"""Get Custom Attributes"""
custom_attr_list = []
attr_settings = get_setting()["attribute"]
attr_settings = self.settings["attribute"]
for key, value in attr_settings.items():
custom_attr = "{0}.PRTChannels_{1}=True".format(operator,
value)
@ -157,14 +169,25 @@ class ExtractPointCloud(publish.Extractor):
path,
start_frame,
end_frame):
"""
Note:
Set the filenames accordingly to the tyFlow file
naming extension for the publishing purpose
"""Get file names for tyFlow.
Actual File Output from tyFlow:
Set the filenames accordingly to the tyFlow file
naming extension for the publishing purpose
Actual File Output from tyFlow::
<SceneFile>__part<PartitionStart>of<PartitionCount>.<frame>.prt
e.g. tyFlow_cloth_CCCS_blobbyFill_001__part1of1_00004.prt
Args:
container: Instance node.
path (str): Output directory.
start_frame (int): Start frame.
end_frame (int): End frame.
Returns:
list of filenames
"""
filenames = []
filename = os.path.basename(path)
@ -181,27 +204,36 @@ class ExtractPointCloud(publish.Extractor):
return filenames
def partition_output_name(self, container):
"""
Notes:
Partition output name set for mapping
the published file output
"""Get partition output name.
Partition output name set for mapping
the published file output.
Todo:
Customizes the setting for the output.
Args:
container: Instance node.
Returns:
str: Partition name.
todo:
Customizes the setting for the output
"""
partition_count, partition_start = self.get_partition(container)
partition = "_part{:03}of{}".format(partition_start,
partition_count)
return partition
return f"_part{partition_start:03}of{partition_count}"
def get_partition(self, container):
"""
Get Partition Value
"""Get Partition value.
Args:
container: Instance node.
"""
opt_list = self.get_operators(container)
# TODO: This looks strange? Iterating over
# the opt_list but returning from inside?
for operator in opt_list:
count = rt.execute(f'{operator}.PRTPartitionsCount')
start = rt.execute(f'{operator}.PRTPartitionsFrom')
count = rt.Execute(f'{operator}.PRTPartitionsCount')
start = rt.Execute(f'{operator}.PRTPartitionsFrom')
return count, start
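export_particle() above does not call a Python export API directly; it assembles plain MAXScript statements as strings and evaluates them one by one with rt.Execute(). A condensed sketch of that round trip, with a hypothetical operator path:

    from pymxs import runtime as rt

    def run_export_particle_job(operator, start, end, filepath):
        # `operator` is a MAXScript path such as
        # "$tyFlow001.Export_Particles001.export_particles" (hypothetical).
        filepath = filepath.replace("\\", "/")
        job_args = [
            f"{operator}.frameStart={start}",
            f"{operator}.frameEnd={end}",
            f'{operator}.PRTFilename="{filepath}"',
            f"{operator}.PRTPartitionsMode=2",   # partition mode, as in export_particle()
            f"{operator}.exportPRT()",           # trigger the export last
        ]
        for job in job_args:
            rt.Execute(job)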

View file

@ -0,0 +1,62 @@
import os
import pyblish.api
from openpype.pipeline import publish
from pymxs import runtime as rt
from openpype.hosts.max.api import maintained_selection
class ExtractRedshiftProxy(publish.Extractor):
"""
Extract Redshift Proxy with rsProxy
"""
order = pyblish.api.ExtractorOrder - 0.1
label = "Extract RedShift Proxy"
hosts = ["max"]
families = ["redshiftproxy"]
def process(self, instance):
container = instance.data["instance_node"]
start = int(instance.context.data.get("frameStart"))
end = int(instance.context.data.get("frameEnd"))
self.log.info("Extracting Redshift Proxy...")
stagingdir = self.staging_dir(instance)
rs_filename = "{name}.rs".format(**instance.data)
rs_filepath = os.path.join(stagingdir, rs_filename)
rs_filepath = rs_filepath.replace("\\", "/")
rs_filenames = self.get_rsfiles(instance, start, end)
with maintained_selection():
# select and export
node_list = instance.data["members"]
rt.Select(node_list)
# Redshift rsProxy command
# rsProxy fp selected compress connectivity startFrame endFrame
# camera warnExisting transformPivotToOrigin
rt.rsProxy(rs_filepath, 1, 0, 0, start, end, 0, 1, 1)
self.log.info("Performing Extraction ...")
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'rs',
'ext': 'rs',
'files': rs_filenames if len(rs_filenames) > 1 else rs_filenames[0], # noqa
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s" % (instance.name,
stagingdir))
def get_rsfiles(self, instance, startFrame, endFrame):
rs_filenames = []
rs_name = instance.data["name"]
for frame in range(startFrame, endFrame + 1):
rs_filename = "%s.%04d.rs" % (rs_name, frame)
rs_filenames.append(rs_filename)
return rs_filenames
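The rt.rsProxy() call above is purely positional. Mapping each argument to the names given in the extractor's own comment gives the annotated sketch below; the names are taken from that comment and are assumed rather than checked against Redshift's MAXScript documentation, and the path and frame range are hypothetical:

    from pymxs import runtime as rt

    rs_filepath = "C:/publish/renderMain.rs"   # hypothetical output path
    start, end = 1001, 1010                    # hypothetical frame range

    rt.rsProxy(
        rs_filepath,  # fp: output .rs file (forward slashes)
        1,            # selected: export only the selected nodes
        0,            # compress
        0,            # connectivity
        start,        # startFrame
        end,          # endFrame
        0,            # camera
        1,            # warnExisting
        1,            # transformPivotToOrigin
    )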

View file

@ -0,0 +1,21 @@
import pyblish.api
import os
class SaveCurrentScene(pyblish.api.ContextPlugin):
"""Save current scene
"""
label = "Save current file"
order = pyblish.api.ExtractorOrder - 0.49
hosts = ["max"]
families = ["maxrender", "workfile"]
def process(self, context):
from pymxs import runtime as rt
folder = rt.maxFilePath
file = rt.maxFileName
current = os.path.join(folder, file)
assert context.data["currentFile"] == current
rt.saveMaxFile(current)

View file

@ -20,28 +20,23 @@ class ValidateCameraContent(pyblish.api.InstancePlugin):
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise PublishValidationError("Camera instance must only include"
"camera (and camera target)")
raise PublishValidationError(("Camera instance must only include"
"camera (and camera target). "
f"Invalid content {invalid}"))
def get_invalid(self, instance):
"""
Get invalid nodes if the instance is not camera
"""
invalid = list()
invalid = []
container = instance.data["instance_node"]
self.log.info("Validating look content for "
"{}".format(container))
self.log.info(f"Validating camera content for {container}")
con = rt.getNodeByName(container)
selection_list = list(con.Children)
selection_list = instance.data["members"]
for sel in selection_list:
# to avoid Attribute Error from pymxs wrapper
sel_tmp = str(sel)
found = False
for cam in self.camera_type:
if sel_tmp.startswith(cam):
found = True
break
found = any(sel_tmp.startswith(cam) for cam in self.camera_type)
if not found:
self.log.error("Camera not found")
invalid.append(sel)

View file

@ -0,0 +1,43 @@
import os
import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline.publish import (
RepairAction,
ValidateContentsOrder,
PublishValidationError,
OptionalPyblishPluginMixin
)
from openpype.hosts.max.api.lib_rendersettings import RenderSettings
class ValidateDeadlinePublish(pyblish.api.InstancePlugin,
OptionalPyblishPluginMixin):
"""Validates Render File Directory is
not the same in every submission
"""
order = ValidateContentsOrder
families = ["maxrender"]
hosts = ["max"]
label = "Render Output for Deadline"
optional = True
actions = [RepairAction]
def process(self, instance):
if not self.is_active(instance.data):
return
file = rt.maxFileName
filename, ext = os.path.splitext(file)
if filename not in rt.rendOutputFilename:
raise PublishValidationError(
"Render output folder "
"doesn't match the max scene name! "
"Use Repair action to "
"fix the folder file path.."
)
@classmethod
def repair(cls, instance):
container = instance.data.get("instance_node")
RenderSettings().render_output(container)
cls.log.debug("Reset the render output folder...")

View file

@ -1,8 +1,9 @@
# -*- coding: utf-8 -*-
import pyblish.api
from openpype.pipeline import PublishValidationError
from pymxs import runtime as rt
from openpype.pipeline import PublishValidationError
class ValidateModelContent(pyblish.api.InstancePlugin):
"""Validates Model instance contents.
@ -19,26 +20,25 @@ class ValidateModelContent(pyblish.api.InstancePlugin):
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise PublishValidationError("Model instance must only include"
"Geometry and Editable Mesh")
raise PublishValidationError(("Model instance must only include"
"Geometry and Editable Mesh. "
f"Invalid types on: {invalid}"))
def get_invalid(self, instance):
"""
Get invalid nodes if the instance contains non-model content
"""
invalid = list()
invalid = []
container = instance.data["instance_node"]
self.log.info("Validating look content for "
"{}".format(container))
self.log.info(f"Validating model content for {container}")
con = rt.getNodeByName(container)
selection_list = list(con.Children) or rt.getCurrentSelection()
selection_list = instance.data["members"]
for sel in selection_list:
if rt.classOf(sel) in rt.Camera.classes:
if rt.ClassOf(sel) in rt.Camera.classes:
invalid.append(sel)
if rt.classOf(sel) in rt.Light.classes:
if rt.ClassOf(sel) in rt.Light.classes:
invalid.append(sel)
if rt.classOf(sel) in rt.Shape.classes:
if rt.ClassOf(sel) in rt.Shape.classes:
invalid.append(sel)
return invalid

View file

@ -18,6 +18,5 @@ class ValidateMaxContents(pyblish.api.InstancePlugin):
label = "Max Scene Contents"
def process(self, instance):
container = rt.getNodeByName(instance.data["instance_node"])
if not list(container.Children):
if not instance.data["members"]:
raise PublishValidationError("No content found in the container")

View file

@ -9,11 +9,11 @@ def get_setting(project_setting=None):
project_setting = get_project_settings(
legacy_io.Session["AVALON_PROJECT"]
)
return (project_setting["max"]["PointCloud"])
return project_setting["max"]["PointCloud"]
class ValidatePointCloud(pyblish.api.InstancePlugin):
"""Validate that workfile was saved."""
"""Validate that work file was saved."""
order = pyblish.api.ValidatorOrder
families = ["pointcloud"]
@ -34,39 +34,42 @@ class ValidatePointCloud(pyblish.api.InstancePlugin):
of export_particle operator
"""
invalid = self.get_tyFlow_object(instance)
if invalid:
raise PublishValidationError("Non tyFlow object "
"found: {}".format(invalid))
invalid = self.get_tyFlow_operator(instance)
if invalid:
raise PublishValidationError("tyFlow ExportParticle operator "
"not found: {}".format(invalid))
report = []
invalid = self.validate_export_mode(instance)
if invalid:
raise PublishValidationError("The export mode is not at PRT")
invalid_object = self.get_tyflow_object(instance)
if invalid_object:
report.append(f"Non tyFlow object found: {invalid_object}")
invalid = self.validate_partition_value(instance)
if invalid:
raise PublishValidationError("tyFlow Partition setting is "
"not at the default value")
invalid = self.validate_custom_attribute(instance)
if invalid:
raise PublishValidationError("Custom Attribute not found "
":{}".format(invalid))
invalid_operator = self.get_tyflow_operator(instance)
if invalid_operator:
report.append((
"tyFlow ExportParticle operator not "
f"found: {invalid_operator}"))
def get_tyFlow_object(self, instance):
if self.validate_export_mode(instance):
report.append("The export mode is not at PRT")
if self.validate_partition_value(instance):
report.append(("tyFlow Partition setting is "
"not at the default value"))
invalid_attribute = self.validate_custom_attribute(instance)
if invalid_attribute:
report.append(("Custom Attribute not found "
f":{invalid_attribute}"))
if report:
raise PublishValidationError(f"{report}")
def get_tyflow_object(self, instance):
invalid = []
container = instance.data["instance_node"]
self.log.info("Validating tyFlow container "
"for {}".format(container))
self.log.info(f"Validating tyFlow container for {container}")
con = rt.getNodeByName(container)
selection_list = list(con.Children)
selection_list = instance.data["members"]
for sel in selection_list:
sel_tmp = str(sel)
if rt.classOf(sel) in [rt.tyFlow,
if rt.ClassOf(sel) in [rt.tyFlow,
rt.Editable_Mesh]:
if "tyFlow" not in sel_tmp:
invalid.append(sel)
@ -75,23 +78,20 @@ class ValidatePointCloud(pyblish.api.InstancePlugin):
return invalid
def get_tyFlow_operator(self, instance):
def get_tyflow_operator(self, instance):
invalid = []
container = instance.data["instance_node"]
self.log.info("Validating tyFlow object "
"for {}".format(container))
con = rt.getNodeByName(container)
selection_list = list(con.Children)
self.log.info(f"Validating tyFlow object for {container}")
selection_list = instance.data["members"]
bool_list = []
for sel in selection_list:
obj = sel.baseobject
anim_names = rt.getsubanimnames(obj)
anim_names = rt.GetSubAnimNames(obj)
for anim_name in anim_names:
# get all the names of the related tyFlow nodes
sub_anim = rt.getsubanim(obj, anim_name)
sub_anim = rt.GetSubAnim(obj, anim_name)
# check if there is export particle operator
boolean = rt.isProperty(sub_anim, "Export_Particles")
boolean = rt.IsProperty(sub_anim, "Export_Particles")
bool_list.append(str(boolean))
# if the export_particles property is not there
# it means there is not a "Export Particle" operator
@ -104,21 +104,18 @@ class ValidatePointCloud(pyblish.api.InstancePlugin):
def validate_custom_attribute(self, instance):
invalid = []
container = instance.data["instance_node"]
self.log.info("Validating tyFlow custom "
"attributes for {}".format(container))
self.log.info(
f"Validating tyFlow custom attributes for {container}")
con = rt.getNodeByName(container)
selection_list = list(con.Children)
selection_list = instance.data["members"]
for sel in selection_list:
obj = sel.baseobject
anim_names = rt.getsubanimnames(obj)
anim_names = rt.GetSubAnimNames(obj)
for anim_name in anim_names:
# get all the names of the related tyFlow nodes
sub_anim = rt.getsubanim(obj, anim_name)
# check if there is export particle operator
boolean = rt.isProperty(sub_anim, "Export_Particles")
event_name = sub_anim.name
if boolean:
sub_anim = rt.GetSubAnim(obj, anim_name)
if rt.IsProperty(sub_anim, "Export_Particles"):
event_name = sub_anim.name
opt = "${0}.{1}.export_particles".format(sel.name,
event_name)
attributes = get_setting()["attribute"]
@ -126,39 +123,36 @@ class ValidatePointCloud(pyblish.api.InstancePlugin):
custom_attr = "{0}.PRTChannels_{1}".format(opt,
value)
try:
rt.execute(custom_attr)
rt.Execute(custom_attr)
except RuntimeError:
invalid.add(key)
invalid.append(key)
return invalid
def validate_partition_value(self, instance):
invalid = []
container = instance.data["instance_node"]
self.log.info("Validating tyFlow partition "
"value for {}".format(container))
self.log.info(
f"Validating tyFlow partition value for {container}")
con = rt.getNodeByName(container)
selection_list = list(con.Children)
selection_list = instance.data["members"]
for sel in selection_list:
obj = sel.baseobject
anim_names = rt.getsubanimnames(obj)
anim_names = rt.GetSubAnimNames(obj)
for anim_name in anim_names:
# get all the names of the related tyFlow nodes
sub_anim = rt.getsubanim(obj, anim_name)
# check if there is export particle operator
boolean = rt.isProperty(sub_anim, "Export_Particles")
event_name = sub_anim.name
if boolean:
sub_anim = rt.GetSubAnim(obj, anim_name)
if rt.IsProperty(sub_anim, "Export_Particles"):
event_name = sub_anim.name
opt = "${0}.{1}.export_particles".format(sel.name,
event_name)
count = rt.execute(f'{opt}.PRTPartitionsCount')
count = rt.Execute(f'{opt}.PRTPartitionsCount')
if count != 100:
invalid.append(count)
start = rt.execute(f'{opt}.PRTPartitionsFrom')
start = rt.Execute(f'{opt}.PRTPartitionsFrom')
if start != 1:
invalid.append(start)
end = rt.execute(f'{opt}.PRTPartitionsTo')
end = rt.Execute(f'{opt}.PRTPartitionsTo')
if end != 1:
invalid.append(end)
@ -167,24 +161,23 @@ class ValidatePointCloud(pyblish.api.InstancePlugin):
def validate_export_mode(self, instance):
invalid = []
container = instance.data["instance_node"]
self.log.info("Validating tyFlow export "
"mode for {}".format(container))
self.log.info(
f"Validating tyFlow export mode for {container}")
con = rt.getNodeByName(container)
con = rt.GetNodeByName(container)
selection_list = list(con.Children)
for sel in selection_list:
obj = sel.baseobject
anim_names = rt.getsubanimnames(obj)
anim_names = rt.GetSubAnimNames(obj)
for anim_name in anim_names:
# get all the names of the related tyFlow nodes
sub_anim = rt.getsubanim(obj, anim_name)
sub_anim = rt.GetSubAnim(obj, anim_name)
# check if there is export particle operator
boolean = rt.isProperty(sub_anim, "Export_Particles")
boolean = rt.IsProperty(sub_anim, "Export_Particles")
event_name = sub_anim.name
if boolean:
opt = "${0}.{1}.export_particles".format(sel.name,
event_name)
export_mode = rt.execute(f'{opt}.exportMode')
opt = f"${sel.name}.{event_name}.export_particles"
export_mode = rt.Execute(f'{opt}.exportMode')
if export_mode != 1:
invalid.append(export_mode)
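Rather than raising on the first failed check, the refactored validator gathers every issue into a report list and raises a single PublishValidationError at the end. A small sketch of that aggregation pattern, with hypothetical check callables (this is the shape of the logic, not the plugin itself):

    from openpype.pipeline import PublishValidationError

    def validate_all(instance, checks):
        # Each check returns a list of offending items, empty when it passes.
        report = []
        for check in checks:
            invalid = check(instance)
            if invalid:
                report.append(f"{check.__name__} failed: {invalid}")
        if report:
            # Raise once with everything that went wrong, as process() does.
            raise PublishValidationError("\n".join(report))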

View file

@ -0,0 +1,54 @@
# -*- coding: utf-8 -*-
import pyblish.api
from openpype.pipeline import PublishValidationError
from pymxs import runtime as rt
from openpype.pipeline.publish import RepairAction
from openpype.hosts.max.api.lib import get_current_renderer
class ValidateRendererRedshiftProxy(pyblish.api.InstancePlugin):
"""
Validates Redshift as the current renderer for creating
Redshift Proxy
"""
order = pyblish.api.ValidatorOrder
families = ["redshiftproxy"]
hosts = ["max"]
label = "Redshift Renderer"
actions = [RepairAction]
def process(self, instance):
invalid = self.get_redshift_renderer(instance)
if invalid:
raise PublishValidationError("Please install Redshift for 3dsMax"
" before using the Redshift proxy instance") # noqa
invalid = self.get_current_renderer(instance)
if invalid:
raise PublishValidationError("The Redshift proxy extraction"
"discontinued since the current renderer is not Redshift") # noqa
def get_redshift_renderer(self, instance):
invalid = list()
max_renderers_list = str(rt.RendererClass.classes)
if "Redshift_Renderer" not in max_renderers_list:
invalid.append(max_renderers_list)
return invalid
def get_current_renderer(self, instance):
invalid = list()
renderer_class = get_current_renderer()
current_renderer = str(renderer_class).split(":")[0]
if current_renderer != "Redshift_Renderer":
invalid.append(current_renderer)
return invalid
@classmethod
def repair(cls, instance):
for Renderer in rt.RendererClass.classes:
renderer = Renderer()
if "Redshift_Renderer" in str(renderer):
rt.renderers.production = renderer
break

View file

@ -1,36 +1,37 @@
# -*- coding: utf-8 -*-
import pyblish.api
"""Validator for USD plugin."""
from openpype.pipeline import PublishValidationError
from pyblish.api import InstancePlugin, ValidatorOrder
from pymxs import runtime as rt
class ValidateUSDPlugin(pyblish.api.InstancePlugin):
"""Validates if USD plugin is installed or loaded in Max
"""
def get_plugins() -> list:
"""Get plugin list from 3ds max."""
manager = rt.PluginManager
count = manager.pluginDllCount
plugin_info_list = []
for p in range(1, count + 1):
plugin_info = manager.pluginDllName(p)
plugin_info_list.append(plugin_info)
order = pyblish.api.ValidatorOrder - 0.01
return plugin_info_list
class ValidateUSDPlugin(InstancePlugin):
"""Validates if USD plugin is installed or loaded in 3ds max."""
order = ValidatorOrder - 0.01
families = ["model"]
hosts = ["max"]
label = "USD Plugin"
def process(self, instance):
plugin_mgr = rt.pluginManager
plugin_count = plugin_mgr.pluginDllCount
plugin_info = self.get_plugins(plugin_mgr,
plugin_count)
"""Plugin entry point."""
plugin_info = get_plugins()
usd_import = "usdimport.dli"
if usd_import not in plugin_info:
raise PublishValidationError("USD Plugin {}"
" not found".format(usd_import))
raise PublishValidationError(f"USD Plugin {usd_import} not found")
usd_export = "usdexport.dle"
if usd_export not in plugin_info:
raise PublishValidationError("USD Plugin {}"
" not found".format(usd_export))
def get_plugins(self, manager, count):
plugin_info_list = list()
for p in range(1, count + 1):
plugin_info = manager.pluginDllName(p)
plugin_info_list.append(plugin_info)
return plugin_info_list
raise PublishValidationError(f"USD Plugin {usd_export} not found")

View file

@ -1,6 +1,7 @@
"""Standalone helper functions"""
import os
from pprint import pformat
import sys
import platform
import uuid
@ -2296,8 +2297,8 @@ def reset_frame_range(playback=True, render=True, fps=True):
cmds.currentTime(frame_start)
if render:
cmds.setAttr("defaultRenderGlobals.startFrame", frame_start)
cmds.setAttr("defaultRenderGlobals.endFrame", frame_end)
cmds.setAttr("defaultRenderGlobals.startFrame", animation_start)
cmds.setAttr("defaultRenderGlobals.endFrame", animation_end)
def reset_scene_resolution():
@ -3239,75 +3240,6 @@ def iter_shader_edits(relationships, shader_nodes, nodes_by_id, label=None):
def set_colorspace():
"""Set Colorspace from project configuration
"""
project_name = os.getenv("AVALON_PROJECT")
imageio = get_project_settings(project_name)["maya"]["imageio"]
# Maya 2022+ introduces new OCIO v2 color management settings that
# can override the old color managenement preferences. OpenPype has
# separate settings for both so we fall back when necessary.
use_ocio_v2 = imageio["colorManagementPreference_v2"]["enabled"]
required_maya_version = 2022
maya_version = int(cmds.about(version=True))
maya_supports_ocio_v2 = maya_version >= required_maya_version
if use_ocio_v2 and not maya_supports_ocio_v2:
# Fallback to legacy behavior with a warning
log.warning("Color Management Preference v2 is enabled but not "
"supported by current Maya version: {} (< {}). Falling "
"back to legacy settings.".format(
maya_version, required_maya_version)
)
use_ocio_v2 = False
if use_ocio_v2:
root_dict = imageio["colorManagementPreference_v2"]
else:
root_dict = imageio["colorManagementPreference"]
if not isinstance(root_dict, dict):
msg = "set_colorspace(): argument should be dictionary"
log.error(msg)
log.debug(">> root_dict: {}".format(root_dict))
# enable color management
cmds.colorManagementPrefs(e=True, cmEnabled=True)
cmds.colorManagementPrefs(e=True, ocioRulesEnabled=True)
# set config path
custom_ocio_config = False
if root_dict.get("configFilePath"):
unresolved_path = root_dict["configFilePath"]
ocio_paths = unresolved_path[platform.system().lower()]
resolved_path = None
for ocio_p in ocio_paths:
resolved_path = str(ocio_p).format(**os.environ)
if not os.path.exists(resolved_path):
continue
if resolved_path:
filepath = str(resolved_path).replace("\\", "/")
cmds.colorManagementPrefs(e=True, configFilePath=filepath)
cmds.colorManagementPrefs(e=True, cmConfigFileEnabled=True)
log.debug("maya '{}' changed to: {}".format(
"configFilePath", resolved_path))
custom_ocio_config = True
else:
cmds.colorManagementPrefs(e=True, cmConfigFileEnabled=False)
cmds.colorManagementPrefs(e=True, configFilePath="")
# If no custom OCIO config file was set we make sure that Maya 2022+
# either chooses between Maya's newer default v2 or legacy config based
# on OpenPype setting to use ocio v2 or not.
if maya_supports_ocio_v2 and not custom_ocio_config:
if use_ocio_v2:
# Use Maya 2022+ default OCIO v2 config
log.info("Setting default Maya OCIO v2 config")
cmds.colorManagementPrefs(edit=True, configFilePath="")
else:
# Set the Maya default config file path
log.info("Setting default Maya OCIO v1 legacy config")
cmds.colorManagementPrefs(edit=True, configFilePath="legacy")
# set color spaces for rendering space and view transforms
def _colormanage(**kwargs):
@ -3324,17 +3256,74 @@ def set_colorspace():
except RuntimeError as exc:
log.error(exc)
if use_ocio_v2:
_colormanage(renderingSpaceName=root_dict["renderSpace"])
_colormanage(displayName=root_dict["displayName"])
_colormanage(viewName=root_dict["viewName"])
else:
_colormanage(renderingSpaceName=root_dict["renderSpace"])
if maya_supports_ocio_v2:
_colormanage(viewName=root_dict["viewTransform"])
_colormanage(displayName="legacy")
project_name = os.getenv("AVALON_PROJECT")
imageio = get_project_settings(project_name)["maya"]["imageio"]
# ocio compatibility variables
ocio_v2_maya_version = 2022
maya_version = int(cmds.about(version=True))
ocio_v2_support = use_ocio_v2 = maya_version >= ocio_v2_maya_version
root_dict = {}
use_workfile_settings = imageio.get("workfile", {}).get("enabled")
if use_workfile_settings:
# TODO: deprecated code from 3.15.5 - remove
# Maya 2022+ introduces new OCIO v2 color management settings that
# can override the old color management preferences. OpenPype has
# separate settings for both so we fall back when necessary.
use_ocio_v2 = imageio["colorManagementPreference_v2"]["enabled"]
if use_ocio_v2 and not ocio_v2_support:
# Fallback to legacy behavior with a warning
log.warning(
"Color Management Preference v2 is enabled but not "
"supported by current Maya version: {} (< {}). Falling "
"back to legacy settings.".format(
maya_version, ocio_v2_maya_version)
)
if use_ocio_v2:
root_dict = imageio["colorManagementPreference_v2"]
else:
_colormanage(viewTransformName=root_dict["viewTransform"])
root_dict = imageio["colorManagementPreference"]
if not isinstance(root_dict, dict):
msg = "set_colorspace(): argument should be dictionary"
log.error(msg)
else:
root_dict = imageio["workfile"]
log.debug(">> root_dict: {}".format(pformat(root_dict)))
if root_dict:
# enable color management
cmds.colorManagementPrefs(e=True, cmEnabled=True)
cmds.colorManagementPrefs(e=True, ocioRulesEnabled=True)
# backward compatibility
# TODO: deprecated code from 3.15.5 - refactor to use new settings
view_name = root_dict.get("viewTransform")
if view_name is None:
view_name = root_dict.get("viewName")
if use_ocio_v2:
# Use Maya 2022+ default OCIO v2 config
log.info("Setting default Maya OCIO v2 config")
cmds.colorManagementPrefs(edit=True, configFilePath="")
# set rendering space and view transform
_colormanage(renderingSpaceName=root_dict["renderSpace"])
_colormanage(viewName=view_name)
_colormanage(displayName=root_dict["displayName"])
else:
# Set the Maya default config file path
log.info("Setting default Maya OCIO v1 legacy config")
cmds.colorManagementPrefs(edit=True, configFilePath="legacy")
# set rendering space and view transform
_colormanage(renderingSpaceName=root_dict["renderSpace"])
_colormanage(viewTransformName=view_name)
@contextlib.contextmanager
@ -3977,6 +3966,71 @@ def get_capture_preset(task_name, task_type, subset, project_settings, log):
return capture_preset or {}
def get_reference_node(members, log=None):
"""Get the reference node from the container members
Args:
members: list of node names
Returns:
str: Reference node name.
"""
# Collect the references without .placeHolderList[] attributes as
# unique entries (objects only) and skipping the sharedReferenceNode.
references = set()
for ref in cmds.ls(members, exactType="reference", objectsOnly=True):
# Ignore any `:sharedReferenceNode`
if ref.rsplit(":", 1)[-1].startswith("sharedReferenceNode"):
continue
# Ignore _UNKNOWN_REF_NODE_ (PLN-160)
if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"):
continue
references.add(ref)
assert references, "No reference node found in container"
# Get highest reference node (least parents)
highest = min(references,
key=lambda x: len(get_reference_node_parents(x)))
# Warn the user when we're taking the highest reference node
if len(references) > 1:
if not log:
log = logging.getLogger(__name__)
log.warning("More than one reference node found in "
"container, using highest reference node: "
"%s (in: %s)", highest, list(references))
return highest
def get_reference_node_parents(ref):
"""Return all parent reference nodes of reference node
Args:
ref (str): reference node.
Returns:
list: The upstream parent reference nodes.
"""
parent = cmds.referenceQuery(ref,
referenceNode=True,
parent=True)
parents = []
while parent:
parents.append(parent)
parent = cmds.referenceQuery(parent,
referenceNode=True,
parent=True)
return parents
def create_rig_animation_instance(
nodes, context, namespace, options=None, log=None
):
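With get_reference_node() and get_reference_node_parents() now living in lib, callers resolve the reference node from container members the way ReferenceLoader does further down in this diff. A short usage sketch; the container set name is a hypothetical example:

    from maya import cmds
    from openpype.hosts.maya.api import lib

    # Members of a loaded container objectSet (name is hypothetical).
    members = cmds.sets("modelMain_01_:modelMain_CON", query=True)

    # Pick the owning reference node (the highest one when several exist)
    # and read back its namespace, as ReferenceLoader.update() does.
    reference_node = lib.get_reference_node(members)
    namespace = cmds.referenceQuery(reference_node, namespace=True)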

View file

@ -113,7 +113,7 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost):
register_event_callback("taskChanged", on_task_changed)
register_event_callback("workfile.open.before", before_workfile_open)
register_event_callback("workfile.save.before", before_workfile_save)
register_event_callback("workfile.save.before", after_workfile_save)
register_event_callback("workfile.save.after", after_workfile_save)
def open_workfile(self, filepath):
return open_file(filepath)

View file

@ -19,69 +19,30 @@ from .pipeline import containerise
from . import lib
def get_reference_node(members, log=None):
"""Get the reference node from the container members
Args:
members: list of node names
log = Logger.get_logger()
Returns:
str: Reference node name.
# Backwards compatibility: these functions have been moved to lib.
def get_reference_node(*args, **kwargs):
"""
# Collect the references without .placeHolderList[] attributes as
# unique entries (objects only) and skipping the sharedReferenceNode.
references = set()
for ref in cmds.ls(members, exactType="reference", objectsOnly=True):
# Ignore any `:sharedReferenceNode`
if ref.rsplit(":", 1)[-1].startswith("sharedReferenceNode"):
continue
# Ignore _UNKNOWN_REF_NODE_ (PLN-160)
if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"):
continue
references.add(ref)
assert references, "No reference node found in container"
# Get highest reference node (least parents)
highest = min(references,
key=lambda x: len(get_reference_node_parents(x)))
# Warn the user when we're taking the highest reference node
if len(references) > 1:
if not log:
log = Logger.get_logger(__name__)
log.warning("More than one reference node found in "
"container, using highest reference node: "
"%s (in: %s)", highest, list(references))
return highest
def get_reference_node_parents(ref):
"""Return all parent reference nodes of reference node
Args:
ref (str): reference node.
Returns:
list: The upstream parent reference nodes.
Deprecated:
This function was moved and will be removed in 3.16.x.
"""
parent = cmds.referenceQuery(ref,
referenceNode=True,
parent=True)
parents = []
while parent:
parents.append(parent)
parent = cmds.referenceQuery(parent,
referenceNode=True,
parent=True)
return parents
msg = "Function 'get_reference_node' has been moved."
log.warning(msg)
cmds.warning(msg)
return lib.get_reference_node(*args, **kwargs)
def get_reference_node_parents(*args, **kwargs):
"""
Deprecated:
This function was moved and will be removed in 3.16.x.
"""
msg = "Function 'get_reference_node_parents' has been moved."
log.warning(msg)
cmds.warning(msg)
return lib.get_reference_node_parents(*args, **kwargs)
class Creator(LegacyCreator):
@ -205,7 +166,7 @@ class ReferenceLoader(Loader):
if not nodes:
return
ref_node = get_reference_node(nodes, self.log)
ref_node = lib.get_reference_node(nodes, self.log)
container = containerise(
name=name,
namespace=namespace,
@ -234,7 +195,7 @@ class ReferenceLoader(Loader):
# Get reference node from container members
members = get_container_members(node)
reference_node = get_reference_node(members, self.log)
reference_node = lib.get_reference_node(members, self.log)
namespace = cmds.referenceQuery(reference_node, namespace=True)
file_type = {
@ -382,7 +343,7 @@ class ReferenceLoader(Loader):
# Assume asset has been referenced
members = cmds.sets(node, query=True)
reference_node = get_reference_node(members, self.log)
reference_node = lib.get_reference_node(members, self.log)
assert reference_node, ("Imported container not supported; "
"container must be referenced.")

View file

@ -28,7 +28,9 @@ from openpype.pipeline import (
)
from openpype.hosts.maya.api.lib import (
matrix_equals,
unique_namespace
unique_namespace,
get_container_transforms,
DEFAULT_MATRIX
)
log = logging.getLogger("PackageLoader")
@ -183,8 +185,6 @@ def _add(instance, representation_id, loaders, namespace, root="|"):
"""
from openpype.hosts.maya.lib import get_container_transforms
# Process within the namespace
with namespaced(namespace, new=False) as namespace:
@ -379,8 +379,6 @@ def update_scene(set_container, containers, current_data, new_data, new_file):
"""
from openpype.hosts.maya.lib import DEFAULT_MATRIX, get_container_transforms
set_namespace = set_container['namespace']
project_name = legacy_io.active_project()

View file

@ -14,7 +14,7 @@ from openpype.tools.workfile_template_build import (
WorkfileBuildPlaceholderDialog,
)
from .lib import read, imprint
from .lib import read, imprint, get_reference_node, get_main_window
PLACEHOLDER_SET = "PLACEHOLDERS_SET"
@ -173,44 +173,37 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
def create_placeholder(self, placeholder_data):
selection = cmds.ls(selection=True)
if not selection:
raise ValueError("Nothing is selected")
if len(selection) > 1:
raise ValueError("More then one item are selected")
parent = selection[0] if selection else None
placeholder_data["plugin_identifier"] = self.identifier
placeholder_name = self._create_placeholder_name(placeholder_data)
placeholder = cmds.spaceLocator(name=placeholder_name)[0]
# TODO: this can crash if selection can't be used
cmds.parent(placeholder, selection[0])
if parent:
placeholder = cmds.parent(placeholder, selection[0])[0]
# get the long name of the placeholder (with the groups)
placeholder_full_name = (
cmds.ls(selection[0], long=True)[0]
+ "|"
+ placeholder.replace("|", "")
)
imprint(placeholder_full_name, placeholder_data)
imprint(placeholder, placeholder_data)
# Add helper attributes to keep placeholder info
cmds.addAttr(
placeholder_full_name,
placeholder,
longName="parent",
hidden=True,
dataType="string"
)
cmds.addAttr(
placeholder_full_name,
placeholder,
longName="index",
hidden=True,
attributeType="short",
defaultValue=-1
)
cmds.setAttr(placeholder_full_name + ".parent", "", type="string")
cmds.setAttr(placeholder + ".parent", "", type="string")
def update_placeholder(self, placeholder_item, placeholder_data):
node_name = placeholder_item.scene_identifier
@ -233,7 +226,7 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
if placeholder_data.get("plugin_identifier") != self.identifier:
continue
# TODO do data validations and maybe updgrades if are invalid
# TODO do data validations and maybe upgrades if they are invalid
output.append(
LoadPlaceholderItem(node_name, placeholder_data, self)
)
@ -250,15 +243,19 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
def get_placeholder_options(self, options=None):
return self.get_load_plugin_options(options)
def cleanup_placeholder(self, placeholder, failed):
def post_placeholder_process(self, placeholder, failed):
"""Hide placeholder, add them to placeholder set
"""
node = placeholder._scene_identifier
node = placeholder.scene_identifier
cmds.sets(node, addElement=PLACEHOLDER_SET)
cmds.hide(node)
cmds.setAttr(node + ".hiddenInOutliner", True)
def delete_placeholder(self, placeholder):
"""Remove placeholder if building was successful"""
cmds.delete(placeholder.scene_identifier)
def load_succeed(self, placeholder, container):
self._parent_in_hierarchy(placeholder, container)
@ -275,9 +272,19 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
return
roots = cmds.sets(container, q=True)
ref_node = get_reference_node(roots)
nodes_to_parent = []
for root in roots:
if ref_node:
ref_root = cmds.referenceQuery(root, nodes=True)[0]
ref_root = (
cmds.listRelatives(ref_root, parent=True, path=True) or
[ref_root]
)
nodes_to_parent.extend(ref_root)
continue
if root.endswith("_RN"):
# Backwards compatibility for hardcoded reference names.
refRoot = cmds.referenceQuery(root, n=True)[0]
refRoot = cmds.listRelatives(refRoot, parent=True) or [refRoot]
nodes_to_parent.extend(refRoot)
@ -294,10 +301,17 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
matrix=True,
worldSpace=True
)
scene_parent = cmds.listRelatives(
placeholder.scene_identifier, parent=True, fullPath=True
)
for node in set(nodes_to_parent):
cmds.reorder(node, front=True)
cmds.reorder(node, relative=placeholder.data["index"])
cmds.xform(node, matrix=placeholder_form, ws=True)
if scene_parent:
cmds.parent(node, scene_parent)
else:
cmds.parent(node, world=True)
holding_sets = cmds.listSets(object=placeholder.scene_identifier)
if not holding_sets:
@ -319,8 +333,9 @@ def update_workfile_template(*args):
def create_placeholder(*args):
host = registered_host()
builder = MayaTemplateBuilder(host)
window = WorkfileBuildPlaceholderDialog(host, builder)
window.exec_()
window = WorkfileBuildPlaceholderDialog(host, builder,
parent=get_main_window())
window.show()
def update_placeholder(*args):
@ -343,6 +358,7 @@ def update_placeholder(*args):
raise ValueError("Too many selected nodes")
placeholder_item = placeholder_items[0]
window = WorkfileBuildPlaceholderDialog(host, builder)
window = WorkfileBuildPlaceholderDialog(host, builder,
parent=get_main_window())
window.set_update_mode(placeholder_item)
window.exec_()

View file

@ -181,16 +181,34 @@ class CreateRender(plugin.Creator):
primary_pool = pool_setting["primary_pool"]
sorted_pools = self._set_default_pool(list(pools), primary_pool)
cmds.addAttr(self.instance, longName="primaryPool",
attributeType="enum",
enumName=":".join(sorted_pools))
cmds.addAttr(
self.instance,
longName="primaryPool",
attributeType="enum",
enumName=":".join(sorted_pools)
)
cmds.setAttr(
"{}.primaryPool".format(self.instance),
0,
keyable=False,
channelBox=True
)
pools = ["-"] + pools
secondary_pool = pool_setting["secondary_pool"]
sorted_pools = self._set_default_pool(list(pools), secondary_pool)
cmds.addAttr("{}.secondaryPool".format(self.instance),
attributeType="enum",
enumName=":".join(sorted_pools))
cmds.addAttr(
self.instance,
longName="secondaryPool",
attributeType="enum",
enumName=":".join(sorted_pools)
)
cmds.setAttr(
"{}.secondaryPool".format(self.instance),
0,
keyable=False,
channelBox=True
)
def _create_render_settings(self):
"""Create instance settings."""
@ -260,6 +278,12 @@ class CreateRender(plugin.Creator):
default_priority)
self.data["tile_priority"] = tile_priority
strict_error_checking = maya_submit_dl.get("strict_error_checking",
True)
self.data["strict_error_checking"] = strict_error_checking
# Pool attributes should be last since they will be recreated when
# the deadline server changes.
pool_setting = (self._project_settings["deadline"]
["publish"]
["CollectDeadlinePools"])
@ -272,9 +296,6 @@ class CreateRender(plugin.Creator):
secondary_pool = pool_setting["secondary_pool"]
self.data["secondaryPool"] = self._set_default_pool(pool_names,
secondary_pool)
strict_error_checking = maya_submit_dl.get("strict_error_checking",
True)
self.data["strict_error_checking"] = strict_error_checking
if muster_enabled:
self.log.info(">>> Loading Muster credentials ...")

View file

@ -105,7 +105,8 @@ class ImportMayaLoader(load.LoaderPlugin):
"camera",
"rig",
"camerarig",
"staticMesh"
"staticMesh",
"workfile"
]
label = "Import"

View file

@ -6,23 +6,29 @@ import maya.cmds as cmds
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
legacy_io,
get_representation_path
)
from openpype.hosts.maya.api.lib import (
unique_namespace, get_attribute_input, maintained_selection
unique_namespace,
get_attribute_input,
maintained_selection,
convert_to_maya_fps
)
from openpype.hosts.maya.api.pipeline import containerise
def is_sequence(files):
sequence = False
collections, remainder = clique.assemble(files)
collections, remainder = clique.assemble(files, minimum_items=1)
if collections:
sequence = True
return sequence
def get_current_session_fps():
session_fps = float(legacy_io.Session.get('AVALON_FPS', 25))
return convert_to_maya_fps(session_fps)
class ArnoldStandinLoader(load.LoaderPlugin):
"""Load as Arnold standin"""
@ -35,9 +41,15 @@ class ArnoldStandinLoader(load.LoaderPlugin):
color = "orange"
def load(self, context, name, namespace, options):
if not cmds.pluginInfo("mtoa", query=True, loaded=True):
cmds.loadPlugin("mtoa")
# Create defaultArnoldRenderOptions before creating aiStandin
# which tries to connect it. Since we load the plugin and directly
# create aiStandin without the defaultArnoldRenderOptions,
# we need to create the render options for aiStandin creation.
from mtoa.core import createOptions
createOptions()
# Make sure to load arnold before importing `mtoa.ui.arnoldmenu`
cmds.loadPlugin("mtoa", quiet=True)
import mtoa.ui.arnoldmenu
version = context['version']
@ -84,6 +96,9 @@ class ArnoldStandinLoader(load.LoaderPlugin):
sequence = is_sequence(os.listdir(os.path.dirname(self.fname)))
cmds.setAttr(standin_shape + ".useFrameExtension", sequence)
fps = float(version["data"].get("fps"))or get_current_session_fps()
cmds.setAttr(standin_shape + ".abcFPS", fps)
nodes = [root, standin, standin_shape]
if operator is not None:
nodes.append(operator)
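The only functional change in is_sequence() is clique's minimum_items=1, which lets a single cached frame still be treated as a sequence. A quick illustration with a hypothetical single-frame listing (behavior as documented by clique; worth verifying against the installed version):

    import clique

    files = ["standin.0001.ass"]  # hypothetical single-frame cache

    # Default minimum_items=2: the lone frame stays in `remainder`,
    # so is_sequence() would report False.
    collections, remainder = clique.assemble(files)
    print(bool(collections))  # False

    # minimum_items=1: the frame forms a one-item collection -> True.
    collections, remainder = clique.assemble(files, minimum_items=1)
    print(bool(collections))  # True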

View file

@ -1,8 +1,14 @@
import maya.cmds as cmds
from openpype.pipeline import (
load,
remove_container
)
from openpype.hosts.maya.api.pipeline import containerise
from openpype.hosts.maya.api.lib import unique_namespace
from openpype.hosts.maya.api import setdress
class AssemblyLoader(load.LoaderPlugin):
@ -16,9 +22,6 @@ class AssemblyLoader(load.LoaderPlugin):
def load(self, context, name, namespace, data):
from openpype.hosts.maya.api.pipeline import containerise
from openpype.hosts.maya.api.lib import unique_namespace
asset = context['asset']['name']
namespace = namespace or unique_namespace(
asset + "_",
@ -26,8 +29,6 @@ class AssemblyLoader(load.LoaderPlugin):
suffix="_",
)
from openpype.hosts.maya.api import setdress
containers = setdress.load_package(
filepath=self.fname,
name=name,
@ -50,15 +51,11 @@ class AssemblyLoader(load.LoaderPlugin):
def update(self, container, representation):
from openpype import setdress
return setdress.update_package(container, representation)
def remove(self, container):
"""Remove all sub containers"""
from openpype import setdress
import maya.cmds as cmds
# Remove all members
member_containers = setdress.get_contained_containers(container)
for member_container in member_containers:

View file

@ -273,6 +273,11 @@ class FileNodeLoader(load.LoaderPlugin):
project_name, host_name,
project_settings=project_settings
)
# ignore if host imageio is not enabled
if not config_data:
return
file_rules = get_imageio_file_rules(
project_name, host_name,
project_settings=project_settings

Some files were not shown because too many files have changed in this diff