Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)

Merge remote-tracking branch 'upstream/develop' into houdini_opengl

Commit a427b3dd05: 521 changed files with 16,955 additions and 6,517 deletions
.github/pull_request_template.md (vendored, 15 changes)

@@ -1,16 +1,9 @@
## Brief description
First sentence is brief description.

## Description
Next paragraf is more elaborate text with more info. This will be displayed for example in collapsed form under the first sentence in a changelog.
## Changelog Description
Paragraphs contain detailed information on the changes made to the product or service, providing an in-depth description of the updates and enhancements. They can be used to explain the reasoning behind the changes, or to highlight the importance of the new features. Paragraphs can often include links to further information or support documentation.

## Additional info
The rest will be ignored in changelog and should contain any additional
technical information.

## Documentation (add _"type: documentation"_ label)
[feature_documentation](future_url_after_it_will_be_merged)
Paragraphs of text giving context of additional technical information or code examples.

## Testing notes:
1. start with this step
2. follow this step
2. follow this step
.github/workflows/automate-projects.yml (vendored, 19 changes, file deleted)

@@ -1,19 +0,0 @@
name: Automate Projects

on:
  issues:
    types: [opened, labeled]
env:
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

jobs:
  assign_one_project:
    runs-on: ubuntu-latest
    name: Assign to One Project
    steps:
    - name: Assign NEW bugs to triage
      uses: srggrs/assign-one-project-github-action@1.2.0
      if: contains(github.event.issue.labels.*.name, 'bug')
      with:
        project: 'https://github.com/pypeclub/pype/projects/2'
        column_name: 'Needs triage'
.github/workflows/milestone_assign.yml (vendored, 6 changes)

@@ -13,7 +13,7 @@ jobs:
      if: github.event.pull_request.milestone == null
      uses: zoispag/action-assign-milestone@v1
      with:
-       repo-token: "${{ secrets.GITHUB_TOKEN }}"
+       repo-token: "${{ secrets.YNPUT_BOT_TOKEN }}"
        milestone: 'next-minor'

  run_if_develop:

@@ -24,5 +24,5 @@ jobs:
      if: github.event.pull_request.milestone == null
      uses: zoispag/action-assign-milestone@v1
      with:
-       repo-token: "${{ secrets.GITHUB_TOKEN }}"
-       milestone: 'next-patch'
+       repo-token: "${{ secrets.YNPUT_BOT_TOKEN }}"
+       milestone: 'next-patch'
.github/workflows/milestone_create.yml (vendored, 8 changes)

@@ -12,7 +12,7 @@ jobs:
      uses: "WyriHaximus/github-action-get-milestones@master"
      id: milestones
      env:
-       GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+       GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"

    - run: printf "name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number')
      id: querymilestone

@@ -31,7 +31,7 @@ jobs:
      with:
        title: 'next-patch'
      env:
-       GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+       GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"

  generate-next-minor:
    runs-on: ubuntu-latest

@@ -40,7 +40,7 @@ jobs:
      uses: "WyriHaximus/github-action-get-milestones@master"
      id: milestones
      env:
-       GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+       GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"

    - run: printf "name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number')
      id: querymilestone

@@ -59,4 +59,4 @@ jobs:
      with:
        title: 'next-minor'
      env:
-       GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+       GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"
.github/workflows/miletone_release_trigger.yml (vendored, new file, 47 changes)

@@ -0,0 +1,47 @@
name: Milestone Release [trigger]

on:
  workflow_dispatch:
    inputs:
      milestone:
        required: true
      release-type:
        type: choice
        description: What release should be created
        options:
          - release
          - pre-release
  milestone:
    types: closed


jobs:
  milestone-title:
    runs-on: ubuntu-latest
    outputs:
      milestone: ${{ steps.milestoneTitle.outputs.value }}
    steps:
      - name: Switch input milestone
        uses: haya14busa/action-cond@v1
        id: milestoneTitle
        with:
          cond: ${{ inputs.milestone == '' }}
          if_true: ${{ github.event.milestone.title }}
          if_false: ${{ inputs.milestone }}
      - name: Print resulted milestone
        run: |
          echo "${{ steps.milestoneTitle.outputs.value }}"

  call-ci-tools-milestone-release:
    needs: milestone-title
    uses: ynput/ci-tools/.github/workflows/milestone_release_ref.yml@main
    with:
      milestone: ${{ needs.milestone-title.outputs.milestone }}
      repo-owner: ${{ github.event.repository.owner.login }}
      repo-name: ${{ github.event.repository.name }}
      version-py-path: "./openpype/version.py"
      pyproject-path: "./pyproject.toml"
    secrets:
      token: ${{ secrets.YNPUT_BOT_TOKEN }}
      user_email: ${{ secrets.CI_EMAIL }}
      user_name: ${{ secrets.CI_USER }}
.github/workflows/nightly_merge.yml (vendored, 6 changes)

@@ -14,10 +14,10 @@ jobs:
      - name: 🚛 Checkout Code
        uses: actions/checkout@v2

-     - name: 🔨 Merge develop to main
+     - name: 🔨 Merge develop to main
        uses: everlytic/branch-merge@1.1.0
        with:
-         github_token: ${{ secrets.ADMIN_TOKEN }}
+         github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
          source_ref: 'develop'
          target_branch: 'main'
          commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'

@@ -26,4 +26,4 @@ jobs:
        uses: benc-uk/workflow-dispatch@v1
        with:
          workflow: Nightly Prerelease
-         token: ${{ secrets.ADMIN_TOKEN }}
+         token: ${{ secrets.YNPUT_BOT_TOKEN }}
.github/workflows/prerelease.yml (vendored, 40 changes)

@@ -25,43 +25,15 @@ jobs:
      - name: 🔎 Determine next version type
        id: version_type
        run: |
-         TYPE=$(python ./tools/ci_tools.py --bump --github_token ${{ secrets.GITHUB_TOKEN }})
-
-         echo ::set-output name=type::$TYPE
+         TYPE=$(python ./tools/ci_tools.py --bump --github_token ${{ secrets.YNPUT_BOT_TOKEN }})
+         echo "type=${TYPE}" >> $GITHUB_OUTPUT

      - name: 💉 Inject new version into files
        id: version
        if: steps.version_type.outputs.type != 'skip'
        run: |
-         RESULT=$(python ./tools/ci_tools.py --nightly --github_token ${{ secrets.GITHUB_TOKEN }})
-
-         echo ::set-output name=next_tag::$RESULT
-
-     # - name: "✏️ Generate full changelog"
-     #   if: steps.version_type.outputs.type != 'skip'
-     #   id: generate-full-changelog
-     #   uses: heinrichreimer/github-changelog-generator-action@v2.3
-     #   with:
-     #     token: ${{ secrets.ADMIN_TOKEN }}
-     #     addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
-     #     issues: false
-     #     issuesWoLabels: false
-     #     sinceTag: "3.12.0"
-     #     maxIssues: 100
-     #     pullRequests: true
-     #     prWoLabels: false
-     #     author: false
-     #     unreleased: true
-     #     compareLink: true
-     #     stripGeneratorNotice: true
-     #     verbose: true
-     #     unreleasedLabel: ${{ steps.version.outputs.next_tag }}
-     #     excludeTagsRegex: "CI/.+"
-     #     releaseBranch: "main"
-
-     - name: "🖨️ Print changelog to console"
-       if: steps.version_type.outputs.type != 'skip'
-       run: cat CHANGELOG.md
+         NEW_VERSION_TAG=$(python ./tools/ci_tools.py --nightly --github_token ${{ secrets.YNPUT_BOT_TOKEN }})
+         echo "next_tag=${NEW_VERSION_TAG}" >> $GITHUB_OUTPUT

      - name: 💾 Commit and Tag
        id: git_commit

@@ -80,7 +52,7 @@ jobs:
      - name: Push to protected main branch
        uses: CasperWA/push-protected@v2.10.0
        with:
-         token: ${{ secrets.ADMIN_TOKEN }}
+         token: ${{ secrets.YNPUT_BOT_TOKEN }}
          branch: main
          tags: true
          unprotect_reviews: true

@@ -89,7 +61,7 @@ jobs:
        uses: everlytic/branch-merge@1.1.0
        if: steps.version_type.outputs.type != 'skip'
        with:
-         github_token: ${{ secrets.ADMIN_TOKEN }}
+         github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
          source_ref: 'main'
          target_branch: 'develop'
          commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'
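The hunks above migrate from the deprecated `::set-output` workflow command to writing `name=value` pairs into the file that `$GITHUB_OUTPUT` points at. A minimal Python sketch of the same mechanism, assuming it runs inside a GitHub Actions step (the helper name is illustrative, not part of this changeset):

import os

def set_step_output(name: str, value: str) -> None:
    # Equivalent of: echo "name=value" >> $GITHUB_OUTPUT
    # GITHUB_OUTPUT is set by the Actions runner for each step.
    with open(os.environ["GITHUB_OUTPUT"], "a", encoding="utf-8") as out:
        out.write(f"{name}={value}\n")

set_step_output("type", "patch")  # later readable as steps.<id>.outputs.type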
.github/workflows/release.yml (vendored, 124 changes, file deleted)

@@ -1,124 +0,0 @@
name: Stable Release

on:
  release:
    types:
      - prereleased

jobs:
  create_release:
    runs-on: ubuntu-latest
    if: github.actor != 'pypebot'

    steps:
      - name: 🚛 Checkout Code
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install Python requirements
        run: pip install gitpython semver PyGithub

      - name: 💉 Inject new version into files
        id: version
        run: |
          echo ::set-output name=current_version::${GITHUB_REF#refs/*/}
          RESULT=$(python ./tools/ci_tools.py --finalize ${GITHUB_REF#refs/*/})
          LASTRELEASE=$(python ./tools/ci_tools.py --lastversion release)

          echo ::set-output name=last_release::$LASTRELEASE
          echo ::set-output name=release_tag::$RESULT

      # - name: "✏️ Generate full changelog"
      #   if: steps.version.outputs.release_tag != 'skip'
      #   id: generate-full-changelog
      #   uses: heinrichreimer/github-changelog-generator-action@v2.3
      #   with:
      #     token: ${{ secrets.ADMIN_TOKEN }}
      #     addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
      #     issues: false
      #     issuesWoLabels: false
      #     sinceTag: "3.12.0"
      #     maxIssues: 100
      #     pullRequests: true
      #     prWoLabels: false
      #     author: false
      #     unreleased: true
      #     compareLink: true
      #     stripGeneratorNotice: true
      #     verbose: true
      #     futureRelease: ${{ steps.version.outputs.release_tag }}
      #     excludeTagsRegex: "CI/.+"
      #     releaseBranch: "main"

      - name: 💾 Commit and Tag
        id: git_commit
        if: steps.version.outputs.release_tag != 'skip'
        run: |
          git config user.email ${{ secrets.CI_EMAIL }}
          git config user.name ${{ secrets.CI_USER }}
          git add .
          git commit -m "[Automated] Release"
          tag_name="${{ steps.version.outputs.release_tag }}"
          git tag -a $tag_name -m "stable release"

      - name: 🔏 Push to protected main branch
        if: steps.version.outputs.release_tag != 'skip'
        uses: CasperWA/push-protected@v2.10.0
        with:
          token: ${{ secrets.ADMIN_TOKEN }}
          branch: main
          tags: true
          unprotect_reviews: true

      - name: "✏️ Generate last changelog"
        if: steps.version.outputs.release_tag != 'skip'
        id: generate-last-changelog
        uses: heinrichreimer/github-changelog-generator-action@v2.2
        with:
          token: ${{ secrets.ADMIN_TOKEN }}
          addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
          issues: false
          issuesWoLabels: false
          sinceTag: ${{ steps.version.outputs.last_release }}
          maxIssues: 100
          pullRequests: true
          prWoLabels: false
          author: false
          unreleased: true
          compareLink: true
          stripGeneratorNotice: true
          verbose: true
          futureRelease: ${{ steps.version.outputs.release_tag }}
          excludeTagsRegex: "CI/.+"
          releaseBranch: "main"
          stripHeaders: true
          base: 'none'


      - name: 🚀 Github Release
        if: steps.version.outputs.release_tag != 'skip'
        uses: ncipollo/release-action@v1
        with:
          body: ${{ steps.generate-last-changelog.outputs.changelog }}
          tag: ${{ steps.version.outputs.release_tag }}
          token: ${{ secrets.ADMIN_TOKEN }}

      - name: ☠ Delete Pre-release
        if: steps.version.outputs.release_tag != 'skip'
        uses: cb80/delrel@latest
        with:
          tag: "${{ steps.version.outputs.current_version }}"

      - name: 🔁 Merge main back to develop
        if: steps.version.outputs.release_tag != 'skip'
        uses: everlytic/branch-merge@1.1.0
        with:
          github_token: ${{ secrets.ADMIN_TOKEN }}
          source_ref: 'main'
          target_branch: 'develop'
          commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}'
.github/workflows/test_build.yml (vendored, 26 changes)

@@ -28,7 +28,7 @@ jobs:
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}

      - name: 🧵 Install Requirements
        shell: pwsh
        run: |

@@ -64,27 +64,3 @@ jobs:
        run: |
          export SKIP_THIRD_PARTY_VALIDATION="1"
          ./tools/build.sh

-     # MacOS-latest:
-     #   runs-on: macos-latest
-     #   strategy:
-     #     matrix:
-     #       python-version: [3.9]
-
-     #   steps:
-     #   - name: 🚛 Checkout Code
-     #     uses: actions/checkout@v2
-
-     #   - name: Set up Python
-     #     uses: actions/setup-python@v2
-     #     with:
-     #       python-version: ${{ matrix.python-version }}
-
-     #   - name: 🧵 Install Requirements
-     #     run: |
-     #       ./tools/create_env.sh
-
-     #   - name: 🔨 Build
-     #     run: |
-     #       ./tools/build.sh
@@ -9,4 +9,4 @@ repos:
      - id: check-yaml
      - id: check-added-large-files
      - id: no-commit-to-branch
-       args: [ '--pattern', '^(?!((enhancement|feature|bugfix|documentation|tests|local|chore)\/[a-zA-Z0-9\-]+)$).*' ]
+       args: [ '--pattern', '^(?!((release|enhancement|feature|bugfix|documentation|tests|local|chore)\/[a-zA-Z0-9\-_]+)$).*' ]
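The updated pattern adds `release/` as an allowed branch prefix and permits underscores in branch names. `no-commit-to-branch` blocks any branch whose name matches the pattern, so the negative lookahead exempts well-formed branches. A quick check in Python (branch names are illustrative):

import re

PATTERN = (r"^(?!((release|enhancement|feature|bugfix|documentation"
           r"|tests|local|chore)\/[a-zA-Z0-9\-_]+)$).*")

def is_blocked(branch):
    # The hook refuses commits on branches whose name matches the pattern.
    return re.match(PATTERN, branch) is not None

assert is_blocked("main")                  # no allowed prefix, commit blocked
assert not is_blocked("release/3-15-0")    # newly allowed prefix
assert not is_blocked("bugfix/fix_thing")  # underscores now allowed too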
CHANGELOG.md (1183 changes)

File diff suppressed because it is too large.
@@ -8,7 +8,6 @@ OpenPype

[](https://github.com/pypeclub/pype/actions/workflows/documentation.yml)

this

Introduction
------------
openpype/api.py (112 changes, file deleted)

@@ -1,112 +0,0 @@
from .settings import (
    get_system_settings,
    get_project_settings,
    get_current_project_settings,
    get_anatomy_settings,

    SystemSettings,
    ProjectSettings
)
from .lib import (
    PypeLogger,
    Logger,
    Anatomy,
    execute,
    run_subprocess,
    version_up,
    get_asset,
    get_workdir_data,
    get_version_from_path,
    get_last_version_from_path,
    get_app_environments_for_context,
    source_hash,
    get_latest_version,
    get_local_site_id,
    change_openpype_mongo_url,
    create_project_folders,
    get_project_basic_paths
)

from .lib.mongo import (
    get_default_components
)

from .lib.applications import (
    ApplicationManager
)

from .lib.avalon_context import (
    BuildWorkfile
)

from . import resources

from .plugin import (
    Extractor,

    ValidatePipelineOrder,
    ValidateContentsOrder,
    ValidateSceneOrder,
    ValidateMeshOrder,
)

# temporary fix, might
from .action import (
    get_errored_instances_from_context,
    RepairAction,
    RepairContextAction
)


__all__ = [
    "get_system_settings",
    "get_project_settings",
    "get_current_project_settings",
    "get_anatomy_settings",
    "get_project_basic_paths",

    "SystemSettings",
    "ProjectSettings",

    "PypeLogger",
    "Logger",
    "Anatomy",
    "execute",
    "get_default_components",
    "ApplicationManager",
    "BuildWorkfile",

    # Resources
    "resources",

    # plugin classes
    "Extractor",
    # ordering
    "ValidatePipelineOrder",
    "ValidateContentsOrder",
    "ValidateSceneOrder",
    "ValidateMeshOrder",
    # action
    "get_errored_instances_from_context",
    "RepairAction",
    "RepairContextAction",

    # get contextual data
    "version_up",
    "get_asset",
    "get_workdir_data",
    "get_version_from_path",
    "get_last_version_from_path",
    "get_app_environments_for_context",
    "source_hash",

    "run_subprocess",
    "get_latest_version",

    "get_local_site_id",
    "change_openpype_mongo_url",

    "get_project_basic_paths",
    "create_project_folders"

]
@@ -164,7 +164,6 @@ def get_linked_representation_id(
        # Recursive graph lookup for inputs
        {"$graphLookup": graph_lookup}
    ]
-
    conn = get_project_connection(project_name)
    result = conn.aggregate(query_pipeline)
    referenced_version_ids = _process_referenced_pipeline_result(

@@ -213,7 +212,7 @@ def _process_referenced_pipeline_result(result, link_type):

    for output in sorted(outputs_recursive, key=lambda o: o["depth"]):
        output_links = output.get("data", {}).get("inputLinks")
-       if not output_links:
+       if not output_links and output["type"] != "hero_version":
            continue

        # Leaf

@@ -232,6 +231,9 @@ def _process_referenced_pipeline_result(result, link_type):


def _filter_input_links(input_links, link_type, correctly_linked_ids):
+   if not input_links:  # to handle hero versions
+       return
+
    for input_link in input_links:
        if link_type and input_link["type"] != link_type:
            continue
@@ -1,4 +1,5 @@
+import os

 from openpype.lib import PreLaunchHook


@@ -40,5 +41,13 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
            self.log.info("Current context does not have any workfile yet.")
            return

+       # Determine whether to open workfile post initialization.
+       if self.host_name == "maya":
+           key = "open_workfile_post_initialization"
+           if self.data["project_settings"]["maya"][key]:
+               self.log.debug("Opening workfile post initialization.")
+               self.data["env"]["OPENPYPE_" + key.upper()] = "1"
+               return
+
        # Add path to workfile to arguments
        self.launch_context.launch_args.append(last_workfile)
@@ -13,7 +13,7 @@ class LaunchFoundryAppsWindows(PreLaunchHook):

    # Should be as last hook because must change launch arguments to string
    order = 1000
-   app_groups = ["nuke", "nukex", "hiero", "nukestudio"]
+   app_groups = ["nuke", "nukeassist", "nukex", "hiero", "nukestudio"]
    platforms = ["windows"]

    def execute(self):
@@ -8,6 +8,7 @@ exists is used.

import os
from abc import ABCMeta, abstractmethod
+import platform

import six

@@ -187,11 +188,19 @@ class HostDirmap(object):

        self.log.debug("local overrides {}".format(active_overrides))
        self.log.debug("remote overrides {}".format(remote_overrides))
+       current_platform = platform.system().lower()
        for root_name, active_site_dir in active_overrides.items():
            remote_site_dir = (
                remote_overrides.get(root_name)
                or sync_settings["sites"][remote_site]["root"][root_name]
            )

+           if isinstance(remote_site_dir, dict):
+               remote_site_dir = remote_site_dir.get(current_platform)
+
+           if not remote_site_dir:
+               continue
+
            if os.path.isdir(active_site_dir):
                if "destination-path" not in mapping:
                    mapping["destination-path"] = []
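With this change a remote site root may be either a plain string or a per-platform mapping. A small hedged sketch of the resolution logic (the values are invented for illustration):

import platform

remote_site_dir = {"windows": "P:/projects", "linux": "/mnt/projects"}
if isinstance(remote_site_dir, dict):
    # Pick the entry for the current platform; the root is skipped when unset.
    remote_site_dir = remote_site_dir.get(platform.system().lower())
print(remote_site_dir)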
@@ -1,3 +1,4 @@
+import os
 import logging
 import contextlib
 from abc import ABCMeta, abstractproperty

@@ -100,6 +101,30 @@ class HostBase(object):

        pass

+   def get_current_project_name(self):
+       """
+       Returns:
+           Union[str, None]: Current project name.
+       """
+
+       return os.environ.get("AVALON_PROJECT")
+
+   def get_current_asset_name(self):
+       """
+       Returns:
+           Union[str, None]: Current asset name.
+       """
+
+       return os.environ.get("AVALON_ASSET")
+
+   def get_current_task_name(self):
+       """
+       Returns:
+           Union[str, None]: Current task name.
+       """
+
+       return os.environ.get("AVALON_TASK")
+
    def get_current_context(self):
        """Get current context information.


@@ -111,19 +136,14 @@ class HostBase(object):
        Default implementation returns values from 'legacy_io.Session'.

        Returns:
-           dict: Context with 3 keys 'project_name', 'asset_name' and
-               'task_name'. All of them can be 'None'.
+           Dict[str, Union[str, None]]: Context with 3 keys 'project_name',
+               'asset_name' and 'task_name'. All of them can be 'None'.
        """

-       from openpype.pipeline import legacy_io
-
-       if legacy_io.is_installed():
-           legacy_io.install()
-
        return {
-           "project_name": legacy_io.Session["AVALON_PROJECT"],
-           "asset_name": legacy_io.Session["AVALON_ASSET"],
-           "task_name": legacy_io.Session["AVALON_TASK"]
+           "project_name": self.get_current_project_name(),
+           "asset_name": self.get_current_asset_name(),
+           "task_name": self.get_current_task_name()
        }

    def get_context_title(self):
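The new methods give hosts a single override point for context resolution instead of reading `legacy_io.Session` directly. A hedged usage sketch, assuming a host was already registered via `install_host` (as done elsewhere in this changeset):

from openpype.pipeline import registered_host

host = registered_host()
context = host.get_current_context()
# -> {"project_name": ..., "asset_name": ..., "task_name": ...}
# Each value falls back to the AVALON_* environment variables and may be None.
print(context["project_name"])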
@@ -6,14 +6,19 @@ from openpype.hosts.aftereffects import api
 from openpype.pipeline import (
     Creator,
     CreatedInstance,
-    CreatorError,
-    legacy_io,
+    CreatorError
 )
 from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances
+from openpype.lib import prepare_template_data
+from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS


 class RenderCreator(Creator):
+    """Creates 'render' instance for publishing.
+
+    Result of 'render' instance is video or sequence of images for particular
+    composition based of configuration in its RenderQueue.
+    """
     identifier = "render"
     label = "Render"
     family = "render"

@@ -28,45 +33,6 @@ class RenderCreator(Creator):
                                  ["RenderCreator"]
                                  ["defaults"])

-    def get_icon(self):
-        return resources.get_openpype_splash_filepath()
-
-    def collect_instances(self):
-        for instance_data in cache_and_get_instances(self):
-            # legacy instances have family=='render' or 'renderLocal', use them
-            creator_id = (instance_data.get("creator_identifier") or
-                          instance_data.get("family", '').replace("Local", ''))
-            if creator_id == self.identifier:
-                instance_data = self._handle_legacy(instance_data)
-                instance = CreatedInstance.from_existing(
-                    instance_data, self
-                )
-                self._add_instance_to_context(instance)
-
-    def update_instances(self, update_list):
-        for created_inst, _changes in update_list:
-            api.get_stub().imprint(created_inst.get("instance_id"),
-                                   created_inst.data_to_store())
-            subset_change = _changes.get("subset")
-            if subset_change:
-                api.get_stub().rename_item(created_inst.data["members"][0],
-                                           subset_change[1])
-
-    def remove_instances(self, instances):
-        for instance in instances:
-            self._remove_instance_from_context(instance)
-            self.host.remove_instance(instance)
-
-            subset = instance.data["subset"]
-            comp_id = instance.data["members"][0]
-            comp = api.get_stub().get_item(comp_id)
-            if comp:
-                new_comp_name = comp.name.replace(subset, '')
-                if not new_comp_name:
-                    new_comp_name = "dummyCompName"
-                api.get_stub().rename_item(comp_id,
-                                           new_comp_name)
-
     def create(self, subset_name_from_ui, data, pre_create_data):
         stub = api.get_stub()  # only after After Effects is up
         if pre_create_data.get("use_selection"):

@@ -82,10 +48,19 @@ class RenderCreator(Creator):
                "if 'useSelection' or create at least "
                "one composition."
            )

+       use_composition_name = (pre_create_data.get("use_composition_name") or
+                               len(comps) > 1)
        for comp in comps:
-           if pre_create_data.get("use_composition_name"):
-               composition_name = comp.name
+           if use_composition_name:
+               if "{composition}" not in subset_name_from_ui.lower():
+                   subset_name_from_ui += "{Composition}"
+
+               composition_name = re.sub(
+                   "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
+                   "",
+                   comp.name
+               )

            dynamic_fill = prepare_template_data({"composition":
                                                  composition_name})
            subset_name = subset_name_from_ui.format(**dynamic_fill)

@@ -129,8 +104,72 @@ class RenderCreator(Creator):
        ]
        return output

+    def get_icon(self):
+        return resources.get_openpype_splash_filepath()
+
+    def collect_instances(self):
+        for instance_data in cache_and_get_instances(self):
+            # legacy instances have family=='render' or 'renderLocal', use them
+            creator_id = (instance_data.get("creator_identifier") or
+                          instance_data.get("family", '').replace("Local", ''))
+            if creator_id == self.identifier:
+                instance_data = self._handle_legacy(instance_data)
+                instance = CreatedInstance.from_existing(
+                    instance_data, self
+                )
+                self._add_instance_to_context(instance)
+
+    def update_instances(self, update_list):
+        for created_inst, _changes in update_list:
+            api.get_stub().imprint(created_inst.get("instance_id"),
+                                   created_inst.data_to_store())
+            subset_change = _changes.get("subset")
+            if subset_change:
+                api.get_stub().rename_item(created_inst.data["members"][0],
+                                           subset_change.new_value)
+
+    def remove_instances(self, instances):
+        for instance in instances:
+            self._remove_instance_from_context(instance)
+            self.host.remove_instance(instance)
+
+            subset = instance.data["subset"]
+            comp_id = instance.data["members"][0]
+            comp = api.get_stub().get_item(comp_id)
+            if comp:
+                new_comp_name = comp.name.replace(subset, '')
+                if not new_comp_name:
+                    new_comp_name = "dummyCompName"
+                api.get_stub().rename_item(comp_id,
+                                           new_comp_name)
+
     def get_detail_description(self):
-        return """Creator for Render instances"""
+        return """Creator for Render instances
+
+        Main publishable item in AfterEffects will be of `render` family.
+        Result of this item (instance) is picture sequence or video that could
+        be a final delivery product or loaded and used in another DCCs.
+
+        Select single composition and create instance of 'render' family or
+        turn off 'Use selection' to create instance for all compositions.
+
+        'Use composition name in subset' allows to explicitly add composition
+        name into created subset name.
+
+        Position of composition name could be set in
+        `project_settings/global/tools/creator/subset_name_profiles` with some
+        form of '{composition}' placeholder.
+
+        Composition name will be used implicitly if multiple composition should
+        be handled at same time.
+
+        If {composition} placeholder is not us 'subset_name_profiles'
+        composition name will be capitalized and set at the end of subset name
+        if necessary.
+
+        If composition name should be used, it will be cleaned up of characters
+        that would cause an issue in published file names.
+        """

     def get_dynamic_data(self, variant, task_name, asset_doc,
                          project_name, host_name, instance):

@@ -155,7 +194,7 @@ class RenderCreator(Creator):
        instance_data.pop("uuid")

        if not instance_data.get("task"):
-           instance_data["task"] = legacy_io.Session.get("AVALON_TASK")
+           instance_data["task"] = self.create_context.get_current_task_name()

        if not instance_data.get("creator_attributes"):
            is_old_farm = instance_data["family"] != "renderLocal"
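The `{composition}` fill above goes through `prepare_template_data`, which (as used here) expands a lowercase key into capitalization variants so both '{composition}' and '{Composition}' placeholders resolve. A hedged sketch with plain `str.format` standing in for the real helper (values invented):

# Illustrative values; the real fill data comes from prepare_template_data().
subset_name_from_ui = "render{Composition}"
composition_name = "mainComp"
dynamic_fill = {
    "composition": composition_name,
    "Composition": composition_name[0].upper() + composition_name[1:],
}
subset_name = subset_name_from_ui.format(**dynamic_fill)  # "renderMainComp"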
@@ -2,8 +2,7 @@ import openpype.hosts.aftereffects.api as api
 from openpype.client import get_asset_by_name
 from openpype.pipeline import (
     AutoCreator,
-    CreatedInstance,
-    legacy_io,
+    CreatedInstance
 )
 from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances


@@ -38,10 +37,11 @@ class AEWorkfileCreator(AutoCreator):
                existing_instance = instance
                break

-       project_name = legacy_io.Session["AVALON_PROJECT"]
-       asset_name = legacy_io.Session["AVALON_ASSET"]
-       task_name = legacy_io.Session["AVALON_TASK"]
-       host_name = legacy_io.Session["AVALON_APP"]
+       context = self.create_context
+       project_name = context.get_current_project_name()
+       asset_name = context.get_current_asset_name()
+       task_name = context.get_current_task_name()
+       host_name = context.host_name

        if existing_instance is None:
            asset_doc = get_asset_by_name(project_name, asset_name)
@@ -1,6 +1,6 @@
 import json
 import pyblish.api
-from openpype.hosts.aftereffects.api import list_instances
+from openpype.hosts.aftereffects.api import AfterEffectsHost


 class PreCollectRender(pyblish.api.ContextPlugin):

@@ -25,7 +25,7 @@ class PreCollectRender(pyblish.api.ContextPlugin):
            self.log.debug("Not applicable for New Publisher, skip")
            return

-       for inst in list_instances():
+       for inst in AfterEffectsHost().list_instances():
            if inst.get("creator_attributes"):
                raise ValueError("Instance created in New publisher, "
                                 "cannot be published in Pyblish.\n"
@@ -44,7 +44,7 @@ class AppendBlendLoader(plugin.AssetLoader):
    """

    representations = ["blend"]
-   families = ["*"]
+   families = ["workfile"]

    label = "Append Workfile"
    order = 9

@@ -68,7 +68,7 @@ class ImportBlendLoader(plugin.AssetLoader):
    """

    representations = ["blend"]
-   families = ["*"]
+   families = ["workfile"]

    label = "Import Workfile"
    order = 9
@@ -19,7 +19,6 @@ class ValidateCameraZeroKeyframe(pyblish.api.InstancePlugin):
    order = ValidateContentsOrder
    hosts = ["blender"]
    families = ["camera"]
-   version = (0, 1, 0)
    label = "Zero Keyframe"
    actions = [openpype.hosts.blender.api.action.SelectInvalidAction]
@@ -14,7 +14,6 @@ class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
    order = ValidateContentsOrder
    hosts = ["blender"]
    families = ["model"]
-   category = "geometry"
    label = "Mesh Has UV's"
    actions = [openpype.hosts.blender.api.action.SelectInvalidAction]
    optional = True
@@ -14,7 +14,6 @@ class ValidateMeshNoNegativeScale(pyblish.api.Validator):
    order = ValidateContentsOrder
    hosts = ["blender"]
    families = ["model"]
-   category = "geometry"
    label = "Mesh No Negative Scale"
    actions = [openpype.hosts.blender.api.action.SelectInvalidAction]
@@ -19,7 +19,6 @@ class ValidateNoColonsInName(pyblish.api.InstancePlugin):
    order = ValidateContentsOrder
    hosts = ["blender"]
    families = ["model", "rig"]
-   version = (0, 1, 0)
    label = "No Colons in names"
    actions = [openpype.hosts.blender.api.action.SelectInvalidAction]
@@ -21,7 +21,6 @@ class ValidateTransformZero(pyblish.api.InstancePlugin):
    order = ValidateContentsOrder
    hosts = ["blender"]
    families = ["model"]
-   version = (0, 1, 0)
    label = "Transform Zero"
    actions = [openpype.hosts.blender.api.action.SelectInvalidAction]
@@ -1,5 +1,4 @@
 import pyblish.api
-import argparse
 import sys
 from pprint import pformat

@@ -11,20 +10,40 @@ class CollectCelactionCliKwargs(pyblish.api.Collector):
    order = pyblish.api.Collector.order - 0.1

    def process(self, context):
-       parser = argparse.ArgumentParser(prog="celaction")
-       parser.add_argument("--currentFile",
-                           help="Pass file to Context as `currentFile`")
-       parser.add_argument("--chunk",
-                           help=("Render chanks on farm"))
-       parser.add_argument("--frameStart",
-                           help=("Start of frame range"))
-       parser.add_argument("--frameEnd",
-                           help=("End of frame range"))
-       parser.add_argument("--resolutionWidth",
-                           help=("Width of resolution"))
-       parser.add_argument("--resolutionHeight",
-                           help=("Height of resolution"))
-       passing_kwargs = parser.parse_args(sys.argv[1:]).__dict__
+       args = list(sys.argv[1:])
+       self.log.info(str(args))
+       missing_kwargs = []
+       passing_kwargs = {}
+       for key in (
+           "chunk",
+           "frameStart",
+           "frameEnd",
+           "resolutionWidth",
+           "resolutionHeight",
+           "currentFile",
+       ):
+           arg_key = f"--{key}"
+           if arg_key not in args:
+               missing_kwargs.append(key)
+               continue
+           arg_idx = args.index(arg_key)
+           args.pop(arg_idx)
+           if key != "currentFile":
+               value = args.pop(arg_idx)
+           else:
+               path_parts = []
+               while arg_idx < len(args):
+                   path_parts.append(args.pop(arg_idx))
+               value = " ".join(path_parts).strip('"')
+
+           passing_kwargs[key] = value
+
+       if missing_kwargs:
+           raise RuntimeError("Missing arguments {}".format(
+               ", ".join(
+                   [f'"{key}"' for key in missing_kwargs]
+               )
+           ))

        self.log.info("Storing kwargs ...")
        self.log.debug("_ passing_kwargs: {}".format(pformat(passing_kwargs)))
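The hand-rolled parsing above replaces argparse so that `--currentFile` can swallow every remaining token: CelAction may pass an unquoted path containing spaces. A hedged illustration of the resulting behavior, assuming `--currentFile` is the final flag on the command line (the values are invented):

# After "--currentFile", all trailing tokens are joined back into one path.
argv = ["--chunk", "10", "--frameStart", "1", "--frameEnd", "20",
        "--resolutionWidth", "1920", "--resolutionHeight", "1080",
        "--currentFile", "C:/projects/My", "Shot", "v001.scn"]
# The collector would then store:
#   passing_kwargs["currentFile"] == "C:/projects/My Shot v001.scn"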
@@ -702,6 +702,37 @@ class ClipLoader(LoaderPlugin):

    _mapping = None

+   def apply_settings(cls, project_settings, system_settings):
+
+       plugin_type_settings = (
+           project_settings
+           .get("flame", {})
+           .get("load", {})
+       )
+
+       if not plugin_type_settings:
+           return
+
+       plugin_name = cls.__name__
+
+       plugin_settings = None
+       # Look for plugin settings in host specific settings
+       if plugin_name in plugin_type_settings:
+           plugin_settings = plugin_type_settings[plugin_name]
+
+       if not plugin_settings:
+           return
+
+       print(">>> We have preset for {}".format(plugin_name))
+       for option, value in plugin_settings.items():
+           if option == "enabled" and value is False:
+               print(" - is disabled by preset")
+           elif option == "representations":
+               continue
+           else:
+               print(" - setting `{}`: `{}`".format(option, value))
+               setattr(cls, option, value)
+
    def get_colorspace(self, context):
        """Get colorspace name
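`apply_settings` lets project settings override loader class attributes before the plugin is used. A minimal hedged sketch of the same pattern outside OpenPype (all names are invented for illustration):

# Settings-driven class attribute override, mirroring apply_settings() above.
class DummyLoader:
    enabled = True
    clip_name_template = "{asset}_{subset}"

project_settings = {
    "flame": {"load": {"DummyLoader": {"clip_name_template": "{asset}"}}}
}
plugin_settings = project_settings["flame"]["load"]["DummyLoader"]
for option, value in plugin_settings.items():
    setattr(DummyLoader, option, value)  # class-level, affects all instances

assert DummyLoader.clip_name_template == "{asset}"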
@@ -4,6 +4,10 @@ import flame
 from pprint import pformat
 import openpype.hosts.flame.api as opfapi
 from openpype.lib import StringTemplate
+from openpype.lib.transcoding import (
+    VIDEO_EXTENSIONS,
+    IMAGE_EXTENSIONS
+)


 class LoadClip(opfapi.ClipLoader):

@@ -14,7 +18,10 @@ class LoadClip(opfapi.ClipLoader):
    """

    families = ["render2d", "source", "plate", "render", "review"]
-   representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264"]
+   representations = ["*"]
+   extensions = set(
+       ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
+   )

    label = "Load as clip"
    order = -10
@@ -4,7 +4,10 @@ import flame
 from pprint import pformat
 import openpype.hosts.flame.api as opfapi
 from openpype.lib import StringTemplate
+from openpype.lib.transcoding import (
+    VIDEO_EXTENSIONS,
+    IMAGE_EXTENSIONS
+)

 class LoadClipBatch(opfapi.ClipLoader):
    """Load a subset to timeline as clip

@@ -14,7 +17,10 @@ class LoadClipBatch(opfapi.ClipLoader):
    """

    families = ["render2d", "source", "plate", "render", "review"]
-   representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264"]
+   representations = ["*"]
+   extensions = set(
+       ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
+   )

    label = "Load as clip to current batch"
    order = -10
@@ -143,6 +143,9 @@ class ExtractSubsetResources(publish.Extractor):
        # create staging dir path
        staging_dir = self.staging_dir(instance)

+       # append staging dir for later cleanup
+       instance.context.data["cleanupFullPaths"].append(staging_dir)
+
        # add default preset type for thumbnail and reviewable video
        # update them with settings and override in case the same
        # are found in there

@@ -548,30 +551,3 @@ class ExtractSubsetResources(publish.Extractor):
                "Path `{}` is containing more that one clip".format(path)
            )
        return clips[0]
-
-   def staging_dir(self, instance):
-       """Provide a temporary directory in which to store extracted files
-
-       Upon calling this method the staging directory is stored inside
-       the instance.data['stagingDir']
-       """
-       staging_dir = instance.data.get('stagingDir', None)
-       openpype_temp_dir = os.getenv("OPENPYPE_TEMP_DIR")
-
-       if not staging_dir:
-           if openpype_temp_dir and os.path.exists(openpype_temp_dir):
-               staging_dir = os.path.normpath(
-                   tempfile.mkdtemp(
-                       prefix="pyblish_tmp_",
-                       dir=openpype_temp_dir
-                   )
-               )
-           else:
-               staging_dir = os.path.normpath(
-                   tempfile.mkdtemp(prefix="pyblish_tmp_")
-               )
-           instance.data['stagingDir'] = staging_dir
-
-       instance.context.data["cleanupFullPaths"].append(staging_dir)
-
-       return staging_dir
@@ -1,20 +1,11 @@
 from .pipeline import (
-    install,
-    uninstall,
-
+    FusionHost,
     ls,

     imprint_container,
-    parse_container
-)
-
-from .workio import (
-    open_file,
-    save_file,
-    current_file,
-    has_unsaved_changes,
-    file_extensions,
-    work_root
+    parse_container,
+    list_instances,
+    remove_instance
 )

 from .lib import (

@@ -30,21 +21,11 @@ from .menu import launch_openpype_menu

 __all__ = [
     # pipeline
-    "install",
-    "uninstall",
     "ls",

     "imprint_container",
     "parse_container",

-    # workio
-    "open_file",
-    "save_file",
-    "current_file",
-    "has_unsaved_changes",
-    "file_extensions",
-    "work_root",

     # lib
     "maintained_selection",
     "update_frame_range",
openpype/hosts/fusion/api/action.py (new file, 54 changes)

@@ -0,0 +1,54 @@
import pyblish.api


from openpype.hosts.fusion.api.lib import get_current_comp
from openpype.pipeline.publish import get_errored_instances_from_context


class SelectInvalidAction(pyblish.api.Action):
    """Select invalid nodes in Maya when plug-in failed.

    To retrieve the invalid nodes this assumes a static `get_invalid()`
    method is available on the plugin.

    """
    label = "Select invalid"
    on = "failed"  # This action is only available on a failed plug-in
    icon = "search"  # Icon from Awesome Icon

    def process(self, context, plugin):
        errored_instances = get_errored_instances_from_context(context)

        # Apply pyblish.logic to get the instances for the plug-in
        instances = pyblish.api.instances_by_plugin(errored_instances, plugin)

        # Get the invalid nodes for the plug-ins
        self.log.info("Finding invalid nodes..")
        invalid = list()
        for instance in instances:
            invalid_nodes = plugin.get_invalid(instance)
            if invalid_nodes:
                if isinstance(invalid_nodes, (list, tuple)):
                    invalid.extend(invalid_nodes)
                else:
                    self.log.warning("Plug-in returned to be invalid, "
                                     "but has no selectable nodes.")

        if not invalid:
            # Assume relevant comp is current comp and clear selection
            self.log.info("No invalid tools found.")
            comp = get_current_comp()
            flow = comp.CurrentFrame.FlowView
            flow.Select()  # No args equals clearing selection
            return

        # Assume a single comp
        first_tool = invalid[0]
        comp = first_tool.Comp()
        flow = comp.CurrentFrame.FlowView
        flow.Select()  # No args equals clearing selection
        names = set()
        for tool in invalid:
            flow.Select(tool, True)
            names.add(tool.Name)
        self.log.info("Selecting invalid tools: %s" % ", ".join(sorted(names)))
@@ -210,7 +210,8 @@ def switch_item(container,
    if any(not x for x in [asset_name, subset_name, representation_name]):
        repre_id = container["representation"]
        representation = get_representation_by_id(project_name, repre_id)
-       repre_parent_docs = get_representation_parents(representation)
+       repre_parent_docs = get_representation_parents(
+           project_name, representation)
        if repre_parent_docs:
            version, subset, asset, _ = repre_parent_docs
        else:
@@ -7,11 +7,11 @@ from openpype.style import load_stylesheet
 from openpype.lib import register_event_callback
 from openpype.hosts.fusion.scripts import (
     set_rendermode,
-    duplicate_with_inputs
+    duplicate_with_inputs,
 )
 from openpype.hosts.fusion.api.lib import (
     set_asset_framerange,
-    set_asset_resolution
+    set_asset_resolution,
 )
 from openpype.pipeline import legacy_io
 from openpype.resources import get_openpype_icon_filepath

@@ -45,17 +45,19 @@ class OpenPypeMenu(QtWidgets.QWidget):
        self.setWindowTitle("OpenPype")

        asset_label = QtWidgets.QLabel("Context", self)
-       asset_label.setStyleSheet("""QLabel {
+       asset_label.setStyleSheet(
+           """QLabel {
            font-size: 14px;
            font-weight: 600;
            color: #5f9fb8;
-           }""")
+           }"""
+       )
        asset_label.setAlignment(QtCore.Qt.AlignHCenter)

        workfiles_btn = QtWidgets.QPushButton("Workfiles...", self)
        create_btn = QtWidgets.QPushButton("Create...", self)
-       publish_btn = QtWidgets.QPushButton("Publish...", self)
        load_btn = QtWidgets.QPushButton("Load...", self)
+       publish_btn = QtWidgets.QPushButton("Publish...", self)
        manager_btn = QtWidgets.QPushButton("Manage...", self)
        libload_btn = QtWidgets.QPushButton("Library...", self)
        rendermode_btn = QtWidgets.QPushButton("Set render mode...", self)

@@ -108,7 +110,8 @@ class OpenPypeMenu(QtWidgets.QWidget):
        libload_btn.clicked.connect(self.on_libload_clicked)
        rendermode_btn.clicked.connect(self.on_rendermode_clicked)
        duplicate_with_inputs_btn.clicked.connect(
-           self.on_duplicate_with_inputs_clicked)
+           self.on_duplicate_with_inputs_clicked
+       )
        set_resolution_btn.clicked.connect(self.on_set_resolution_clicked)
        set_framerange_btn.clicked.connect(self.on_set_framerange_clicked)

@@ -130,7 +133,6 @@ class OpenPypeMenu(QtWidgets.QWidget):
        self.asset_label.setText(label)

    def register_callback(self, name, fn):
-
        # Create a wrapper callback that we only store
        # for as long as we want it to persist as callback
        def _callback(*args):

@@ -146,10 +148,10 @@ class OpenPypeMenu(QtWidgets.QWidget):
        host_tools.show_workfiles()

    def on_create_clicked(self):
-       host_tools.show_creator()
+       host_tools.show_publisher(tab="create")

    def on_publish_clicked(self):
-       host_tools.show_publish()
+       host_tools.show_publisher(tab="publish")

    def on_load_clicked(self):
        host_tools.show_loader(use_context=True)
@@ -4,6 +4,7 @@ Basic avalon integration
 import os
+import sys
 import logging
 import contextlib

 import pyblish.api
 from qtpy import QtCore

@@ -17,15 +18,14 @@ from openpype.pipeline (
     register_loader_plugin_path,
     register_creator_plugin_path,
     register_inventory_action_path,
-    deregister_loader_plugin_path,
-    deregister_creator_plugin_path,
-    deregister_inventory_action_path,
     AVALON_CONTAINER_ID,
 )
+from openpype.pipeline.load import any_outdated_containers
 from openpype.hosts.fusion import FUSION_HOST_DIR
+from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost
 from openpype.tools.utils import host_tools


 from .lib import (
     get_current_comp,
     comp_lock_and_undo_chunk,

@@ -66,94 +66,98 @@ class FusionLogHandler(logging.Handler):
        self.print(entry)


-def install():
-    """Install fusion-specific functionality of OpenPype.
-
-    This is where you install menus and register families, data
-    and loaders into fusion.
-
-    It is called automatically when installing via
-    `openpype.pipeline.install_host(openpype.hosts.fusion.api)`
-
-    See the Maya equivalent for inspiration on how to implement this.
-
-    """
-    # Remove all handlers associated with the root logger object, because
-    # that one always logs as "warnings" incorrectly.
-    for handler in logging.root.handlers[:]:
-        logging.root.removeHandler(handler)
-
-    # Attach default logging handler that prints to active comp
-    logger = logging.getLogger()
-    formatter = logging.Formatter(fmt="%(message)s\n")
-    handler = FusionLogHandler()
-    handler.setFormatter(formatter)
-    logger.addHandler(handler)
-    logger.setLevel(logging.DEBUG)
-
-    pyblish.api.register_host("fusion")
-    pyblish.api.register_plugin_path(PUBLISH_PATH)
-    log.info("Registering Fusion plug-ins..")
-
-    register_loader_plugin_path(LOAD_PATH)
-    register_creator_plugin_path(CREATE_PATH)
-    register_inventory_action_path(INVENTORY_PATH)
-
-    pyblish.api.register_callback(
-        "instanceToggled", on_pyblish_instance_toggled
-    )
-
-    # Register events
-    register_event_callback("open", on_after_open)
-    register_event_callback("save", on_save)
-    register_event_callback("new", on_new)
-
-
-def uninstall():
-    """Uninstall all that was installed
-
-    This is where you undo everything that was done in `install()`.
-    That means, removing menus, deregistering families and data
-    and everything. It should be as though `install()` was never run,
-    because odds are calling this function means the user is interested
-    in re-installing shortly afterwards. If, for example, he has been
-    modifying the menu or registered families.
-
-    """
-    pyblish.api.deregister_host("fusion")
-    pyblish.api.deregister_plugin_path(PUBLISH_PATH)
-    log.info("Deregistering Fusion plug-ins..")
-
-    deregister_loader_plugin_path(LOAD_PATH)
-    deregister_creator_plugin_path(CREATE_PATH)
-    deregister_inventory_action_path(INVENTORY_PATH)
-
-    pyblish.api.deregister_callback(
-        "instanceToggled", on_pyblish_instance_toggled
-    )
-
-
-def on_pyblish_instance_toggled(instance, old_value, new_value):
-    """Toggle saver tool passthrough states on instance toggles."""
-    comp = instance.context.data.get("currentComp")
-    if not comp:
-        return
-
-    savers = [tool for tool in instance if
-              getattr(tool, "ID", None) == "Saver"]
-    if not savers:
-        return
-
-    # Whether instances should be passthrough based on new value
-    passthrough = not new_value
-    with comp_lock_and_undo_chunk(comp,
-                                  undo_queue_name="Change instance "
-                                                  "active state"):
-        for tool in savers:
-            attrs = tool.GetAttrs()
-            current = attrs["TOOLB_PassThrough"]
-            if current != passthrough:
-                tool.SetAttrs({"TOOLB_PassThrough": passthrough})
+class FusionHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
+    name = "fusion"
+
+    def install(self):
+        """Install fusion-specific functionality of OpenPype.
+
+        This is where you install menus and register families, data
+        and loaders into fusion.
+
+        It is called automatically when installing via
+        `openpype.pipeline.install_host(openpype.hosts.fusion.api)`
+
+        See the Maya equivalent for inspiration on how to implement this.
+
+        """
+        # Remove all handlers associated with the root logger object, because
+        # that one always logs as "warnings" incorrectly.
+        for handler in logging.root.handlers[:]:
+            logging.root.removeHandler(handler)
+
+        # Attach default logging handler that prints to active comp
+        logger = logging.getLogger()
+        formatter = logging.Formatter(fmt="%(message)s\n")
+        handler = FusionLogHandler()
+        handler.setFormatter(formatter)
+        logger.addHandler(handler)
+        logger.setLevel(logging.DEBUG)
+
+        pyblish.api.register_host("fusion")
+        pyblish.api.register_plugin_path(PUBLISH_PATH)
+        log.info("Registering Fusion plug-ins..")
+
+        register_loader_plugin_path(LOAD_PATH)
+        register_creator_plugin_path(CREATE_PATH)
+        register_inventory_action_path(INVENTORY_PATH)
+
+        # Register events
+        register_event_callback("open", on_after_open)
+        register_event_callback("save", on_save)
+        register_event_callback("new", on_new)
+
+    # region workfile io api
+    def has_unsaved_changes(self):
+        comp = get_current_comp()
+        return comp.GetAttrs()["COMPB_Modified"]
+
+    def get_workfile_extensions(self):
+        return [".comp"]
+
+    def save_workfile(self, dst_path=None):
+        comp = get_current_comp()
+        comp.Save(dst_path)
+
+    def open_workfile(self, filepath):
+        # Hack to get fusion, see
+        # openpype.hosts.fusion.api.pipeline.get_current_comp()
+        fusion = getattr(sys.modules["__main__"], "fusion", None)
+
+        return fusion.LoadComp(filepath)
+
+    def get_current_workfile(self):
+        comp = get_current_comp()
+        current_filepath = comp.GetAttrs()["COMPS_FileName"]
+        if not current_filepath:
+            return None
+
+        return current_filepath
+
+    def work_root(self, session):
+        work_dir = session["AVALON_WORKDIR"]
+        scene_dir = session.get("AVALON_SCENEDIR")
+        if scene_dir:
+            return os.path.join(work_dir, scene_dir)
+        else:
+            return work_dir
+    # endregion
+
+    @contextlib.contextmanager
+    def maintained_selection(self):
+        from .lib import maintained_selection
+        return maintained_selection()
+
+    def get_containers(self):
+        return ls()
+
+    def update_context_data(self, data, changes):
+        comp = get_current_comp()
+        comp.SetData("openpype", data)
+
+    def get_context_data(self):
+        comp = get_current_comp()
+        return comp.GetData("openpype") or {}


def on_new(event):

@@ -283,9 +287,51 @@ def parse_container(tool):
    return container


+# TODO: Function below is currently unused prototypes
+def list_instances(creator_id=None):
+    """Return created instances in current workfile which will be published.
+    Returns:
+        (list) of dictionaries matching instances format
+    """
+
+    comp = get_current_comp()
+    tools = comp.GetToolList(False).values()
+
+    instance_signature = {
+        "id": "pyblish.avalon.instance",
+        "identifier": creator_id
+    }
+    instances = []
+    for tool in tools:
+
+        data = tool.GetData('openpype')
+        if not isinstance(data, dict):
+            continue
+
+        if data.get("id") != instance_signature["id"]:
+            continue
+
+        if creator_id and data.get("identifier") != creator_id:
+            continue
+
+        instances.append(tool)
+
+    return instances
+
+
+# TODO: Function below is currently unused prototypes
+def remove_instance(instance):
+    """Remove instance from current workfile.
+
+    Args:
+        instance (dict): instance representation from subsetmanager model
+    """
+    # Assume instance is a Fusion tool directly
+    instance["tool"].Delete()
+
+
 class FusionEventThread(QtCore.QThread):
    """QThread which will periodically ping Fusion app for any events.

    The fusion.UIManager must be set up to be notified of events before they'll
    be reported by this thread, for example:
    fusion.UIManager.AddNotify("Comp_Save", None)
@@ -1,45 +0,0 @@
"""Host API required Work Files tool"""
import sys
import os

from .lib import get_current_comp


def file_extensions():
    return [".comp"]


def has_unsaved_changes():
    comp = get_current_comp()
    return comp.GetAttrs()["COMPB_Modified"]


def save_file(filepath):
    comp = get_current_comp()
    comp.Save(filepath)


def open_file(filepath):
    # Hack to get fusion, see
    # openpype.hosts.fusion.api.pipeline.get_current_comp()
    fusion = getattr(sys.modules["__main__"], "fusion", None)

    return fusion.LoadComp(filepath)


def current_file():
    comp = get_current_comp()
    current_filepath = comp.GetAttrs()["COMPS_FileName"]
    if not current_filepath:
        return None

    return current_filepath


def work_root(session):
    work_dir = session["AVALON_WORKDIR"]
    scene_dir = session.get("AVALON_SCENEDIR")
    if scene_dir:
        return os.path.join(work_dir, scene_dir)
    else:
        return work_dir
@@ -13,11 +13,11 @@ def main(env):
    # However the contents of that folder can conflict with Qt library dlls
    # so we make sure to move out of it to avoid DLL Load Failed errors.
    os.chdir("..")
    from openpype.hosts.fusion import api
    from openpype.hosts.fusion.api import FusionHost
    from openpype.hosts.fusion.api import menu

    # activate Fusion from pype
    install_host(api)
    install_host(FusionHost())

    log = Logger.get_logger(__name__)
    log.info(f"Registered host: {registered_host()}")
@@ -1,7 +1,7 @@
import os
import platform
from openpype.lib import PreLaunchHook

from openpype.lib import PreLaunchHook, ApplicationLaunchFailed
from openpype.pipeline.colorspace import get_imageio_config
from openpype.pipeline.template_data import get_template_data_with_names


class FusionPreLaunchOCIO(PreLaunchHook):
@@ -11,24 +11,22 @@ class FusionPreLaunchOCIO(PreLaunchHook):
    def execute(self):
        """Hook entry method."""

        # get image io
        project_settings = self.data["project_settings"]
        template_data = get_template_data_with_names(
            project_name=self.data["project_name"],
            asset_name=self.data["asset_name"],
            task_name=self.data["task_name"],
            host_name=self.host_name,
            system_settings=self.data["system_settings"]
        )

        # make sure the fusion imageio settings are available
        imageio_fusion = project_settings["fusion"]["imageio"]

        ocio = imageio_fusion.get("ocio")
        enabled = ocio.get("enabled", False)
        if not enabled:
            return

        platform_key = platform.system().lower()
        ocio_path = ocio["configFilePath"][platform_key]
        if not ocio_path:
            raise ApplicationLaunchFailed(
                "Fusion OCIO is enabled in project settings but no OCIO config"
                f" path is set for your current platform: {platform_key}"
            )
        config_data = get_imageio_config(
            project_name=self.data["project_name"],
            host_name=self.host_name,
            project_settings=self.data["project_settings"],
            anatomy_data=template_data,
            anatomy=self.data["anatomy"]
        )
        ocio_path = config_data["path"]

        self.log.info(f"Setting OCIO config path: {ocio_path}")
        self.launch_context.env["OCIO"] = os.pathsep.join(ocio_path)
        self.launch_context.env["OCIO"] = ocio_path
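As an aside, the platform-keyed config lookup done by this hook can be illustrated in isolation; the paths below are made-up placeholders, not real project settings.

import platform

# Hypothetical settings fragment shaped like the hook's `ocio` dict
ocio_setting = {
    "enabled": True,
    "configFilePath": {
        "windows": "C:/ocio/config.ocio",
        "linux": "/mnt/ocio/config.ocio",
        "darwin": "/Volumes/ocio/config.ocio",
    },
}

# platform.system() returns "Windows", "Linux" or "Darwin"
platform_key = platform.system().lower()
ocio_path = ocio_setting["configFilePath"][platform_key]
print("OCIO config for {}: {}".format(platform_key, ocio_path))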
@@ -36,7 +36,7 @@ class FusionPrelaunch(PreLaunchHook):
                "Make sure the environment in fusion settings has "
                "'FUSION_PYTHON3_HOME' set correctly and make sure "
                "Python 3 is installed in the given path."
                f"\n\nPYTHON36: {fusion_python3_home}"
                f"\n\nPYTHON PATH: {fusion_python3_home}"
            )

        self.log.info(f"Setting {py3_var}: '{py3_dir}'...")
@@ -1,49 +0,0 @@
import os

from openpype.pipeline import (
    LegacyCreator,
    legacy_io
)
from openpype.hosts.fusion.api import (
    get_current_comp,
    comp_lock_and_undo_chunk
)


class CreateOpenEXRSaver(LegacyCreator):

    name = "openexrDefault"
    label = "Create OpenEXR Saver"
    hosts = ["fusion"]
    family = "render"
    defaults = ["Main"]

    def process(self):

        file_format = "OpenEXRFormat"

        comp = get_current_comp()

        workdir = os.path.normpath(legacy_io.Session["AVALON_WORKDIR"])

        filename = "{}..exr".format(self.name)
        filepath = os.path.join(workdir, "render", filename)

        with comp_lock_and_undo_chunk(comp):
            args = (-32768, -32768)  # Magical position numbers
            saver = comp.AddTool("Saver", *args)
            saver.SetAttrs({"TOOLS_Name": self.name})

            # Setting input attributes is different from basic attributes
            # Not to be confused with "MainInputAttributes"
            saver["Clip"] = filepath
            saver["OutputFormat"] = file_format

            # Check file format settings are available
            if saver[file_format] is None:
                raise RuntimeError("File format is not set to {}, "
                                   "this is a bug".format(file_format))

            # Set file format attributes
            saver[file_format]["Depth"] = 1  # int8 | int16 | float32 | other
            saver[file_format]["SaveAlpha"] = 0
215
openpype/hosts/fusion/plugins/create/create_saver.py
Normal file
@@ -0,0 +1,215 @@
import os

import qtawesome

from openpype.hosts.fusion.api import (
    get_current_comp,
    comp_lock_and_undo_chunk
)

from openpype.lib import BoolDef
from openpype.pipeline import (
    legacy_io,
    Creator,
    CreatedInstance
)
from openpype.client import get_asset_by_name


class CreateSaver(Creator):
    identifier = "io.openpype.creators.fusion.saver"
    name = "saver"
    label = "Saver"
    family = "render"
    default_variants = ["Main"]

    description = "Fusion Saver to generate image sequence"

    def create(self, subset_name, instance_data, pre_create_data):

        # TODO: Add pre_create attributes to choose file format?
        file_format = "OpenEXRFormat"

        comp = get_current_comp()
        with comp_lock_and_undo_chunk(comp):
            args = (-32768, -32768)  # Magical position numbers
            saver = comp.AddTool("Saver", *args)

            instance_data["subset"] = subset_name
            self._update_tool_with_data(saver, data=instance_data)

            saver["OutputFormat"] = file_format

            # Check file format settings are available
            if saver[file_format] is None:
                raise RuntimeError(
                    f"File format is not set to {file_format}, this is a bug"
                )

            # Set file format attributes
            saver[file_format]["Depth"] = 0  # Auto | float16 | float32
            # TODO Is this needed?
            saver[file_format]["SaveAlpha"] = 1

        self._imprint(saver, instance_data)

        # Register the CreatedInstance
        instance = CreatedInstance(
            family=self.family,
            subset_name=subset_name,
            data=instance_data,
            creator=self)

        # Insert the transient data
        instance.transient_data["tool"] = saver

        self._add_instance_to_context(instance)

        return instance

    def collect_instances(self):

        comp = get_current_comp()
        tools = comp.GetToolList(False, "Saver").values()
        for tool in tools:

            data = self.get_managed_tool_data(tool)
            if not data:
                data = self._collect_unmanaged_saver(tool)

            # Add instance
            created_instance = CreatedInstance.from_existing(data, self)

            # Collect transient data
            created_instance.transient_data["tool"] = tool

            self._add_instance_to_context(created_instance)

    def get_icon(self):
        return qtawesome.icon("fa.eye", color="white")

    def update_instances(self, update_list):
        for created_inst, _changes in update_list:

            new_data = created_inst.data_to_store()
            tool = created_inst.transient_data["tool"]
            self._update_tool_with_data(tool, new_data)
            self._imprint(tool, new_data)

    def remove_instances(self, instances):
        for instance in instances:
            # Remove the tool from the scene

            tool = instance.transient_data["tool"]
            if tool:
                tool.Delete()

            # Remove the collected CreatedInstance to remove from UI directly
            self._remove_instance_from_context(instance)

    def _imprint(self, tool, data):
        # Save all data as "openpype.{key}" = value entries

        active = data.pop("active", None)
        if active is not None:
            # Use active value to set the passthrough state
            tool.SetAttrs({"TOOLB_PassThrough": not active})

        for key, value in data.items():
            tool.SetData(f"openpype.{key}", value)

    def _update_tool_with_data(self, tool, data):
        """Update tool node name and output path based on subset data"""
        if "subset" not in data:
            return

        original_subset = tool.GetData("openpype.subset")
        subset = data["subset"]
        if original_subset != subset:
            # Subset change detected
            # Update output filepath
            workdir = os.path.normpath(legacy_io.Session["AVALON_WORKDIR"])
            filename = f"{subset}..exr"
            filepath = os.path.join(workdir, "render", subset, filename)
            tool["Clip"] = filepath

            # Rename tool
            if tool.Name != subset:
                print(f"Renaming {tool.Name} -> {subset}")
                tool.SetAttrs({"TOOLS_Name": subset})

    def _collect_unmanaged_saver(self, tool):

        # TODO: this should not be done this way - this should actually
        #   get the data as stored on the tool explicitly (however,
        #   that would disallow any 'regular saver' to be collected
        #   unless the instance data is stored on it to begin with)

        print("Collecting unmanaged saver..")
        comp = tool.Comp()

        # Allow regular non-managed savers to also be picked up
        project = legacy_io.Session["AVALON_PROJECT"]
        asset = legacy_io.Session["AVALON_ASSET"]
        task = legacy_io.Session["AVALON_TASK"]

        asset_doc = get_asset_by_name(project_name=project,
                                      asset_name=asset)

        path = tool["Clip"][comp.TIME_UNDEFINED]
        fname = os.path.basename(path)
        fname, _ext = os.path.splitext(fname)
        variant = fname.rstrip(".")
        subset = self.get_subset_name(
            variant=variant,
            task_name=task,
            asset_doc=asset_doc,
            project_name=project,
        )

        attrs = tool.GetAttrs()
        passthrough = attrs["TOOLB_PassThrough"]
        return {
            # Required data
            "project": project,
            "asset": asset,
            "subset": subset,
            "task": task,
            "variant": variant,
            "active": not passthrough,
            "family": self.family,

            # Unique identifier for instance and this creator
            "id": "pyblish.avalon.instance",
            "creator_identifier": self.identifier
        }

    def get_managed_tool_data(self, tool):
        """Return data of the tool if it matches creator identifier"""
        data = tool.GetData('openpype')
        if not isinstance(data, dict):
            return

        required = {
            "id": "pyblish.avalon.instance",
            "creator_identifier": self.identifier
        }
        for key, value in required.items():
            if key not in data or data[key] != value:
                return

        # Get active state from the actual tool state
        attrs = tool.GetAttrs()
        passthrough = attrs["TOOLB_PassThrough"]
        data["active"] = not passthrough

        return data

    def get_instance_attr_defs(self):
        return [
            BoolDef(
                "review",
                default=True,
                label="Review"
            )
        ]
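For clarity, the imprint convention used by `_imprint` and `get_managed_tool_data` above can be mimicked with a dict-backed stand-in for a Fusion tool. The nesting of dotted keys under "openpype" is an assumption inferred from the round-trip in this file, not a documented Fusion guarantee.

class FakeTool:
    """Dict-backed stand-in for a Fusion tool's SetData/GetData."""

    def __init__(self):
        self._data = {}

    def SetData(self, key, value):
        # Assume "openpype.subset" nests under the "openpype" section
        section, _, subkey = key.partition(".")
        self._data.setdefault(section, {})[subkey] = value

    def GetData(self, key):
        return self._data.get(key)


tool = FakeTool()
for key, value in {"subset": "renderMain", "variant": "Main"}.items():
    tool.SetData("openpype.{}".format(key), value)

# Reading the whole section back, as get_managed_tool_data() does
print(tool.GetData("openpype"))
# {'subset': 'renderMain', 'variant': 'Main'}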
109
openpype/hosts/fusion/plugins/create/create_workfile.py
Normal file
@@ -0,0 +1,109 @@
import qtawesome

from openpype.hosts.fusion.api import (
    get_current_comp
)
from openpype.client import get_asset_by_name
from openpype.pipeline import (
    AutoCreator,
    CreatedInstance,
    legacy_io,
)


class FusionWorkfileCreator(AutoCreator):
    identifier = "workfile"
    family = "workfile"
    label = "Workfile"

    default_variant = "Main"

    create_allow_context_change = False

    data_key = "openpype_workfile"

    def collect_instances(self):

        comp = get_current_comp()
        data = comp.GetData(self.data_key)
        if not data:
            return

        instance = CreatedInstance(
            family=self.family,
            subset_name=data["subset"],
            data=data,
            creator=self
        )
        instance.transient_data["comp"] = comp

        self._add_instance_to_context(instance)

    def update_instances(self, update_list):
        for created_inst, _changes in update_list:
            comp = created_inst.transient_data["comp"]
            if not hasattr(comp, "SetData"):
                # Comp is not alive anymore, likely closed by the user
                self.log.error("Workfile comp not found for existing instance."
                               " Comp might have been closed in the meantime.")
                continue

            # Imprint data into the comp
            data = created_inst.data_to_store()
            comp.SetData(self.data_key, data)

    def create(self, options=None):

        comp = get_current_comp()
        if not comp:
            self.log.error("Unable to find current comp")
            return

        existing_instance = None
        for instance in self.create_context.instances:
            if instance.family == self.family:
                existing_instance = instance
                break

        project_name = legacy_io.Session["AVALON_PROJECT"]
        asset_name = legacy_io.Session["AVALON_ASSET"]
        task_name = legacy_io.Session["AVALON_TASK"]
        host_name = legacy_io.Session["AVALON_APP"]

        if existing_instance is None:
            asset_doc = get_asset_by_name(project_name, asset_name)
            subset_name = self.get_subset_name(
                self.default_variant, task_name, asset_doc,
                project_name, host_name
            )
            data = {
                "asset": asset_name,
                "task": task_name,
                "variant": self.default_variant
            }
            data.update(self.get_dynamic_data(
                self.default_variant, task_name, asset_doc,
                project_name, host_name, None
            ))

            new_instance = CreatedInstance(
                self.family, subset_name, data, self
            )
            new_instance.transient_data["comp"] = comp
            self._add_instance_to_context(new_instance)

        elif (
            existing_instance["asset"] != asset_name
            or existing_instance["task"] != task_name
        ):
            asset_doc = get_asset_by_name(project_name, asset_name)
            subset_name = self.get_subset_name(
                self.default_variant, task_name, asset_doc,
                project_name, host_name
            )
            existing_instance["asset"] = asset_name
            existing_instance["task"] = task_name
            existing_instance["subset"] = subset_name

    def get_icon(self):
        return qtawesome.icon("fa.file-o", color="white")
@@ -15,6 +15,7 @@ class FusionSetFrameRangeLoader(load.LoaderPlugin):
                "pointcache",
                "render"]
    representations = ["*"]
    extensions = {"*"}

    label = "Set frame range"
    order = 11
@@ -13,7 +13,8 @@ class FusionLoadAlembicMesh(load.LoaderPlugin):
    """Load Alembic mesh into Fusion"""

    families = ["pointcache", "model"]
    representations = ["abc"]
    representations = ["*"]
    extensions = {"abc"}

    label = "Load alembic mesh"
    order = -10
@@ -14,7 +14,8 @@ class FusionLoadFBXMesh(load.LoaderPlugin):
    """Load FBX mesh into Fusion"""

    families = ["*"]
    representations = ["fbx"]
    representations = ["*"]
    extensions = {"fbx"}

    label = "Load FBX mesh"
    order = -10
@@ -1,17 +1,19 @@
import os
import contextlib

from openpype.client import get_version_by_id
from openpype.pipeline import (
    load,
    legacy_io,
    get_representation_path,
import openpype.pipeline.load as load
from openpype.pipeline.load import (
    get_representation_context,
    get_representation_path_from_context
)
from openpype.hosts.fusion.api import (
    imprint_container,
    get_current_comp,
    comp_lock_and_undo_chunk
)
from openpype.lib.transcoding import (
    IMAGE_EXTENSIONS,
    VIDEO_EXTENSIONS
)

comp = get_current_comp()
@@ -129,6 +131,9 @@ class FusionLoadSequence(load.LoaderPlugin):

    families = ["imagesequence", "review", "render", "plate"]
    representations = ["*"]
    extensions = set(
        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
    )

    label = "Load sequence"
    order = -10
@@ -141,7 +146,7 @@ class FusionLoadSequence(load.LoaderPlugin):
        namespace = context['asset']['name']

        # Use the first file for now
        path = self._get_first_image(os.path.dirname(self.fname))
        path = get_representation_path_from_context(context)

        # Create the Loader with the filename path set
        comp = get_current_comp()
@@ -210,13 +215,11 @@ class FusionLoadSequence(load.LoaderPlugin):
        assert tool.ID == "Loader", "Must be Loader"
        comp = tool.Comp()

        root = os.path.dirname(get_representation_path(representation))
        path = self._get_first_image(root)
        context = get_representation_context(representation)
        path = get_representation_path_from_context(context)

        # Get start frame from version data
        project_name = legacy_io.active_project()
        version = get_version_by_id(project_name, representation["parent"])
        start = self._get_start(version, tool)
        start = self._get_start(context["version"], tool)

        with comp_lock_and_undo_chunk(comp, "Update Loader"):
@@ -249,11 +252,6 @@ class FusionLoadSequence(load.LoaderPlugin):
        with comp_lock_and_undo_chunk(comp, "Remove Loader"):
            tool.Delete()

    def _get_first_image(self, root):
        """Get first file in representation root"""
        files = sorted(os.listdir(root))
        return os.path.join(root, files[0])

    def _get_start(self, version_doc, tool):
        """Return real start frame of published files (incl. handles)"""
        data = version_doc["data"]
@@ -1,5 +1,3 @@
import os

import pyblish.api

from openpype.hosts.fusion.api import get_current_comp
@@ -0,0 +1,43 @@
import pyblish.api


def get_comp_render_range(comp):
    """Return comp's start-end render range and global start-end range."""
    comp_attrs = comp.GetAttrs()
    start = comp_attrs["COMPN_RenderStart"]
    end = comp_attrs["COMPN_RenderEnd"]
    global_start = comp_attrs["COMPN_GlobalStart"]
    global_end = comp_attrs["COMPN_GlobalEnd"]

    # Whenever render ranges are undefined fall back
    # to the comp's global start and end
    if start == -1000000000:
        start = global_start
    if end == -1000000000:
        end = global_end

    return start, end, global_start, global_end


class CollectFusionCompFrameRanges(pyblish.api.ContextPlugin):
    """Collect current comp"""

    # We run this after CollectorOrder - 0.1 otherwise it gets
    # overridden by global plug-in `CollectContextEntities`
    order = pyblish.api.CollectorOrder - 0.05
    label = "Collect Comp Frame Ranges"
    hosts = ["fusion"]

    def process(self, context):
        """Collect all image sequence tools"""

        comp = context.data["currentComp"]

        # Store comp render ranges
        start, end, global_start, global_end = get_comp_render_range(comp)
        context.data["frameStart"] = int(start)
        context.data["frameEnd"] = int(end)
        context.data["frameStartHandle"] = int(global_start)
        context.data["frameEndHandle"] = int(global_end)
        context.data["handleStart"] = int(start) - int(global_start)
        context.data["handleEnd"] = int(global_end) - int(end)
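A quick worked example of the handle math this collector performs, using made-up frame numbers:

# Render range 1009-1090 inside a global (handle-padded) range 1001-1100
start, end = 1009, 1090
global_start, global_end = 1001, 1100

handle_start = start - global_start  # 8 frames of head handles
handle_end = global_end - end        # 10 frames of tail handles

assert (handle_start, handle_end) == (8, 10)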
@@ -1,5 +1,3 @@
from bson.objectid import ObjectId

import pyblish.api

from openpype.pipeline import registered_host
@@ -97,10 +95,15 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
    label = "Collect Inputs"
    order = pyblish.api.CollectorOrder + 0.2
    hosts = ["fusion"]
    families = ["render"]

    def process(self, instance):

        # Get all upstream and include itself
        if not any(instance[:]):
            self.log.debug("No tool found in instance, skipping..")
            return

        tool = instance[0]
        nodes = list(iter_upstream(tool))
        nodes.append(tool)
@@ -108,7 +111,6 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
        # Collect containers for the given set of nodes
        containers = collect_input_containers(nodes)

        inputs = [ObjectId(c["representation"]) for c in containers]
        inputs = [c["representation"] for c in containers]
        instance.data["inputRepresentations"] = inputs

        self.log.info("Collected inputs: %s" % inputs)
@@ -3,25 +3,7 @@ import os
import pyblish.api


def get_comp_render_range(comp):
    """Return comp's start-end render range and global start-end range."""
    comp_attrs = comp.GetAttrs()
    start = comp_attrs["COMPN_RenderStart"]
    end = comp_attrs["COMPN_RenderEnd"]
    global_start = comp_attrs["COMPN_GlobalStart"]
    global_end = comp_attrs["COMPN_GlobalEnd"]

    # Whenever render ranges are undefined fall back
    # to the comp's global start and end
    if start == -1000000000:
        start = global_start
    if end == -1000000000:
        end = global_end

    return start, end, global_start, global_end


class CollectInstances(pyblish.api.ContextPlugin):
class CollectInstanceData(pyblish.api.InstancePlugin):
    """Collect Fusion saver instances

    This additionally stores the Comp start and end render range in the

@@ -30,76 +12,68 @@ class CollectInstances(pyblish.api.ContextPlugin):
    """

    order = pyblish.api.CollectorOrder
    label = "Collect Instances"
    label = "Collect Instances Data"
    hosts = ["fusion"]

    def process(self, context):
    def process(self, instance):
        """Collect all image sequence tools"""

        from openpype.hosts.fusion.api.lib import get_frame_path
        context = instance.context

        comp = context.data["currentComp"]
        # Include creator attributes directly as instance data
        creator_attributes = instance.data["creator_attributes"]
        instance.data.update(creator_attributes)

        # Get all savers in the comp
        tools = comp.GetToolList(False).values()
        savers = [tool for tool in tools if tool.ID == "Saver"]
        # Include start and end render frame in label
        subset = instance.data["subset"]
        start = context.data["frameStart"]
        end = context.data["frameEnd"]
        label = "{subset} ({start}-{end})".format(subset=subset,
                                                  start=int(start),
                                                  end=int(end))
        instance.data.update({
            "label": label,

        start, end, global_start, global_end = get_comp_render_range(comp)
        context.data["frameStart"] = int(start)
        context.data["frameEnd"] = int(end)
        context.data["frameStartHandle"] = int(global_start)
        context.data["frameEndHandle"] = int(global_end)
            # todo: Allow custom frame range per instance
            "frameStart": context.data["frameStart"],
            "frameEnd": context.data["frameEnd"],
            "frameStartHandle": context.data["frameStartHandle"],
            "frameEndHandle": context.data["frameStartHandle"],
            "handleStart": context.data["handleStart"],
            "handleEnd": context.data["handleEnd"],
            "fps": context.data["fps"],
        })

        for tool in savers:
        # Add review family if the instance is marked as 'review'
        # This could be done through a 'review' Creator attribute.
        if instance.data.get("review", False):
            self.log.info("Adding review family..")
            instance.data["families"].append("review")

        if instance.data["family"] == "render":
            # TODO: This should probably move into a collector of
            #   its own for the "render" family
            from openpype.hosts.fusion.api.lib import get_frame_path
            comp = context.data["currentComp"]

            # This is only the case for savers currently but not
            # for workfile instances. So we assume saver here.
            tool = instance.data["transientData"]["tool"]
            path = tool["Clip"][comp.TIME_UNDEFINED]

            tool_attrs = tool.GetAttrs()
            active = not tool_attrs["TOOLB_PassThrough"]

            if not path:
                self.log.warning("Skipping saver because it "
                                 "has no path set: {}".format(tool.Name))
                continue

            filename = os.path.basename(path)
            head, padding, tail = get_frame_path(filename)
            ext = os.path.splitext(path)[1]
            assert tail == ext, ("Tail does not match %s" % ext)
            subset = head.rstrip("_. ")  # subset is head of the filename

            # Include start and end render frame in label
            label = "{subset} ({start}-{end})".format(subset=subset,
                                                      start=int(start),
                                                      end=int(end))

            instance = context.create_instance(subset)
            instance.data.update({
                "asset": os.environ["AVALON_ASSET"],  # todo: not a constant
                "subset": subset,
                "path": path,
                "outputDir": os.path.dirname(path),
                "ext": ext,  # todo: should be redundant
                "label": label,
                "frameStart": context.data["frameStart"],
                "frameEnd": context.data["frameEnd"],
                "frameStartHandle": context.data["frameStartHandle"],
                "frameEndHandle": context.data["frameStartHandle"],
                "fps": context.data["fps"],
                "families": ["render", "review"],
                "family": "render",
                "active": active,
                "publish": active  # backwards compatibility
            "ext": ext,  # todo: should be redundant?

            # Backwards compatibility: embed tool in instance.data
            "tool": tool
            })

            # Add tool itself as member
            instance.append(tool)

            self.log.info("Found: \"%s\" " % path)

        # Sort/grouped by family (preserving local index)
        context[:] = sorted(context, key=self.sort_by_family)

        return context

    def sort_by_family(self, instance):
        """Sort by family"""
        return instance.data.get("families", instance.data.get("family"))
26
openpype/hosts/fusion/plugins/publish/collect_workfile.py
Normal file
@@ -0,0 +1,26 @@
import os

import pyblish.api


class CollectFusionWorkfile(pyblish.api.InstancePlugin):
    """Collect Fusion workfile representation."""

    order = pyblish.api.CollectorOrder + 0.1
    label = "Collect Workfile"
    hosts = ["fusion"]
    families = ["workfile"]

    def process(self, instance):

        current_file = instance.context.data["currentFile"]

        folder, file = os.path.split(current_file)
        filename, ext = os.path.splitext(file)

        instance.data['representations'] = [{
            'name': ext.lstrip("."),
            'ext': ext.lstrip("."),
            'files': file,
            "stagingDir": folder,
        }]
@@ -11,7 +11,7 @@ class FusionIncrementCurrentFile(pyblish.api.ContextPlugin):
    label = "Increment current file"
    order = pyblish.api.IntegratorOrder + 9.0
    hosts = ["fusion"]
    families = ["render.farm"]
    families = ["workfile"]
    optional = True

    def process(self, context):
@@ -1,11 +1,11 @@
import os
from pprint import pformat

import pyblish.api
from openpype.pipeline import publish
from openpype.hosts.fusion.api import comp_lock_and_undo_chunk


class Fusionlocal(pyblish.api.InstancePlugin):
class Fusionlocal(pyblish.api.InstancePlugin,
                  publish.ColormanagedPyblishPluginMixin):
    """Render the current Fusion composition locally.

    Extract the result of savers by starting a comp render
@@ -19,55 +19,82 @@ class Fusionlocal(pyblish.api.InstancePlugin):
    families = ["render.local"]

    def process(self, instance):

        # This plug-in runs only once and thus assumes all instances
        # currently will render the same frame range
        context = instance.context
        key = "__hasRun{}".format(self.__class__.__name__)
        if context.data.get(key, False):
            return
        else:
            context.data[key] = True

        current_comp = context.data["currentComp"]
        # Start render
        self.render_once(context)

        # Log render status
        self.log.info(
            "Rendered '{nm}' for asset '{ast}' under the task '{tsk}'".format(
                nm=instance.data["name"],
                ast=instance.data["asset"],
                tsk=instance.data["task"],
            )
        )

        frame_start = context.data["frameStartHandle"]
        frame_end = context.data["frameEndHandle"]
        path = instance.data["path"]
        output_dir = instance.data["outputDir"]

        ext = os.path.splitext(os.path.basename(path))[-1]
        basename = os.path.basename(path)
        head, ext = os.path.splitext(basename)
        files = [
            f"{head}{str(frame).zfill(4)}{ext}"
            for frame in range(frame_start, frame_end + 1)
        ]
        repre = {
            "name": ext[1:],
            "ext": ext[1:],
            "frameStart": f"%0{len(str(frame_end))}d" % frame_start,
            "files": files,
            "stagingDir": output_dir,
        }

        self.log.info("Starting render")
        self.log.info("Start frame: {}".format(frame_start))
        self.log.info("End frame: {}".format(frame_end))

        with comp_lock_and_undo_chunk(current_comp):
            result = current_comp.Render({
                "Start": frame_start,
                "End": frame_end,
                "Wait": True
            })
        self.set_representation_colorspace(
            representation=repre,
            context=context,
        )

        if "representations" not in instance.data:
            instance.data["representations"] = []

        collected_frames = os.listdir(output_dir)
        repre = {
            'name': ext[1:],
            'ext': ext[1:],
            'frameStart': "%0{}d".format(len(str(frame_end))) % frame_start,
            'files': collected_frames,
            "stagingDir": output_dir,
        }
        instance.data["representations"].append(repre)

        # review representation
        repre_preview = repre.copy()
        repre_preview["name"] = repre_preview["ext"] = "mp4"
        repre_preview["tags"] = ["review", "preview", "ftrackreview", "delete"]
        instance.data["representations"].append(repre_preview)
        if instance.data.get("review", False):
            repre["tags"] = ["review", "ftrackreview"]

        self.log.debug(f"_ instance.data: {pformat(instance.data)}")
    def render_once(self, context):
        """Render context comp only once, even with more render instances"""

        if not result:
        # This plug-in assumes all render nodes get rendered at the same time
        # to speed up the rendering. The check below makes sure that we only
        # execute the rendering once and not for each instance.
        key = f"__hasRun{self.__class__.__name__}"
        if key not in context.data:
            # We initialize as false to indicate it wasn't successful yet
            # so we can keep track of whether Fusion succeeded
            context.data[key] = False

            current_comp = context.data["currentComp"]
            frame_start = context.data["frameStartHandle"]
            frame_end = context.data["frameEndHandle"]

            self.log.info("Starting Fusion render")
            self.log.info(f"Start frame: {frame_start}")
            self.log.info(f"End frame: {frame_end}")

            with comp_lock_and_undo_chunk(current_comp):
                result = current_comp.Render(
                    {
                        "Start": frame_start,
                        "End": frame_end,
                        "Wait": True,
                    }
                )

            context.data[key] = bool(result)

        if context.data[key] is False:
            raise RuntimeError("Comp render failed")
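The guard in `render_once` is a general run-once-per-context pattern; a minimal sketch with a plain dict standing in for `context.data`:

def run_once(context_data, key, action):
    """Run `action` once per context and cache whether it succeeded."""
    if key not in context_data:
        context_data[key] = bool(action())
    if context_data[key] is False:
        raise RuntimeError("Action failed")


context_data = {}
calls = []
run_once(context_data, "__hasRunRender", lambda: calls.append(1) or True)
run_once(context_data, "__hasRunRender", lambda: calls.append(1) or True)
assert len(calls) == 1  # the second call was skipped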
@@ -7,7 +7,7 @@ class FusionSaveComp(pyblish.api.ContextPlugin):
    label = "Save current file"
    order = pyblish.api.ExtractorOrder - 0.49
    hosts = ["fusion"]
    families = ["render"]
    families = ["render", "workfile"]

    def process(self, context):
@@ -1,6 +1,9 @@
import pyblish.api

from openpype.pipeline.publish import RepairAction
from openpype.pipeline import PublishValidationError

from openpype.hosts.fusion.api.action import SelectInvalidAction


class ValidateBackgroundDepth(pyblish.api.InstancePlugin):
@@ -8,11 +11,12 @@ class ValidateBackgroundDepth(pyblish.api.InstancePlugin):

    order = pyblish.api.ValidatorOrder
    label = "Validate Background Depth 32 bit"
    actions = [RepairAction]
    hosts = ["fusion"]
    families = ["render"]
    optional = True

    actions = [SelectInvalidAction, RepairAction]

    @classmethod
    def get_invalid(cls, instance):
@@ -29,8 +33,10 @@ class ValidateBackgroundDepth(pyblish.api.InstancePlugin):
    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Found %i nodes which are not set to float32"
                               % len(invalid))
            raise PublishValidationError(
                "Found {} Background tools which"
                " are not set to float32".format(len(invalid)),
                title=self.label)

    @classmethod
    def repair(cls, instance):
@@ -1,6 +1,7 @@
import os

import pyblish.api
from openpype.pipeline import PublishValidationError


class ValidateFusionCompSaved(pyblish.api.ContextPlugin):
@@ -19,10 +20,12 @@ class ValidateFusionCompSaved(pyblish.api.ContextPlugin):

        filename = attrs["COMPS_FileName"]
        if not filename:
            raise RuntimeError("Comp is not saved.")
            raise PublishValidationError("Comp is not saved.",
                                         title=self.label)

        if not os.path.exists(filename):
            raise RuntimeError("Comp file does not exist: %s" % filename)
            raise PublishValidationError(
                "Comp file does not exist: %s" % filename, title=self.label)

        if attrs["COMPB_Modified"]:
            self.log.warning("Comp is modified. Save your comp to ensure your "
@@ -1,6 +1,9 @@
import pyblish.api

from openpype.pipeline.publish import RepairAction
from openpype.pipeline import PublishValidationError

from openpype.hosts.fusion.api.action import SelectInvalidAction


class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
@@ -15,6 +18,7 @@ class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
    label = "Validate Create Folder Checked"
    families = ["render"]
    hosts = ["fusion"]
    actions = [SelectInvalidAction]

    @classmethod
    def get_invalid(cls, instance):
@@ -31,8 +35,9 @@ class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Found Saver with Create Folder During "
                               "Render checked off")
            raise PublishValidationError(
                "Found Saver with Create Folder During Render checked off",
                title=self.label)

    @classmethod
    def repair(cls, instance):
@@ -1,6 +1,9 @@
import os

import pyblish.api
from openpype.pipeline import PublishValidationError

from openpype.hosts.fusion.api.action import SelectInvalidAction


class ValidateFilenameHasExtension(pyblish.api.InstancePlugin):
@@ -16,11 +19,13 @@ class ValidateFilenameHasExtension(pyblish.api.InstancePlugin):
    label = "Validate Filename Has Extension"
    families = ["render"]
    hosts = ["fusion"]
    actions = [SelectInvalidAction]

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Found Saver without an extension")
            raise PublishValidationError("Found Saver without an extension",
                                         title=self.label)

    @classmethod
    def get_invalid(cls, instance):
@@ -1,4 +1,7 @@
import pyblish.api
from openpype.pipeline import PublishValidationError

from openpype.hosts.fusion.api.action import SelectInvalidAction


class ValidateSaverHasInput(pyblish.api.InstancePlugin):
@@ -12,6 +15,7 @@ class ValidateSaverHasInput(pyblish.api.InstancePlugin):
    label = "Validate Saver Has Input"
    families = ["render"]
    hosts = ["fusion"]
    actions = [SelectInvalidAction]

    @classmethod
    def get_invalid(cls, instance):
@@ -25,5 +29,8 @@ class ValidateSaverHasInput(pyblish.api.InstancePlugin):
    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Saver has no incoming connection: "
                               "{} ({})".format(instance, invalid[0].Name))
            saver_name = invalid[0].Name
            raise PublishValidationError(
                "Saver has no incoming connection: {} ({})".format(instance,
                                                                   saver_name),
                title=self.label)
@@ -1,4 +1,7 @@
import pyblish.api
from openpype.pipeline import PublishValidationError

from openpype.hosts.fusion.api.action import SelectInvalidAction


class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
@@ -8,6 +11,7 @@ class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
    label = "Validate Saver Passthrough"
    families = ["render"]
    hosts = ["fusion"]
    actions = [SelectInvalidAction]

    def process(self, context):
@@ -27,8 +31,9 @@ class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
        if invalid_instances:
            self.log.info("Reset pyblish to collect your current scene state, "
                          "that should fix the error.")
            raise RuntimeError("Invalid instances: "
                               "{0}".format(invalid_instances))
            raise PublishValidationError(
                "Invalid instances: {0}".format(invalid_instances),
                title=self.label)

    def is_invalid(self, instance):
@@ -36,7 +41,7 @@ class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
        attr = saver.GetAttrs()
        active = not attr["TOOLB_PassThrough"]

        if active != instance.data["publish"]:
        if active != instance.data.get("publish", True):
            self.log.info("Saver has different passthrough state than "
                          "Pyblish: {} ({})".format(instance, saver.Name))
            return [saver]
@@ -0,0 +1,55 @@
from collections import defaultdict

import pyblish.api
from openpype.pipeline import PublishValidationError

from openpype.hosts.fusion.api.action import SelectInvalidAction


class ValidateUniqueSubsets(pyblish.api.ContextPlugin):
    """Ensure all instances have a unique subset name"""

    order = pyblish.api.ValidatorOrder
    label = "Validate Unique Subsets"
    families = ["render"]
    hosts = ["fusion"]
    actions = [SelectInvalidAction]

    @classmethod
    def get_invalid(cls, context):

        # Collect instances per subset per asset
        instances_per_subset_asset = defaultdict(lambda: defaultdict(list))
        for instance in context:
            asset = instance.data.get("asset", context.data.get("asset"))
            subset = instance.data.get("subset", context.data.get("subset"))
            instances_per_subset_asset[asset][subset].append(instance)

        # Find which asset + subset combination has more than one instance
        # Those are considered invalid because they'd integrate to the same
        # destination.
        invalid = []
        for asset, instances_per_subset in instances_per_subset_asset.items():
            for subset, instances in instances_per_subset.items():
                if len(instances) > 1:
                    cls.log.warning(
                        "{asset} > {subset} used by more than "
                        "one instance: {instances}".format(
                            asset=asset,
                            subset=subset,
                            instances=instances
                        )
                    )
                    invalid.extend(instances)

        # Return tools for the invalid instances so they can be selected
        invalid = [instance.data["tool"] for instance in invalid]

        return invalid

    def process(self, context):
        invalid = self.get_invalid(context)
        if invalid:
            raise PublishValidationError("Multiple instances are set to "
                                         "the same asset > subset.",
                                         title=self.label)
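The nested-defaultdict grouping in `get_invalid` is easy to verify standalone; a small sketch with plain dicts standing in for pyblish instances:

from collections import defaultdict

instances = [
    {"asset": "shot010", "subset": "renderMain"},
    {"asset": "shot010", "subset": "renderMain"},  # duplicate target
    {"asset": "shot010", "subset": "renderKey"},
]

grouped = defaultdict(lambda: defaultdict(list))
for inst in instances:
    grouped[inst["asset"]][inst["subset"]].append(inst)

invalid = [
    inst
    for per_subset in grouped.values()
    for insts in per_subset.values()
    if len(insts) > 1
    for inst in insts
]
assert len(invalid) == 2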
@@ -126,10 +126,6 @@ def check_inventory():

def application_launch(event):
    """Event that is executed after Harmony is launched."""
    # FIXME: This is breaking server <-> client communication.
    #   It is now moved so it is manually called.
    # ensure_scene_settings()
    # check_inventory()
    # fills OPENPYPE_HARMONY_JS
    pype_harmony_path = Path(__file__).parent.parent / "js" / "PypeHarmony.js"
    pype_harmony_js = pype_harmony_path.read_text()
@@ -146,6 +142,9 @@ def application_launch(event):
    harmony.send({"script": script})
    inject_avalon_js()

    ensure_scene_settings()
    check_inventory()


def export_template(backdrops, nodes, filepath):
    """Export Template to file.
@@ -20,8 +20,9 @@ class ImageSequenceLoader(load.LoaderPlugin):
    Stores the imported asset in a container named after the asset.
    """

    families = ["shot", "render", "image", "plate", "reference"]
    representations = ["jpeg", "png", "jpg"]
    families = ["shot", "render", "image", "plate", "reference", "review"]
    representations = ["*"]
    extensions = {"jpeg", "png", "jpg"}

    def load(self, context, name=None, namespace=None, data=None):
        """Plugin entry point.
@@ -108,9 +108,9 @@ class ExtractRender(pyblish.api.InstancePlugin):
        output = process.communicate()[0]

        if process.returncode != 0:
            raise ValueError(output.decode("utf-8"))
            raise ValueError(output.decode("utf-8", errors="backslashreplace"))

        self.log.debug(output.decode("utf-8"))
        self.log.debug(output.decode("utf-8", errors="backslashreplace"))

        # Generate representations.
        extension = collection.tail[1:]
@@ -6,6 +6,10 @@ from openpype.pipeline import (
    legacy_io,
    get_representation_path,
)
from openpype.lib.transcoding import (
    VIDEO_EXTENSIONS,
    IMAGE_EXTENSIONS
)
import openpype.hosts.hiero.api as phiero
@@ -17,7 +21,10 @@ class LoadClip(phiero.SequenceLoader):
    """

    families = ["render2d", "source", "plate", "render", "review"]
    representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264"]
    representations = ["*"]
    extensions = set(
        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
    )

    label = "Load as clip"
    order = -10
@@ -34,6 +41,38 @@ class LoadClip(phiero.SequenceLoader):

    clip_name_template = "{asset}_{subset}_{representation}"

    def apply_settings(cls, project_settings, system_settings):

        plugin_type_settings = (
            project_settings
            .get("hiero", {})
            .get("load", {})
        )

        if not plugin_type_settings:
            return

        plugin_name = cls.__name__

        plugin_settings = None
        # Look for plugin settings in host specific settings
        if plugin_name in plugin_type_settings:
            plugin_settings = plugin_type_settings[plugin_name]

        if not plugin_settings:
            return

        print(">>> We have preset for {}".format(plugin_name))
        for option, value in plugin_settings.items():
            if option == "enabled" and value is False:
                print("  - is disabled by preset")
            elif option == "representations":
                continue
            else:
                print("  - setting `{}`: `{}`".format(option, value))
                setattr(cls, option, value)

    def load(self, context, name, namespace, options):
        # add clip name template to options
        options.update({
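A condensed sketch of the preset-application pattern in `apply_settings`, using a made-up settings dict (the real values come from project settings):

class DummyLoader:
    """Stand-in for a loader plugin class."""
    label = "Load as clip"
    order = -10


# Hypothetical project settings fragment for this plugin
plugin_settings = {"enabled": True, "order": 5, "label": "Load clip (preset)"}

for option, value in plugin_settings.items():
    if option == "enabled" and value is False:
        continue  # plugin disabled by preset
    # Presets override class attributes directly
    setattr(DummyLoader, option, value)

assert DummyLoader.order == 5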
@@ -19,8 +19,9 @@ from openpype.lib import Logger
class LoadEffects(load.LoaderPlugin):
    """Loading colorspace soft effect exported from nukestudio"""

    representations = ["effectJson"]
    families = ["effect"]
    representations = ["*"]
    extension = {"json"}

    label = "Load Effects"
    order = 0
@@ -120,13 +120,9 @@ class CollectClipEffects(pyblish.api.InstancePlugin):
        track = sitem.parentTrack().name()
        # node serialization
        node = sitem.node()
        node_serialized = self.node_serialisation(node)
        node_serialized = self.node_serialization(node)
        node_name = sitem.name()

        if "_" in node_name:
            node_class = re.sub(r"(?:_)[_0-9]+", "", node_name)  # more numbers
        else:
            node_class = re.sub(r"\d+", "", node_name)  # one number
        node_class = node.Class()

        # collect timelineIn/Out
        effect_t_in = int(sitem.timelineIn())
@@ -148,7 +144,7 @@ class CollectClipEffects(pyblish.api.InstancePlugin):
            "node": node_serialized
        }}

    def node_serialisation(self, node):
    def node_serialization(self, node):
        node_serialized = {}

        # add ignored knob keys
@@ -144,13 +144,20 @@ class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):

    """
    obj_network = hou.node("/obj")
    op_ctx = obj_network.createNode(
        "null", node_name="OpenPypeContext")
    op_ctx = obj_network.createNode("null", node_name="OpenPypeContext")

    # A null in houdini by default comes with content inside to visualize
    # the null. However since we explicitly want to hide the node, let's
    # remove the content and disable the display flag of the node
    for node in op_ctx.children():
        node.destroy()

    op_ctx.moveToGoodPosition()
    op_ctx.setBuiltExplicitly(False)
    op_ctx.setCreatorState("OpenPype")
    op_ctx.setComment("OpenPype node to hold context metadata")
    op_ctx.setColor(hou.Color((0.081, 0.798, 0.810)))
    op_ctx.setDisplayFlag(False)
    op_ctx.hide(True)
    return op_ctx
@@ -103,9 +103,8 @@ class HoudiniCreatorBase(object):
        fill it with all collected instances from the scene under its
        respective creator identifiers.

        If legacy instances are detected in the scene, create
        `houdini_cached_legacy_subsets` there and fill it with
        all legacy subsets under family as a key.
        Create `houdini_cached_legacy_subsets` key for any legacy instances
        detected in the scene as instances per family.

        Args:
            Dict[str, Any]: Shared data.
@@ -115,29 +114,30 @@ class HoudiniCreatorBase(object):

        """
        if shared_data.get("houdini_cached_subsets") is None:
            shared_data["houdini_cached_subsets"] = {}
            if shared_data.get("houdini_cached_legacy_subsets") is None:
                shared_data["houdini_cached_legacy_subsets"] = {}
            cached_instances = lsattr("id", "pyblish.avalon.instance")
            for i in cached_instances:
                if not i.parm("creator_identifier"):
                    # we have legacy instance
                    family = i.parm("family").eval()
                    if family not in shared_data[
                            "houdini_cached_legacy_subsets"]:
                        shared_data["houdini_cached_legacy_subsets"][
                            family] = [i]
                    else:
                        shared_data[
                            "houdini_cached_legacy_subsets"][family].append(i)
                    continue
            cache = dict()
            cache_legacy = dict()

            for node in lsattr("id", "pyblish.avalon.instance"):

                creator_identifier_parm = node.parm("creator_identifier")
                if creator_identifier_parm:
                    # creator instance
                    creator_id = creator_identifier_parm.eval()
                    cache.setdefault(creator_id, []).append(node)

                creator_id = i.parm("creator_identifier").eval()
                if creator_id not in shared_data["houdini_cached_subsets"]:
                    shared_data["houdini_cached_subsets"][creator_id] = [i]
                else:
                    shared_data[
                        "houdini_cached_subsets"][creator_id].append(i)  # noqa
                else:
                    # legacy instance
                    family_parm = node.parm("family")
                    if not family_parm:
                        # must be a broken instance
                        continue

                    family = family_parm.eval()
                    cache_legacy.setdefault(family, []).append(node)

            shared_data["houdini_cached_subsets"] = cache
            shared_data["houdini_cached_legacy_subsets"] = cache_legacy

        return shared_data

    @staticmethod
@@ -225,12 +225,12 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase):
        self._add_instance_to_context(created_instance)

    def update_instances(self, update_list):
        for created_inst, _changes in update_list:
        for created_inst, changes in update_list:
            instance_node = hou.node(created_inst.get("instance_node"))

            new_values = {
                key: new_value
                for key, (_old_value, new_value) in _changes.items()
                key: changes[key].new_value
                for key in changes.changed_keys
            }
            imprint(
                instance_node,
@@ -1,4 +1,5 @@
import os
import re
import logging
import platform
@@ -66,7 +67,7 @@ def generate_shelves():
            )
            continue

        mandatory_attributes = {'name', 'script'}
        mandatory_attributes = {'label', 'script'}
        for tool_definition in shelf_definition.get('tools_list'):
            # We verify that the name and script attributes of the tool
            # are set
@@ -152,31 +153,32 @@ def get_or_create_tool(tool_definition, shelf):
    Returns:
        hou.Tool: The tool updated or the new one
    """
    existing_tools = shelf.tools()
    tool_label = tool_definition.get('label')

    tool_label = tool_definition.get("label")
    if not tool_label:
        log.warning("Skipped shelf without label")
        return

    script_path = tool_definition["script"]
    if not script_path or not os.path.exists(script_path):
        log.warning("This path doesn't exist - {}".format(script_path))
        return

    existing_tools = shelf.tools()
    existing_tool = next(
        (tool for tool in existing_tools if tool.label() == tool_label),
        None
    )

    with open(script_path) as stream:
        script = stream.read()

    tool_definition["script"] = script

    if existing_tool:
        tool_definition.pop('name', None)
        tool_definition.pop('label', None)
        tool_definition.pop("label", None)
        existing_tool.setData(**tool_definition)
        return existing_tool

    tool_name = tool_label.replace(' ', '_').lower()

    if not os.path.exists(tool_definition['script']):
        log.warning(
            "This path doesn't exist - {}".format(tool_definition['script'])
        )
        return

    with open(tool_definition['script']) as f:
        script = f.read()
    tool_definition.update({'script': script})

    new_tool = hou.shelves.newTool(name=tool_name, **tool_definition)

    return new_tool
    tool_name = re.sub(r"[^\w\d]+", "_", tool_label).lower()
    return hou.shelves.newTool(name=tool_name, **tool_definition)
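The regex-based tool-name normalization that replaces the old space-only substitution can be sanity-checked on its own:

import re


def tool_name_from_label(label):
    # Any run of non-word characters collapses to a single underscore;
    # note that trailing separators also become underscores
    return re.sub(r"[^\w\d]+", "_", label).lower()


assert tool_name_from_label("My Tool (v2)") == "my_tool_v2_"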
@@ -1,5 +1,3 @@
from bson.objectid import ObjectId

import pyblish.api

from openpype.pipeline import registered_host
@@ -117,7 +115,6 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
        # Collect containers for the given set of nodes
        containers = collect_input_containers(nodes)

        inputs = [ObjectId(c["representation"]) for c in containers]
        inputs = [c["representation"] for c in containers]
        instance.data["inputRepresentations"] = inputs

        self.log.info("Collected inputs: %s" % inputs)
@@ -12,5 +12,17 @@ class MaxAddon(OpenPypeModule, IHostAddon):
    def initialize(self, module_settings):
        self.enabled = True

    def add_implementation_envs(self, env, _app):
        # Remove auto screen scale factor for Qt
        # - let 3dsmax decide its value
        env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None)

    def get_workfile_extensions(self):
        return [".max"]

    def get_launch_hook_paths(self, app):
        if app.host_name != self.host_name:
            return []
        return [
            os.path.join(MAX_HOST_DIR, "hooks")
        ]
@@ -120,3 +120,51 @@ def get_all_children(parent, node_type=None):

    return ([x for x in child_list if rt.superClassOf(x) == node_type]
            if node_type else child_list)


def get_current_renderer():
    """Get the current production renderer."""
    return rt.renderers.production


def get_default_render_folder(project_setting=None):
    return (project_setting["max"]
                           ["RenderSettings"]
                           ["default_render_image_folder"])


def set_framerange(start_frame, end_frame):
    """
    Note:
        Frame range can be specified in different types. Possible values are:
        * `1` - Single frame.
        * `2` - Active time segment ( animationRange ).
        * `3` - User specified Range.
        * `4` - User specified Frame pickup string (for example `1,3,5-12`).

    Todo:
        Current type is hard-coded, there should be a custom setting for this.
    """
    rt.rendTimeType = 4
    if start_frame is not None and end_frame is not None:
        frame_range = "{0}-{1}".format(start_frame, end_frame)
        rt.rendPickupFrames = frame_range


def get_multipass_setting(project_setting=None):
    return (project_setting["max"]
                           ["RenderSettings"]
                           ["multipass"])


def get_max_version():
    """Get the Max version date, used for Deadline.

    Returns:
        int: the version date; e.g. `maxversion()` returns
        #(25000, 62, 0, 25, 0, 0, 997, 2023, "") where
        max_info[7] is the version date.
    """
    max_info = rt.maxversion()
    return max_info[7]
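For reference, with render time type 4 the pickup value assigned by `set_framerange` is just a "start-end" string; a tiny check:

def build_pickup_string(start_frame, end_frame):
    # Matches the "{0}-{1}" format assigned to rt.rendPickupFrames above
    return "{0}-{1}".format(start_frame, end_frame)


assert build_pickup_string(1001, 1100) == "1001-1100"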
114
openpype/hosts/max/api/lib_renderproducts.py
Normal file
@@ -0,0 +1,114 @@
# Render Element example: for Scanline renderer and V-Ray
# https://help.autodesk.com/view/MAXDEV/2022/ENU/?guid=GUID-E8F75D47-B998-4800-A3A5-610E22913CFC
# Arnold
# https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_3ds_max_ax_maxscript_commands_ax_renderview_commands_html
import os
from pymxs import runtime as rt
from openpype.hosts.max.api.lib import (
    get_current_renderer,
    get_default_render_folder
)
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io


class RenderProducts(object):

    def __init__(self, project_settings=None):
        self._project_settings = project_settings
        if not self._project_settings:
            self._project_settings = get_project_settings(
                legacy_io.Session["AVALON_PROJECT"]
            )

    def render_product(self, container):
        folder = rt.maxFilePath
        file = rt.maxFileName
        folder = folder.replace("\\", "/")
        setting = self._project_settings
        render_folder = get_default_render_folder(setting)
        filename, ext = os.path.splitext(file)

        output_file = os.path.join(folder,
                                   render_folder,
                                   filename,
                                   container)

        img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
        full_render_list = []
        beauty = self.beauty_render_product(output_file, img_fmt)
        full_render_list.append(beauty)

        renderer_class = get_current_renderer()
        renderer = str(renderer_class).split(":")[0]

        if renderer == "VUE_File_Renderer":
            return full_render_list

        if renderer in [
            "ART_Renderer",
            "Redshift_Renderer",
            "V_Ray_6_Hotfix_3",
            "V_Ray_GPU_6_Hotfix_3",
            "Default_Scanline_Renderer",
            "Quicksilver_Hardware_Renderer",
        ]:
            render_elem_list = self.render_elements_product(output_file,
                                                            img_fmt)
            if render_elem_list:
                full_render_list.extend(iter(render_elem_list))
            return full_render_list

        if renderer == "Arnold":
            aov_list = self.arnold_render_product(output_file,
                                                  img_fmt)
            if aov_list:
                full_render_list.extend(iter(aov_list))
            return full_render_list

    def beauty_render_product(self, folder, fmt):
        beauty_output = f"{folder}.####.{fmt}"
        beauty_output = beauty_output.replace("\\", "/")
        return beauty_output

    # TODO: Get the arnold render product
    def arnold_render_product(self, folder, fmt):
        """Get all the Arnold AOVs"""
        aovs = []

        amw = rt.MaxtoAOps.AOVsManagerWindow()
        aov_mgr = rt.renderers.current.AOVManager
        # Check if there is any AOV group set in the AOV manager
        aov_group_num = len(aov_mgr.drivers)
        if aov_group_num < 1:
            return
        for i in range(aov_group_num):
            # get the specific AOV group
            for aov in aov_mgr.drivers[i].aov_list:
                render_element = f"{folder}_{aov.name}.####.{fmt}"
                render_element = render_element.replace("\\", "/")
                aovs.append(render_element)
        # close the AOVs manager window
        amw.close()

        return aovs

    def render_elements_product(self, folder, fmt):
        """Get all the render element output files."""
        render_dirname = []

        render_elem = rt.maxOps.GetCurRenderElementMgr()
        render_elem_num = render_elem.NumRenderElements()
        # get render elements from the renders
        for i in range(render_elem_num):
            renderlayer_name = render_elem.GetRenderElement(i)
            target, renderpass = str(renderlayer_name).split(":")
            if renderlayer_name.enabled:
                render_element = f"{folder}_{renderpass}.####.{fmt}"
                render_element = render_element.replace("\\", "/")
                render_dirname.append(render_element)
|
||||
return render_dirname
|
||||
|
||||
def image_format(self):
|
||||
return self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
|
||||
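A rough sketch of how RenderProducts might be driven from a publish plugin; the container name "renderMain" is invented for illustration and assumes an open workfile with render elements configured:

from openpype.hosts.max.api.lib_renderproducts import RenderProducts

products = RenderProducts()
# "renderMain" is a hypothetical instance/container name
expected_files = products.render_product("renderMain")
print(products.image_format(), expected_files)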
openpype/hosts/max/api/lib_rendersettings.py  (new file, 168 lines)
@@ -0,0 +1,168 @@
import os
from pymxs import runtime as rt
from openpype.lib import Logger
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
from openpype.pipeline.context_tools import get_current_project_asset

from openpype.hosts.max.api.lib import (
    set_framerange,
    get_current_renderer,
    get_default_render_folder
)


class RenderSettings(object):

    log = Logger.get_logger("RenderSettings")

    _aov_chars = {
        "dot": ".",
        "dash": "-",
        "underscore": "_"
    }

    def __init__(self, project_settings=None):
        """Set up the naming convention of the render elements
        for the Deadline submission.
        """
        self._project_settings = project_settings
        if not self._project_settings:
            self._project_settings = get_project_settings(
                legacy_io.Session["AVALON_PROJECT"]
            )

    def set_render_camera(self, selection):
        found = False
        for sel in selection:
            # to avoid AttributeError from the pymxs wrapper
            if rt.classOf(sel) in rt.Camera.classes:
                found = True
                rt.viewport.setCamera(sel)
                break
        if not found:
            raise RuntimeError("Camera not found")

    def render_output(self, container):
        folder = rt.maxFilePath
        # hard-coded, should be customized in the settings
        file = rt.maxFileName
        folder = folder.replace("\\", "/")
        # hard-coded, set the render output path
        setting = self._project_settings
        render_folder = get_default_render_folder(setting)
        filename, ext = os.path.splitext(file)
        output_dir = os.path.join(folder,
                                  render_folder,
                                  filename)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # hard-coded, should be customized in the settings
        context = get_current_project_asset()

        # get project resolution
        width = context["data"].get("resolutionWidth")
        height = context["data"].get("resolutionHeight")
        # set frame range
        frame_start = context["data"].get("frame_start")
        frame_end = context["data"].get("frame_end")
        set_framerange(frame_start, frame_end)
        # get the production renderer
        renderer_class = get_current_renderer()
        renderer = str(renderer_class).split(":")[0]

        img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
        output = os.path.join(output_dir, container)
        try:
            aov_separator = self._aov_chars[(
                self._project_settings["maya"]
                                      ["RenderSettings"]
                                      ["aov_separator"]
            )]
        except KeyError:
            aov_separator = "."
        output_filename = "{0}..{1}".format(output, img_fmt)
        output_filename = output_filename.replace("{aov_separator}",
                                                  aov_separator)
        rt.rendOutputFilename = output_filename
        if renderer == "VUE_File_Renderer":
            return
        # TODO: Finish the arnold render setup
        if renderer == "Arnold":
            self.arnold_setup()

        if renderer in [
            "ART_Renderer",
            "Redshift_Renderer",
            "V_Ray_6_Hotfix_3",
            "V_Ray_GPU_6_Hotfix_3",
            "Default_Scanline_Renderer",
            "Quicksilver_Hardware_Renderer",
        ]:
            self.render_element_layer(output, width, height, img_fmt)

        rt.rendSaveFile = True

    def arnold_setup(self):
        # get Arnold RenderView to run in the background
        # for setting up the renderable camera
        arv = rt.MAXToAOps.ArnoldRenderView()
        render_camera = rt.viewport.GetCamera()
        arv.setOption("Camera", str(render_camera))

        # TODO: add AOVs and extension
        img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
        setup_cmd = (
            f"""
amw = MaxtoAOps.AOVsManagerWindow()
amw.close()
aovmgr = renderers.current.AOVManager
aovmgr.drivers = #()
img_fmt = "{img_fmt}"
if img_fmt == "png" then driver = ArnoldPNGDriver()
if img_fmt == "jpg" then driver = ArnoldJPEGDriver()
if img_fmt == "exr" then driver = ArnoldEXRDriver()
if img_fmt == "tif" then driver = ArnoldTIFFDriver()
if img_fmt == "tiff" then driver = ArnoldTIFFDriver()
append aovmgr.drivers driver
aovmgr.drivers[1].aov_list = #()
""")

        rt.execute(setup_cmd)
        arv.close()

    def render_element_layer(self, dir, width, height, ext):
        """For renderers with render elements."""
        rt.renderWidth = width
        rt.renderHeight = height
        render_elem = rt.maxOps.GetCurRenderElementMgr()
        render_elem_num = render_elem.NumRenderElements()
        if render_elem_num < 0:
            return

        for i in range(render_elem_num):
            renderlayer_name = render_elem.GetRenderElement(i)
            target, renderpass = str(renderlayer_name).split(":")
            aov_name = "{0}_{1}..{2}".format(dir, renderpass, ext)
            render_elem.SetRenderElementFileName(i, aov_name)

    def get_render_output(self, container, output_dir):
        output = os.path.join(output_dir, container)
        img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
        output_filename = "{0}..{1}".format(output, img_fmt)
        return output_filename

    def get_render_element(self):
        orig_render_elem = []
        render_elem = rt.maxOps.GetCurRenderElementMgr()
        render_elem_num = render_elem.NumRenderElements()
        if render_elem_num < 0:
            return

        for i in range(render_elem_num):
            render_element = render_elem.GetRenderElementFilename(i)
            orig_render_elem.append(render_element)

        return orig_render_elem
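And the companion class in use, again as a sketch; it assumes cameras are selected in the viewport and the container name is invented:

from pymxs import runtime as rt
from openpype.hosts.max.api.lib_rendersettings import RenderSettings

settings = RenderSettings()
settings.set_render_camera(list(rt.selection))  # first camera in the selection wins
settings.render_output("renderMain_CON")        # hypothetical container name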
@@ -119,7 +119,7 @@ class OpenPypeMenu(object):

     def manage_callback(self):
         """Callback to show Scene Manager/Inventory tool."""
-        host_tools.show_subset_manager(parent=self.main_widget)
+        host_tools.show_scene_inventory(parent=self.main_widget)

     def library_callback(self):
         """Callback to show Library Loader tool."""
@@ -2,6 +2,7 @@
 """Pipeline tools for the OpenPype 3dsmax integration."""
 import os
 import logging
+from operator import attrgetter

 import json
@@ -141,5 +142,25 @@ def ls() -> list:
         if rt.getUserProp(obj, "id") == AVALON_CONTAINER_ID
     ]

-    for container in sorted(containers, key=lambda name: container.name):
+    for container in sorted(containers, key=attrgetter("name")):
         yield lib.read(container)


+def containerise(name: str, nodes: list, context, loader=None, suffix="_CON"):
+    data = {
+        "schema": "openpype:container-2.0",
+        "id": AVALON_CONTAINER_ID,
+        "name": name,
+        "namespace": "",
+        "loader": loader,
+        "representation": context["representation"]["_id"],
+    }
+
+    container_name = f"{name}{suffix}"
+    container = rt.container(name=container_name)
+    for node in nodes:
+        node.Parent = container
+
+    if not lib.imprint(container_name, data):
+        print(f"imprinting of {container_name} failed.")
+    return container
@@ -78,12 +78,12 @@ class MaxCreator(Creator, MaxCreatorBase):
             self._add_instance_to_context(created_instance)

     def update_instances(self, update_list):
-        for created_inst, _changes in update_list:
+        for created_inst, changes in update_list:
             instance_node = created_inst.get("instance_node")

             new_values = {
-                key: new_value
-                for key, (_old_value, new_value) in _changes.items()
+                key: changes[key].new_value
+                for key in changes.changed_keys
             }
             imprint(
                 instance_node,
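The rewritten comprehension assumes a change-tracking object that exposes `changed_keys` and a per-key `.new_value`; a self-contained stand-in (every name besides the comprehension itself is invented) shows the shape it expects:

class _Change:
    """Hypothetical stand-in for one tracked attribute change."""
    def __init__(self, old, new):
        self.old_value = old
        self.new_value = new


class _Changes(dict):
    """Hypothetical stand-in for the creator's `changes` object."""
    @property
    def changed_keys(self):
        return [key for key, c in self.items() if c.old_value != c.new_value]


changes = _Changes(active=_Change(True, False), subset=_Change("a", "a"))
new_values = {key: changes[key].new_value for key in changes.changed_keys}
assert new_values == {"active": False}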
openpype/hosts/max/hooks/inject_python.py  (new file, 19 lines)
@@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-
"""Pre-launch hook to inject the python environment."""
from openpype.lib import PreLaunchHook
import os


class InjectPythonPath(PreLaunchHook):
    """Inject the OpenPype environment into 3dsmax.

    Note that this works in combination with the 3dsmax startup script,
    which translates it back to PYTHONPATH for cases when 3dsmax drops the
    PYTHONPATH environment variable.

    Hook `GlobalHostDataHook` must be executed before this hook.
    """
    app_groups = ["3dsmax"]

    def execute(self):
        self.launch_context.env["MAX_PYTHONPATH"] = os.environ["PYTHONPATH"]
openpype/hosts/max/plugins/create/create_camera.py  (new file, 26 lines)
@@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating cameras."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance


class CreateCamera(plugin.MaxCreator):
    identifier = "io.openpype.creators.max.camera"
    label = "Camera"
    family = "camera"
    icon = "gear"

    def create(self, subset_name, instance_data, pre_create_data):
        from pymxs import runtime as rt
        sel_obj = list(rt.selection)
        instance = super(CreateCamera, self).create(
            subset_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
        container = rt.getNodeByName(instance.data.get("instance_node"))
        # TODO: Disable "Add to Containers?" panel
        # parent the selected cameras under the container
        for obj in sel_obj:
            obj.parent = container
        # for additional work on the node:
        # instance_node = rt.getNodeByName(instance.get("instance_node"))
openpype/hosts/max/plugins/create/create_render.py  (new file, 33 lines)
@@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating renders."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.hosts.max.api.lib_rendersettings import RenderSettings


class CreateRender(plugin.MaxCreator):
    identifier = "io.openpype.creators.max.render"
    label = "Render"
    family = "maxrender"
    icon = "gear"

    def create(self, subset_name, instance_data, pre_create_data):
        from pymxs import runtime as rt
        sel_obj = list(rt.selection)
        instance = super(CreateRender, self).create(
            subset_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
        container_name = instance.data.get("instance_node")
        container = rt.getNodeByName(container_name)
        # TODO: Disable "Add to Containers?" panel
        # parent the selected objects under the container
        for obj in sel_obj:
            obj.parent = container
        # for additional work on the node:
        # instance_node = rt.getNodeByName(instance.get("instance_node"))

        # set the viewport camera for rendering (mandatory for Deadline)
        RenderSettings().set_render_camera(sel_obj)
        # set output paths for rendering (mandatory for Deadline)
        RenderSettings().render_output(container_name)
openpype/hosts/max/plugins/load/load_camera_fbx.py  (new file, 64 lines)
@@ -0,0 +1,64 @@
import os
from openpype.pipeline import (
    load,
    get_representation_path
)
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib


class FbxLoader(load.LoaderPlugin):
    """Fbx Loader."""

    families = ["camera"]
    representations = ["fbx"]
    order = -9
    icon = "code-fork"
    color = "white"

    def load(self, context, name=None, namespace=None, data=None):
        from pymxs import runtime as rt

        filepath = os.path.normpath(self.fname)

        fbx_import_cmd = (
            f"""

FBXImporterSetParam "Animation" true
FBXImporterSetParam "Cameras" true
FBXImporterSetParam "AxisConversionMethod" true
FbxExporterSetParam "UpAxis" "Y"
FbxExporterSetParam "Preserveinstances" true

importFile @"{filepath}" #noPrompt using:FBXIMP
""")

        self.log.debug(f"Executing command: {fbx_import_cmd}")
        rt.execute(fbx_import_cmd)

        container_name = f"{name}_CON"

        asset = rt.getNodeByName(f"{name}")

        return containerise(
            name, [asset], context, loader=self.__class__.__name__)

    def update(self, container, representation):
        from pymxs import runtime as rt

        path = get_representation_path(representation)
        node = rt.getNodeByName(container["instance_node"])

        fbx_objects = self.get_container_children(node)
        for fbx_object in fbx_objects:
            fbx_object.source = path

        lib.imprint(container["instance_node"], {
            "representation": str(representation["_id"])
        })

    def remove(self, container):
        from pymxs import runtime as rt

        node = rt.getNodeByName(container["instance_node"])
        rt.delete(node)
openpype/hosts/max/plugins/load/load_max_scene.py  (new file, 62 lines)
@@ -0,0 +1,62 @@
import os
from openpype.pipeline import (
    load, get_representation_path
)
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib


class MaxSceneLoader(load.LoaderPlugin):
    """Max Scene Loader."""

    families = ["camera"]
    representations = ["max"]
    order = -8
    icon = "code-fork"
    color = "green"

    def load(self, context, name=None, namespace=None, data=None):
        from pymxs import runtime as rt
        path = os.path.normpath(self.fname)
        # import the max scene by using "merge file"
        path = path.replace('\\', '/')

        merge_before = {
            c for c in rt.rootNode.Children
            if rt.classOf(c) == rt.Container
        }
        rt.mergeMaxFile(path)

        merge_after = {
            c for c in rt.rootNode.Children
            if rt.classOf(c) == rt.Container
        }
        max_containers = merge_after.difference(merge_before)

        if len(max_containers) != 1:
            self.log.error("Something failed when loading.")

        max_container = max_containers.pop()

        return containerise(
            name, [max_container], context, loader=self.__class__.__name__)

    def update(self, container, representation):
        from pymxs import runtime as rt

        path = get_representation_path(representation)
        node = rt.getNodeByName(container["instance_node"])

        max_objects = self.get_container_children(node)
        for max_object in max_objects:
            max_object.source = path

        lib.imprint(container["instance_node"], {
            "representation": str(representation["_id"])
        })

    def remove(self, container):
        from pymxs import runtime as rt

        node = rt.getNodeByName(container["instance_node"])
        rt.delete(node)
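The Max scene loader identifies freshly merged nodes by a set difference: snapshot the scene's top-level Container nodes before and after the merge, and whatever is new must be the loaded content. The same idiom in isolation, as a sketch assuming pymxs:

from pymxs import runtime as rt


def merge_and_find_new_containers(path):
    """Merge a .max file and return the Container nodes it created (sketch)."""
    def snapshot():
        return {c for c in rt.rootNode.Children
                if rt.classOf(c) == rt.Container}

    before = snapshot()
    rt.mergeMaxFile(path)
    return snapshot() - before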
@@ -6,14 +6,19 @@ Because of limited api, alembics can be only loaded, but not easily updated.
 """
 import os
 from openpype.pipeline import (
-    load
+    load, get_representation_path
 )
+from openpype.hosts.max.api.pipeline import containerise
+from openpype.hosts.max.api import lib


 class AbcLoader(load.LoaderPlugin):
     """Alembic loader."""

-    families = ["model", "animation", "pointcache"]
+    families = ["model",
+                "camera",
+                "animation",
+                "pointcache"]
     label = "Load Alembic"
     representations = ["abc"]
     order = -10
@@ -52,14 +57,47 @@ importFile @"{file_path}" #noPrompt

         abc_container = abc_containers.pop()

-        container_name = f"{name}_CON"
-        container = rt.container(name=container_name)
-        abc_container.Parent = container
+        return containerise(
+            name, [abc_container], context, loader=self.__class__.__name__)

-        return container
+    def update(self, container, representation):
+        from pymxs import runtime as rt
+
+        path = get_representation_path(representation)
+        node = rt.getNodeByName(container["instance_node"])
+
+        alembic_objects = self.get_container_children(node, "AlembicObject")
+        for alembic_object in alembic_objects:
+            alembic_object.source = path
+
+        lib.imprint(container["instance_node"], {
+            "representation": str(representation["_id"])
+        })
+
+    def switch(self, container, representation):
+        self.update(container, representation)

     def remove(self, container):
         from pymxs import runtime as rt

-        node = container["node"]
+        node = rt.getNodeByName(container["instance_node"])
         rt.delete(node)
+
+    @staticmethod
+    def get_container_children(parent, type_name):
+        from pymxs import runtime as rt
+
+        def list_children(node):
+            children = []
+            for c in node.Children:
+                children.append(c)
+                children += list_children(c)
+            return children
+
+        filtered = []
+        for child in list_children(parent):
+            class_type = str(rt.classOf(child.baseObject))
+            if class_type == type_name:
+                filtered.append(child)
+
+        return filtered
openpype/hosts/max/plugins/publish/collect_render.py  (new file, 67 lines)
@@ -0,0 +1,67 @@
# -*- coding: utf-8 -*-
"""Collect Render."""
import os
import pyblish.api

from pymxs import runtime as rt
from openpype.pipeline import get_current_asset_name
from openpype.hosts.max.api.lib import get_max_version
from openpype.hosts.max.api.lib_renderproducts import RenderProducts
from openpype.client import get_last_version_by_subset_name


class CollectRender(pyblish.api.InstancePlugin):
    """Collect Render for Deadline."""

    order = pyblish.api.CollectorOrder + 0.01
    label = "Collect 3dsmax Render Layers"
    hosts = ['max']
    families = ["maxrender"]

    def process(self, instance):
        context = instance.context
        folder = rt.maxFilePath
        file = rt.maxFileName
        current_file = os.path.join(folder, file)
        filepath = current_file.replace("\\", "/")

        context.data['currentFile'] = current_file
        asset = get_current_asset_name()

        render_layer_files = RenderProducts().render_product(instance.name)
        folder = folder.replace("\\", "/")

        img_format = RenderProducts().image_format()
        project_name = context.data["projectName"]
        asset_doc = context.data["assetEntity"]
        asset_id = asset_doc["_id"]
        version_doc = get_last_version_by_subset_name(project_name,
                                                      instance.name,
                                                      asset_id)

        self.log.debug("version_doc: {0}".format(version_doc))
        version_int = 1
        if version_doc:
            version_int += int(version_doc["name"])

        self.log.debug(f"Setting {version_int} to context.")
        context.data["version"] = version_int

        # set up the plugin as 3dsmax for the internal renderer
        data = {
            "subset": instance.name,
            "asset": asset,
            "publish": True,
            "maxversion": str(get_max_version()),
            "imageFormat": img_format,
            "family": 'maxrender',
            "families": ['maxrender'],
            "source": filepath,
            "expectedFiles": render_layer_files,
            "plugin": "3dsmax",
            "frameStart": context.data['frameStart'],
            "frameEnd": context.data['frameEnd'],
            "version": version_int
        }
        self.log.info("data: {0}".format(data))
        instance.data.update(data)
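The version the collector assigns is simply "last published version plus one, defaulting to 1 when nothing was published yet"; pulled out as a sketch:

def next_version(version_doc):
    """Return the version number the collector would assign (sketch)."""
    if not version_doc:
        return 1
    return int(version_doc["name"]) + 1

assert next_version(None) == 1
assert next_version({"name": 3}) == 4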
openpype/hosts/max/plugins/publish/extract_camera_abc.py  (new file, 75 lines)
@@ -0,0 +1,75 @@
import os
import pyblish.api
from openpype.pipeline import (
    publish,
    OptionalPyblishPluginMixin
)
from pymxs import runtime as rt
from openpype.hosts.max.api import (
    maintained_selection,
    get_all_children
)


class ExtractCameraAlembic(publish.Extractor,
                           OptionalPyblishPluginMixin):
    """Extract Camera with AlembicExport."""

    order = pyblish.api.ExtractorOrder - 0.1
    label = "Extract Alembic Camera"
    hosts = ["max"]
    families = ["camera"]
    optional = True

    def process(self, instance):
        if not self.is_active(instance.data):
            return
        start = float(instance.data.get("frameStartHandle", 1))
        end = float(instance.data.get("frameEndHandle", 1))

        container = instance.data["instance_node"]

        self.log.info("Extracting Camera ...")

        stagingdir = self.staging_dir(instance)
        filename = "{name}.abc".format(**instance.data)
        path = os.path.join(stagingdir, filename)

        # We run the export
        self.log.info("Writing alembic '%s' to '%s'" % (filename,
                                                        stagingdir))

        export_cmd = (
            f"""
AlembicExport.ArchiveType = #ogawa
AlembicExport.CoordinateSystem = #maya
AlembicExport.StartFrame = {start}
AlembicExport.EndFrame = {end}
AlembicExport.CustomAttributes = true

exportFile @"{path}" #noPrompt selectedOnly:on using:AlembicExport
""")

        self.log.debug(f"Executing command: {export_cmd}")

        with maintained_selection():
            # select and export
            rt.select(get_all_children(rt.getNodeByName(container)))
            rt.execute(export_cmd)

        self.log.info("Performing Extraction ...")
        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            'name': 'abc',
            'ext': 'abc',
            'files': filename,
            "stagingDir": stagingdir,
        }
        instance.data["representations"].append(representation)
        self.log.info("Extracted instance '%s' to: %s" % (instance.name,
                                                          path))
openpype/hosts/max/plugins/publish/extract_camera_fbx.py  (new file, 75 lines)
@@ -0,0 +1,75 @@
import os
import pyblish.api
from openpype.pipeline import (
    publish,
    OptionalPyblishPluginMixin
)
from pymxs import runtime as rt
from openpype.hosts.max.api import (
    maintained_selection,
    get_all_children
)


class ExtractCameraFbx(publish.Extractor,
                       OptionalPyblishPluginMixin):
    """Extract Camera with FbxExporter."""

    order = pyblish.api.ExtractorOrder - 0.2
    label = "Extract Fbx Camera"
    hosts = ["max"]
    families = ["camera"]
    optional = True

    def process(self, instance):
        if not self.is_active(instance.data):
            return
        container = instance.data["instance_node"]

        self.log.info("Extracting Camera ...")
        stagingdir = self.staging_dir(instance)
        filename = "{name}.fbx".format(**instance.data)

        filepath = os.path.join(stagingdir, filename)
        self.log.info("Writing fbx file '%s' to '%s'" % (filename,
                                                         filepath))

        # Need to export:
        # Animation = True
        # Cameras = True
        # AxisConversionMethod
        fbx_export_cmd = (
            f"""

FBXExporterSetParam "Animation" true
FBXExporterSetParam "Cameras" true
FBXExporterSetParam "AxisConversionMethod" "Animation"
FbxExporterSetParam "UpAxis" "Y"
FbxExporterSetParam "Preserveinstances" true

exportFile @"{filepath}" #noPrompt selectedOnly:true using:FBXEXP
""")

        self.log.debug(f"Executing command: {fbx_export_cmd}")

        with maintained_selection():
            # select and export
            rt.select(get_all_children(rt.getNodeByName(container)))
            rt.execute(fbx_export_cmd)

        self.log.info("Performing Extraction ...")
        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            'name': 'fbx',
            'ext': 'fbx',
            'files': filename,
            "stagingDir": stagingdir,
        }
        instance.data["representations"].append(representation)
        self.log.info("Extracted instance '%s' to: %s" % (instance.name,
                                                          filepath))
openpype/hosts/max/plugins/publish/extract_max_scene_raw.py  (new file, 60 lines)
@@ -0,0 +1,60 @@
import os
import pyblish.api
from openpype.pipeline import (
    publish,
    OptionalPyblishPluginMixin
)
from pymxs import runtime as rt
from openpype.hosts.max.api import (
    maintained_selection,
    get_all_children
)


class ExtractMaxSceneRaw(publish.Extractor,
                         OptionalPyblishPluginMixin):
    """Extract Raw Max Scene with SaveSelected."""

    order = pyblish.api.ExtractorOrder - 0.2
    label = "Extract Max Scene (Raw)"
    hosts = ["max"]
    families = ["camera"]
    optional = True

    def process(self, instance):
        if not self.is_active(instance.data):
            return
        container = instance.data["instance_node"]

        # publish the raw scene for camera
        self.log.info("Extracting Raw Max Scene ...")

        stagingdir = self.staging_dir(instance)
        filename = "{name}.max".format(**instance.data)

        max_path = os.path.join(stagingdir, filename)
        self.log.info("Writing max file '%s' to '%s'" % (filename,
                                                         max_path))

        if "representations" not in instance.data:
            instance.data["representations"] = []

        # saving the max scene
        with maintained_selection():
            # need to figure out how to select the camera
            rt.select(get_all_children(rt.getNodeByName(container)))
            rt.execute(f'saveNodes selection "{max_path}" quiet:true')

        self.log.info("Performing Extraction ...")

        representation = {
            'name': 'max',
            'ext': 'max',
            'files': filename,
            "stagingDir": stagingdir,
        }
        instance.data["representations"].append(representation)
        self.log.info("Extracted instance '%s' to: %s" % (instance.name,
                                                          max_path))
@@ -51,7 +51,7 @@ class ExtractAlembic(publish.Extractor):
     order = pyblish.api.ExtractorOrder
     label = "Extract Pointcache"
     hosts = ["max"]
-    families = ["pointcache", "camera"]
+    families = ["pointcache"]

     def process(self, instance):
         start = float(instance.data.get("frameStartHandle", 1))
(new file)
@@ -0,0 +1,48 @@
# -*- coding: utf-8 -*-
import pyblish.api
from openpype.pipeline import PublishValidationError
from pymxs import runtime as rt


class ValidateCameraContent(pyblish.api.InstancePlugin):
    """Validates Camera instance contents.

    A Camera instance may only hold a SINGLE camera's transform.
    """

    order = pyblish.api.ValidatorOrder
    families = ["camera"]
    hosts = ["max"]
    label = "Camera Contents"
    camera_type = ["$Free_Camera", "$Target_Camera",
                   "$Physical_Camera", "$Target"]

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise PublishValidationError("Camera instance must only include "
                                         "camera (and camera target)")

    def get_invalid(self, instance):
        """Get invalid nodes if the instance is not a camera."""
        invalid = list()
        container = instance.data["instance_node"]
        self.log.info("Validating camera content for "
                      "{}".format(container))

        con = rt.getNodeByName(container)
        selection_list = list(con.Children)
        for sel in selection_list:
            # cast to string to avoid AttributeError from the pymxs wrapper
            sel_tmp = str(sel)
            found = False
            for cam in self.camera_type:
                if sel_tmp.startswith(cam):
                    found = True
                    break
            if not found:
                self.log.error("Camera not found")
                invalid.append(sel)
        return invalid
@@ -2,8 +2,11 @@
 (
     local sysPath = dotNetClass "System.IO.Path"
     local sysDir = dotNetClass "System.IO.Directory"
     local localScript = getThisScriptFilename()
     local startup = sysPath.Combine (sysPath.GetDirectoryName localScript) "startup.py"

+    local pythonpath = systemTools.getEnvVariable "MAX_PYTHONPATH"
+    systemTools.setEnvVariable "PYTHONPATH" pythonpath
+
     python.ExecuteFile startup
 )
@@ -1,4 +1,13 @@
 # -*- coding: utf-8 -*-
+import os
+import sys
+
+# this might happen in some 3dsmax versions where PYTHONPATH isn't added
+# to sys.path automatically
+for path in os.environ["PYTHONPATH"].split(os.pathsep):
+    if path and path not in sys.path:
+        sys.path.append(path)
+
 from openpype.hosts.max.api import MaxHost
 from openpype.pipeline import install_host
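Taken together, the pre-launch hook, startup.ms and startup.py form one round trip that survives 3dsmax dropping PYTHONPATH; condensed into a sketch (the step comments only summarize the diffs above, no new behavior):

# 1. Pre-launch hook (Python, before 3dsmax starts):
#        launch_env["MAX_PYTHONPATH"] = os.environ["PYTHONPATH"]
# 2. startup.ms (MAXScript, inside 3dsmax):
#        systemTools.setEnvVariable "PYTHONPATH"
#            (systemTools.getEnvVariable "MAX_PYTHONPATH")
# 3. startup.py (Python inside 3dsmax) re-appends the entries:
import os
import sys

for path in os.environ.get("PYTHONPATH", "").split(os.pathsep):
    if path and path not in sys.path:
        sys.path.append(path)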
@@ -57,68 +57,6 @@ def edit_shader_definitions():
     window.show()


-def reset_frame_range():
-    """Set frame range to current asset"""
-    # Set FPS first
-    fps = {15: 'game',
-           24: 'film',
-           25: 'pal',
-           30: 'ntsc',
-           48: 'show',
-           50: 'palf',
-           60: 'ntscf',
-           23.98: '23.976fps',
-           23.976: '23.976fps',
-           29.97: '29.97fps',
-           47.952: '47.952fps',
-           47.95: '47.952fps',
-           59.94: '59.94fps',
-           44100: '44100fps',
-           48000: '48000fps'
-           }.get(float(legacy_io.Session.get("AVALON_FPS", 25)), "pal")
-
-    cmds.currentUnit(time=fps)
-
-    # Set frame start/end
-    project_name = legacy_io.active_project()
-    asset_name = legacy_io.Session["AVALON_ASSET"]
-    asset = get_asset_by_name(project_name, asset_name)
-
-    frame_start = asset["data"].get("frameStart")
-    frame_end = asset["data"].get("frameEnd")
-    # Backwards compatibility
-    if frame_start is None or frame_end is None:
-        frame_start = asset["data"].get("edit_in")
-        frame_end = asset["data"].get("edit_out")
-
-    if frame_start is None or frame_end is None:
-        cmds.warning("No edit information found for %s" % asset_name)
-        return
-
-    handles = asset["data"].get("handles") or 0
-    handle_start = asset["data"].get("handleStart")
-    if handle_start is None:
-        handle_start = handles
-
-    handle_end = asset["data"].get("handleEnd")
-    if handle_end is None:
-        handle_end = handles
-
-    frame_start -= int(handle_start)
-    frame_end += int(handle_end)
-
-    cmds.playbackOptions(minTime=frame_start)
-    cmds.playbackOptions(maxTime=frame_end)
-    cmds.playbackOptions(animationStartTime=frame_start)
-    cmds.playbackOptions(animationEndTime=frame_end)
-    cmds.playbackOptions(minTime=frame_start)
-    cmds.playbackOptions(maxTime=frame_end)
-    cmds.currentTime(frame_start)
-
-    cmds.setAttr("defaultRenderGlobals.startFrame", frame_start)
-    cmds.setAttr("defaultRenderGlobals.endFrame", frame_end)
-
-
 def _resolution_from_document(doc):
     if not doc or "data" not in doc:
         print("Entered document is not valid. \"{}\"".format(str(doc)))
@@ -4,7 +4,7 @@ import os
 import sys
 import platform
 import uuid
-import math
+import re

 import json
 import logging
@@ -14,7 +14,7 @@ from math import ceil
 from six import string_types

 from maya import cmds, mel
-import maya.api.OpenMaya as om
+from maya.api import OpenMaya

 from openpype.client import (
     get_project,
@@ -33,7 +33,6 @@ from openpype.pipeline import (
     registered_host,
 )
 from openpype.pipeline.context_tools import get_current_project_asset
-from .commands import reset_frame_range


 self = sys.modules[__name__]
@@ -254,11 +253,6 @@ def read(node):
     return data


-def _get_mel_global(name):
-    """Return the value of a mel global variable"""
-    return mel.eval("$%s = $%s;" % (name, name))
-
-
 def matrix_equals(a, b, tolerance=1e-10):
     """
     Compares two matrices with an imperfection tolerance
@@ -408,9 +402,9 @@ def lsattrs(attrs):

     """

-    dep_fn = om.MFnDependencyNode()
-    dag_fn = om.MFnDagNode()
-    selection_list = om.MSelectionList()
+    dep_fn = OpenMaya.MFnDependencyNode()
+    dag_fn = OpenMaya.MFnDagNode()
+    selection_list = OpenMaya.MSelectionList()

     first_attr = next(iter(attrs))
@@ -424,7 +418,7 @@ def lsattrs(attrs):
     matches = set()
     for i in range(selection_list.length()):
         node = selection_list.getDependNode(i)
-        if node.hasFn(om.MFn.kDagNode):
+        if node.hasFn(OpenMaya.MFn.kDagNode):
             fn_node = dag_fn.setObject(node)
             full_path_names = [path.fullPathName()
                                for path in fn_node.getAllPaths()]
@@ -624,15 +618,15 @@ class delete_after(object):
         cmds.delete(self._nodes)


-def get_current_renderlayer():
-    return cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True)
-
-
 def get_renderer(layer):
     with renderlayer(layer):
         return cmds.getAttr("defaultRenderGlobals.currentRenderer")


+def get_current_renderlayer():
+    return cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True)
+
+
 @contextlib.contextmanager
 def no_undo(flush=False):
     """Disable the undo queue during the context
@@ -873,11 +867,11 @@ def maintained_selection_api():
     Warning: This is *not* added to the undo stack.

     """
-    original = om.MGlobal.getActiveSelectionList()
+    original = OpenMaya.MGlobal.getActiveSelectionList()
     try:
         yield
     finally:
-        om.MGlobal.setActiveSelectionList(original)
+        OpenMaya.MGlobal.setActiveSelectionList(original)


 @contextlib.contextmanager
@@ -1287,11 +1281,11 @@ def get_id(node):
     if node is None:
         return

-    sel = om.MSelectionList()
+    sel = OpenMaya.MSelectionList()
     sel.add(node)

     api_node = sel.getDependNode(0)
-    fn = om.MFnDependencyNode(api_node)
+    fn = OpenMaya.MFnDependencyNode(api_node)

     if not fn.hasAttribute("cbId"):
         return
@@ -1373,27 +1367,6 @@ def set_id(node, unique_id, overwrite=False):
     cmds.setAttr(attr, unique_id, type="string")


 # endregion ID
-def get_reference_node(path):
-    """
-    Get the reference node when the path is found being used in a reference
-    Args:
-        path (str): the file path to check
-
-    Returns:
-        node (str): name of the reference node in question
-    """
-    try:
-        node = cmds.file(path, query=True, referenceNode=True)
-    except RuntimeError:
-        log.debug('File is not referenced : "{}"'.format(path))
-        return
-
-    reference_path = cmds.referenceQuery(path, filename=True)
-    if os.path.normpath(path) == os.path.normpath(reference_path):
-        return node
-
-
 def set_attribute(attribute, value, node):
     """Adjust attributes based on the value from the attribute data
@@ -1995,8 +1968,6 @@ def get_id_from_sibling(node, history_only=True):
     return first_id


-
-
 # Project settings
 def set_scene_fps(fps, update=True):
     """Set FPS from project configuration
@@ -2009,30 +1980,23 @@ def set_scene_fps(fps, update=True):

     """

-    fps_mapping = {'15': 'game',
-                   '24': 'film',
-                   '25': 'pal',
-                   '30': 'ntsc',
-                   '48': 'show',
-                   '50': 'palf',
-                   '60': 'ntscf',
-                   '23.98': '23.976fps',
-                   '23.976': '23.976fps',
-                   '29.97': '29.97fps',
-                   '47.952': '47.952fps',
-                   '47.95': '47.952fps',
-                   '59.94': '59.94fps',
-                   '44100': '44100fps',
-                   '48000': '48000fps'}
+    fps_mapping = {
+        '15': 'game',
+        '24': 'film',
+        '25': 'pal',
+        '30': 'ntsc',
+        '48': 'show',
+        '50': 'palf',
+        '60': 'ntscf',
+        '23.976023976023978': '23.976fps',
+        '29.97002997002997': '29.97fps',
+        '47.952047952047955': '47.952fps',
+        '59.94005994005994': '59.94fps',
+        '44100': '44100fps',
+        '48000': '48000fps'
+    }

-    # pull from mapping
-    # this should convert float string to float and int to int
-    # so 25.0 is converted to 25, but 23.98 will be still float.
-    dec, ipart = math.modf(fps)
-    if dec == 0.0:
-        fps = int(ipart)
-
-    unit = fps_mapping.get(str(fps), None)
+    unit = fps_mapping.get(str(convert_to_maya_fps(fps)), None)
     if unit is None:
         raise ValueError("Unsupported FPS value: `%s`" % fps)
@@ -2099,6 +2063,67 @@ def set_scene_resolution(width, height, pixelAspect):
     cmds.setAttr("%s.pixelAspect" % control_node, pixelAspect)


+def get_frame_range():
+    """Get the current asset's frame range and handles."""
+
+    # Get frame start/end
+    project_name = legacy_io.active_project()
+    asset_name = legacy_io.Session["AVALON_ASSET"]
+    asset = get_asset_by_name(project_name, asset_name)
+
+    frame_start = asset["data"].get("frameStart")
+    frame_end = asset["data"].get("frameEnd")
+    # Backwards compatibility
+    if frame_start is None or frame_end is None:
+        frame_start = asset["data"].get("edit_in")
+        frame_end = asset["data"].get("edit_out")
+
+    if frame_start is None or frame_end is None:
+        cmds.warning("No edit information found for %s" % asset_name)
+        return
+
+    handles = asset["data"].get("handles") or 0
+    handle_start = asset["data"].get("handleStart")
+    if handle_start is None:
+        handle_start = handles
+
+    handle_end = asset["data"].get("handleEnd")
+    if handle_end is None:
+        handle_end = handles
+
+    return {
+        "frameStart": frame_start,
+        "frameEnd": frame_end,
+        "handleStart": handle_start,
+        "handleEnd": handle_end
+    }
+
+
+def reset_frame_range():
+    """Set frame range to current asset."""
+
+    fps = convert_to_maya_fps(
+        float(legacy_io.Session.get("AVALON_FPS", 25))
+    )
+    set_scene_fps(fps)
+
+    frame_range = get_frame_range()
+
+    frame_start = frame_range["frameStart"] - int(frame_range["handleStart"])
+    frame_end = frame_range["frameEnd"] + int(frame_range["handleEnd"])
+
+    cmds.playbackOptions(minTime=frame_start)
+    cmds.playbackOptions(maxTime=frame_end)
+    cmds.playbackOptions(animationStartTime=frame_start)
+    cmds.playbackOptions(animationEndTime=frame_end)
+    cmds.playbackOptions(minTime=frame_start)
+    cmds.playbackOptions(maxTime=frame_end)
+    cmds.currentTime(frame_start)
+
+    cmds.setAttr("defaultRenderGlobals.startFrame", frame_start)
+    cmds.setAttr("defaultRenderGlobals.endFrame", frame_end)
+
+
 def reset_scene_resolution():
     """Apply the scene resolution from the project definition
@@ -2150,7 +2175,9 @@ def set_context_settings():
     asset_data = asset_doc.get("data", {})

     # Set project fps
-    fps = asset_data.get("fps", project_data.get("fps", 25))
+    fps = convert_to_maya_fps(
+        asset_data.get("fps", project_data.get("fps", 25))
+    )
     legacy_io.Session["AVALON_FPS"] = str(fps)
     set_scene_fps(fps)
@@ -2172,15 +2199,12 @@ def validate_fps():

     """

-    fps = get_current_project_asset(fields=["data.fps"])["data"]["fps"]
-    # TODO(antirotor): This is a hack for framerates having multiple
-    # decimal places. FTrack is ceiling decimal values on
-    # fps to two decimal places but Maya 2019+ is reporting those fps
-    # with much higher resolution. As we currently cannot fix Ftrack
-    # rounding, we have to round those numbers coming from Maya.
-    current_fps = float_round(mel.eval('currentTimeUnitToFPS()'), 2)
+    expected_fps = convert_to_maya_fps(
+        get_current_project_asset(fields=["data.fps"])["data"]["fps"]
+    )
+    current_fps = mel.eval('currentTimeUnitToFPS()')

-    fps_match = current_fps == fps
+    fps_match = current_fps == expected_fps
     if not fps_match and not IS_HEADLESS:
         from openpype.widgets import popup
@@ -2189,14 +2213,19 @@ def validate_fps():
         dialog = popup.PopupUpdateKeys(parent=parent)
         dialog.setModal(True)
         dialog.setWindowTitle("Maya scene does not match project FPS")
-        dialog.setMessage("Scene %i FPS does not match project %i FPS" %
-                          (current_fps, fps))
+        dialog.setMessage(
+            "Scene {} FPS does not match project {} FPS".format(
+                current_fps, expected_fps
+            )
+        )
         dialog.setButtonText("Fix")

         # Set new text for button (add optional argument for the popup?)
         toggle = dialog.widgets["toggle"]
         update = toggle.isChecked()
-        dialog.on_clicked_state.connect(lambda: set_scene_fps(fps, update))
+        dialog.on_clicked_state.connect(
+            lambda: set_scene_fps(expected_fps, update)
+        )

         dialog.show()
@@ -3324,15 +3353,15 @@ def iter_visible_nodes_in_range(nodes, start, end):
     @memodict
     def get_visibility_mplug(node):
         """Return api 2.0 MPlug with cached memoize decorator"""
-        sel = om.MSelectionList()
+        sel = OpenMaya.MSelectionList()
         sel.add(node)
         dag = sel.getDagPath(0)
-        return om.MFnDagNode(dag).findPlug("visibility", True)
+        return OpenMaya.MFnDagNode(dag).findPlug("visibility", True)

     @contextlib.contextmanager
     def dgcontext(mtime):
         """MDGContext context manager"""
-        context = om.MDGContext(mtime)
+        context = OpenMaya.MDGContext(mtime)
         try:
             previous = context.makeCurrent()
             yield context
@@ -3341,9 +3370,9 @@ def iter_visible_nodes_in_range(nodes, start, end):

     # We skip the first frame as we already used that frame to check for
     # overall visibilities. And end+1 to include the end frame.
-    scene_units = om.MTime.uiUnit()
+    scene_units = OpenMaya.MTime.uiUnit()
     for frame in range(start + 1, end + 1):
-        mtime = om.MTime(frame, unit=scene_units)
+        mtime = OpenMaya.MTime(frame, unit=scene_units)

         # Build little cache so we don't query the same MPlug's value
         # again if it was checked on this frame and also is a dependency
@@ -3379,3 +3408,200 @@ def iter_visible_nodes_in_range(nodes, start, end):
 def get_attribute_input(attr):
     connections = cmds.listConnections(attr, plugs=True, destination=False)
     return connections[0] if connections else None
+
+
+def convert_to_maya_fps(fps):
+    """Convert any fps to the nearest supported Maya framerate."""
+    float_framerates = [
+        23.976023976023978,
+        # WTF is 29.97 df vs fps?
+        29.97002997002997,
+        47.952047952047955,
+        59.94005994005994
+    ]
+    # 44100 fps evaluates as 41000.0. Why? Omitting for now.
+    int_framerates = [
+        2, 3, 4, 5, 6, 8, 10, 12, 15, 16, 20, 24, 25, 30, 40, 48, 50,
+        60, 75, 80, 90, 100, 120, 125, 150, 200, 240, 250, 300, 375,
+        400, 500, 600, 750, 1200, 1500, 2000, 3000, 6000, 48000
+    ]
+
+    # If the input fps is a whole number we return it directly.
+    if float(fps).is_integer():
+        # Validate fps is part of Maya's fps selection.
+        if int(fps) not in int_framerates:
+            raise ValueError(
+                "Framerate \"{}\" is not supported in Maya".format(fps)
+            )
+        return int(fps)
+    else:
+        # Differences to supported float framerates.
+        differences = []
+        for i in float_framerates:
+            differences.append(abs(i - fps))
+
+        # Validate the difference does not stray too far from the
+        # supported framerates.
+        min_difference = min(differences)
+        min_index = differences.index(min_difference)
+        supported_framerate = float_framerates[min_index]
+        if min_difference > 0.1:
+            raise ValueError(
+                "Framerate \"{}\" strays too far from any supported framerate"
+                " in Maya. Closest supported framerate is \"{}\"".format(
+                    fps, supported_framerate
+                )
+            )
+
+        return supported_framerate
+
+
+def write_xgen_file(data, filepath):
+    """Overwrites data in .xgen files.
+
+    Quite a naive approach, mainly to overwrite "xgDataPath" and
+    "xgProjectPath".
+
+    Args:
+        data (dict): Dictionary of key, value. Key matches with the xgen
+            file. For example:
+                {"xgDataPath": "some/path"}
+        filepath (string): Absolute path of the .xgen file.
+    """
+    # Generate a regex lookup for line-to-key, basically matching
+    # any of the keys in `\t{key}\t\t`
+    keys = "|".join(re.escape(key) for key in data.keys())
+    re_keys = re.compile("^\t({})\t\t".format(keys))
+
+    lines = []
+    with open(filepath, "r") as f:
+        for line in f:
+            match = re_keys.match(line)
+            if match:
+                key = match.group(1)
+                value = data[key]
+                line = "\t{}\t\t{}\n".format(key, value)
+
+            lines.append(line)
+
+    with open(filepath, "w") as f:
+        f.writelines(lines)
+
+
+def get_color_management_preferences():
+    """Get and resolve OCIO preferences."""
+    data = {
+        # Is color management enabled.
+        "enabled": cmds.colorManagementPrefs(
+            query=True, cmEnabled=True
+        ),
+        "rendering_space": cmds.colorManagementPrefs(
+            query=True, renderingSpaceName=True
+        ),
+        "output_transform": cmds.colorManagementPrefs(
+            query=True, outputTransformName=True
+        ),
+        "output_transform_enabled": cmds.colorManagementPrefs(
+            query=True, outputTransformEnabled=True
+        ),
+        "view_transform": cmds.colorManagementPrefs(
+            query=True, viewTransformName=True
+        )
+    }
+
+    # Split view and display from view_transform. view_transform comes in
+    # the format of "{view} ({display})".
+    regex = re.compile(r"^(?P<view>.+) \((?P<display>.+)\)$")
+    match = regex.match(data["view_transform"])
+    data.update({
+        "display": match.group("display"),
+        "view": match.group("view")
+    })
+
+    # Get the config's absolute path.
+    path = cmds.colorManagementPrefs(
+        query=True, configFilePath=True
+    )
+
+    # The OCIO config supports a custom <MAYA_RESOURCES> token.
+    maya_resources_token = "<MAYA_RESOURCES>"
+    maya_resources_path = OpenMaya.MGlobal.getAbsolutePathToResources()
+    path = path.replace(maya_resources_token, maya_resources_path)
+
+    data["config"] = path
+
+    return data
+
+
+def get_color_management_output_transform():
+    preferences = get_color_management_preferences()
+    colorspace = preferences["rendering_space"]
+    if preferences["output_transform_enabled"]:
+        colorspace = preferences["output_transform"]
+    return colorspace
+
+
+def len_flattened(components):
+    """Return the length of the list as if it was flattened.
+
+    Maya will return consecutive components as a single entry
+    when requesting with `maya.cmds.ls` without the `flatten`
+    flag. Though enabling `flatten` on a large list (e.g. millions)
+    will result in a slow result. This command will return the amount
+    of entries in a non-flattened list by parsing the result with
+    regex.
+
+    Args:
+        components (list): The non-flattened components.
+
+    Returns:
+        int: The amount of entries.
+
+    """
+    assert isinstance(components, (list, tuple))
+    n = 0
+
+    pattern = re.compile(r"\[(\d+):(\d+)\]")
+    for c in components:
+        match = pattern.search(c)
+        if match:
+            start, end = match.groups()
+            n += int(end) - int(start) + 1
+        else:
+            n += 1
+    return n
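For a quick sanity check of the helpers above, values worked out from the mapping in the code rather than from a live Maya session:

convert_to_maya_fps(25)      # -> 25, whole numbers pass through unchanged
convert_to_maya_fps(29.97)   # -> 29.97002997002997, snapped (difference < 0.1)
convert_to_maya_fps(27)      # -> ValueError, 27 is not a Maya framerate

# len_flattened counts "[start:end]" runs without expanding them:
len_flattened(["pCube1.vtx[0:7]", "pCube1.vtx[9]"])  # -> 8 + 1 = 9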
@@ -46,6 +46,7 @@ import attr

 from . import lib
 from . import lib_rendersetup
+from openpype.pipeline.colorspace import get_ocio_config_views

 from maya import cmds, mel
@@ -127,6 +128,7 @@ class RenderProduct(object):
     """
     productName = attr.ib()
     ext = attr.ib()  # extension
+    colorspace = attr.ib()  # colorspace
     aov = attr.ib(default=None)  # source aov
     driver = attr.ib(default=None)  # source driver
     multipart = attr.ib(default=False)  # multichannel file
@@ -196,12 +198,18 @@ class ARenderProducts:
         """Constructor."""
         self.layer = layer
         self.render_instance = render_instance
-        self.multipart = False
+        self.multipart = self.get_multipart()

         # Initialize
         self.layer_data = self._get_layer_data()
         self.layer_data.products = self.get_render_products()

+    def get_multipart(self):
+        raise NotImplementedError(
+            "The render product implementation does not have a "
+            "\"get_multipart\" method."
+        )
+
     def has_camera_token(self):
         # type: () -> bool
         """Check if camera token is in image prefix.
@@ -344,7 +352,6 @@ class ARenderProducts:
         separator = file_prefix[matches[0].end(1):matches[1].start(1)]
         return separator

-
     def _get_layer_data(self):
         # type: () -> LayerMetadata
         # ______________________________________________
@@ -531,16 +538,20 @@ class RenderProductsArnold(ARenderProducts):

         return prefix

-    def _get_aov_render_products(self, aov, cameras=None):
-        """Return all render products for the AOV"""
-
-        products = []
-        aov_name = self._get_attr(aov, "name")
+    def get_multipart(self):
+        multipart = False
+        multilayer = bool(self._get_attr("defaultArnoldDriver.multipart"))
+        merge_AOVs = bool(self._get_attr("defaultArnoldDriver.mergeAOVs"))
+        if multilayer or merge_AOVs:
+            multipart = True
+
+        return multipart
+
+    def _get_aov_render_products(self, aov, cameras=None):
+        """Return all render products for the AOV"""
+
+        products = []
+        aov_name = self._get_attr(aov, "name")
         ai_drivers = cmds.listConnections("{}.outputs".format(aov),
                                           source=True,
                                           destination=False,
@@ -553,6 +564,9 @@ class RenderProductsArnold(ARenderProducts):
         ]

         for ai_driver in ai_drivers:
+            colorspace = self._get_colorspace(
+                ai_driver + ".colorManagement"
+            )
             # todo: check aiAOVDriver.prefix as it could have
             # a custom path prefix set for this driver
@@ -590,12 +604,15 @@ class RenderProductsArnold(ARenderProducts):
         global_aov = self._get_attr(aov, "globalAov")
         if global_aov:
             for camera in cameras:
-                product = RenderProduct(productName=name,
-                                        ext=ext,
-                                        aov=aov_name,
-                                        driver=ai_driver,
-                                        multipart=multipart,
-                                        camera=camera)
+                product = RenderProduct(
+                    productName=name,
+                    ext=ext,
+                    aov=aov_name,
+                    driver=ai_driver,
+                    multipart=self.multipart,
+                    camera=camera,
+                    colorspace=colorspace
+                )
                 products.append(product)

         all_light_groups = self._get_attr(aov, "lightGroups")
@@ -603,13 +620,16 @@ class RenderProductsArnold(ARenderProducts):
                 # All light groups is enabled. A single multipart
                 # Render Product
                 for camera in cameras:
-                    product = RenderProduct(productName=name + "_lgroups",
-                                            ext=ext,
-                                            aov=aov_name,
-                                            driver=ai_driver,
-                                            # Always multichannel output
-                                            multipart=True,
-                                            camera=camera)
+                    product = RenderProduct(
+                        productName=name + "_lgroups",
+                        ext=ext,
+                        aov=aov_name,
+                        driver=ai_driver,
+                        # Always multichannel output
+                        multipart=True,
+                        camera=camera,
+                        colorspace=colorspace
+                    )
                     products.append(product)
             else:
                 value = self._get_attr(aov, "lightGroupsList")
@@ -625,12 +645,36 @@ class RenderProductsArnold(ARenderProducts):
                         aov=aov_name,
                         driver=ai_driver,
                         ext=ext,
-                        camera=camera
+                        camera=camera,
+                        colorspace=colorspace
                     )
                     products.append(product)

         return products

+    def _get_colorspace(self, attribute):
+        """Resolve colorspace from Arnold settings."""
+
+        def _view_transform():
+            preferences = lib.get_color_management_preferences()
+            views_data = get_ocio_config_views(preferences["config"])
+            view_data = views_data[
+                "{}/{}".format(preferences["display"], preferences["view"])
+            ]
+            return view_data["colorspace"]
+
+        def _raw():
+            preferences = lib.get_color_management_preferences()
+            return preferences["rendering_space"]
+
+        resolved_values = {
+            "Raw": _raw,
+            "Use View Transform": _view_transform,
+            # Default. Same as Maya Preferences.
+            "Use Output Transform": lib.get_color_management_output_transform
+        }
+        return resolved_values[self._get_attr(attribute)]()
+
     def get_render_products(self):
         """Get all AOVs.
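The new _get_colorspace resolves Arnold's colorManagement setting through a dispatch dictionary of resolver callables rather than an if/elif chain; unknown values fail loudly with a KeyError. A standalone sketch of the pattern (the returned colorspace strings are invented placeholders):

def resolve_colorspace(setting):
    """Map a driver's color management setting to a colorspace name."""
    def _raw():
        return "scene-linear Rec.709-sRGB"  # placeholder rendering space

    def _view_transform():
        return "sRGB"  # placeholder display/view colorspace

    resolved_values = {
        "Raw": _raw,
        "Use View Transform": _view_transform,
        # Default. Same as Maya Preferences.
        "Use Output Transform": lambda: "ACES 1.0 SDR-video",
    }
    return resolved_values[setting]()  # KeyError on unexpected settings

assert resolve_colorspace("Raw") == "scene-linear Rec.709-sRGB"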
@@ -659,11 +703,19 @@ class RenderProductsArnold(ARenderProducts):
         ]

         default_ext = self._get_attr("defaultRenderGlobals.imfPluginKey")
-        beauty_products = [RenderProduct(
-            productName="beauty",
-            ext=default_ext,
-            driver="defaultArnoldDriver",
-            camera=camera) for camera in cameras]
+        colorspace = self._get_colorspace(
+            "defaultArnoldDriver.colorManagement"
+        )
+        beauty_products = [
+            RenderProduct(
+                productName="beauty",
+                ext=default_ext,
+                driver="defaultArnoldDriver",
+                camera=camera,
+                colorspace=colorspace
+            ) for camera in cameras
+        ]

         # AOVs > Legacy > Maya Render View > Mode
         aovs_enabled = bool(
             self._get_attr("defaultArnoldRenderOptions.aovMode")
@@ -731,6 +783,14 @@ class RenderProductsVray(ARenderProducts):

     renderer = "vray"

+    def get_multipart(self):
+        multipart = False
+        image_format = self._get_attr("vraySettings.imageFormatStr")
+        if image_format == "exr (multichannel)":
+            multipart = True
+
+        return multipart
+
     def get_renderer_prefix(self):
         # type: () -> str
         """Get image prefix for V-Ray.
@@ -804,23 +864,29 @@ class RenderProductsVray(ARenderProducts):
         if not dont_save_rgb:
             for camera in cameras:
                 products.append(
-                    RenderProduct(productName="",
-                                  ext=default_ext,
-                                  camera=camera))
+                    RenderProduct(
+                        productName="",
+                        ext=default_ext,
+                        camera=camera,
+                        colorspace=lib.get_color_management_output_transform(),
+                        multipart=self.multipart
+                    )
+                )

         # separate alpha file
         separate_alpha = self._get_attr("vraySettings.separateAlpha")
         if separate_alpha:
             for camera in cameras:
                 products.append(
-                    RenderProduct(productName="Alpha",
-                                  ext=default_ext,
-                                  camera=camera)
+                    RenderProduct(
+                        productName="Alpha",
+                        ext=default_ext,
+                        camera=camera,
+                        multipart=self.multipart
+                    )
                 )

-        if image_format_str == "exr (multichannel)":
+        if self.multipart:
             # AOVs are merged in m-channel file, only main layer is rendered
-            self.multipart = True
             return products

         # handle aovs from references
@@ -858,10 +924,13 @@ class RenderProductsVray(ARenderProducts):

             aov_name = self._get_vray_aov_name(aov)
             for camera in cameras:
-                product = RenderProduct(productName=aov_name,
-                                        ext=default_ext,
-                                        aov=aov,
-                                        camera=camera)
+                product = RenderProduct(
+                    productName=aov_name,
+                    ext=default_ext,
+                    aov=aov,
+                    camera=camera,
+                    colorspace=lib.get_color_management_output_transform()
+                )
                 products.append(product)

         return products
@@ -979,6 +1048,34 @@ class RenderProductsRedshift(ARenderProducts):
     renderer = "redshift"
     unmerged_aovs = {"Cryptomatte"}

+    def get_files(self, product):
+        # When outputting AOVs we need to replace Redshift specific AOV tokens
+        # with Maya render tokens for generating file sequences. We validate to
+        # a specific AOV fileprefix so we only need to account for one
+        # replacement.
+        if not product.multipart and product.driver:
+            file_prefix = self._get_attr(product.driver + ".filePrefix")
+            self.layer_data.filePrefix = file_prefix.replace(
+                "<BeautyPath>/<BeautyFile>",
+                "<Scene>/<RenderLayer>/<RenderLayer>"
+            )
+
+        return super(RenderProductsRedshift, self).get_files(product)
+
+    def get_multipart(self):
+        # For Redshift we don't directly return upon forcing multilayer
+        # due to some AOVs still being written into separate files,
+        # like Cryptomatte.
+        # AOVs are merged in multi-channel file
+        multipart = False
+        force_layer = bool(
+            self._get_attr("redshiftOptions.exrForceMultilayer")
+        )
+        if force_layer:
+            multipart = True
+
+        return multipart
+
     def get_renderer_prefix(self):
         """Get image prefix for Redshift.
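The new get_files override rewrites Redshift's beauty-path tokens into generic Maya render tokens before the base class expands file sequences. The substitution itself is a plain string replace; a tiny sketch with an illustrative prefix value:

file_prefix = "renders/<BeautyPath>/<BeautyFile>"

# Redshift-specific tokens become Maya render tokens so the generic
# sequence expansion in the base class can resolve them.
maya_prefix = file_prefix.replace(
    "<BeautyPath>/<BeautyFile>",
    "<Scene>/<RenderLayer>/<RenderLayer>"
)
assert maya_prefix == "renders/<Scene>/<RenderLayer>/<RenderLayer>"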
@@ -1018,16 +1115,6 @@ class RenderProductsRedshift(ARenderProducts):
             for c in self.get_renderable_cameras()
         ]

-        # For Redshift we don't directly return upon forcing multilayer
-        # due to some AOVs still being written into separate files,
-        # like Cryptomatte.
-        # AOVs are merged in multi-channel file
-        multipart = False
-        force_layer = bool(self._get_attr("redshiftOptions.exrForceMultilayer"))  # noqa
-        exMultipart = bool(self._get_attr("redshiftOptions.exrMultipart"))
-        if exMultipart or force_layer:
-            multipart = True
-
         # Get Redshift Extension from image format
         image_format = self._get_attr("redshiftOptions.imageFormat")  # integer
         ext = mel.eval("redshiftGetImageExtension(%i)" % image_format)
@@ -1049,7 +1136,7 @@ class RenderProductsRedshift(ARenderProducts):
                 continue

             aov_type = self._get_attr(aov, "aovType")
-            if multipart and aov_type not in self.unmerged_aovs:
+            if self.multipart and aov_type not in self.unmerged_aovs:
                 continue

             # Any AOVs that still get processed, like Cryptomatte
@@ -1084,8 +1171,9 @@ class RenderProductsRedshift(ARenderProducts):
                         productName=aov_light_group_name,
                         aov=aov_name,
                         ext=ext,
-                        multipart=multipart,
-                        camera=camera)
+                        multipart=False,
+                        camera=camera,
+                        driver=aov)
                     products.append(product)

             if light_groups:
@@ -1098,8 +1186,9 @@ class RenderProductsRedshift(ARenderProducts):
                 product = RenderProduct(productName=aov_name,
                                         aov=aov_name,
                                         ext=ext,
-                                        multipart=multipart,
-                                        camera=camera)
+                                        multipart=False,
+                                        camera=camera,
+                                        driver=aov)
                 products.append(product)

         # When a Beauty AOV is added manually, it will be rendered as
@@ -1114,7 +1203,7 @@ class RenderProductsRedshift(ARenderProducts):
             products.insert(0,
                             RenderProduct(productName=beauty_name,
                                           ext=ext,
-                                          multipart=multipart,
+                                          multipart=self.multipart,
                                           camera=camera))

         return products
@@ -1132,6 +1221,11 @@ class RenderProductsRenderman(ARenderProducts):
     """

     renderer = "renderman"
+    unmerged_aovs = {"PxrCryptomatte"}
+
+    def get_multipart(self):
+        # Implemented as display specific in "get_render_products".
+        return False

     def get_render_products(self):
         """Get all AOVs.
@@ -1181,6 +1275,17 @@ class RenderProductsRenderman(ARenderProducts):
             if not display_types.get(display["driverNode"]["type"]):
                 continue

+            has_cryptomatte = cmds.ls(type=self.unmerged_aovs)
+            matte_enabled = False
+            if has_cryptomatte:
+                for cryptomatte in has_cryptomatte:
+                    cryptomatte_aov = cryptomatte
+                    matte_name = "cryptomatte"
+                    rman_globals = cmds.listConnections(cryptomatte +
+                                                        ".message")
+                    if rman_globals:
+                        matte_enabled = True
+
             aov_name = name
             if aov_name == "rmanDefaultDisplay":
                 aov_name = "beauty"
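The Cryptomatte detection above treats "a PxrCryptomatte node whose .message plug has a connection" as enabled. A hedged sketch of that check as a helper (Maya-only; runnable inside Maya's script editor):

from maya import cmds

def cryptomatte_enabled():
    """Return True if any PxrCryptomatte node is wired up via .message."""
    for node in cmds.ls(type="PxrCryptomatte"):
        # listConnections returns None when nothing is connected.
        if cmds.listConnections(node + ".message"):
            return True
    return False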
@@ -1199,6 +1304,15 @@ class RenderProductsRenderman(ARenderProducts):
                 camera=camera,
                 multipart=True
             )
+
+            if has_cryptomatte and matte_enabled:
+                cryptomatte = RenderProduct(
+                    productName=matte_name,
+                    aov=cryptomatte_aov,
+                    ext=extensions,
+                    camera=camera,
+                    multipart=True
+                )
         else:
             # this code should handle the case where no multipart
             # capable format is selected. But since it involves
@@ -1218,6 +1332,9 @@ class RenderProductsRenderman(ARenderProducts):

             products.append(product)

+            if has_cryptomatte and matte_enabled:
+                products.append(cryptomatte)
+
         return products

     def get_files(self, product):
@@ -1249,6 +1366,10 @@ class RenderProductsMayaHardware(ARenderProducts):
         {"label": "EXR(exr)", "index": 40, "extension": "exr"}
     ]

+    def get_multipart(self):
+        # MayaHardware does not support multipart EXRs.
+        return False
+
     def _get_extension(self, value):
         result = None
         if isinstance(value, int):
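The image_formats table above pairs Maya's integer format index with a label and extension, and _get_extension accepts either form. A sketch of that dual lookup (the JPEG row is an invented example; the EXR row matches the diff):

image_formats = [
    {"label": "JPEG(jpg)", "index": 8, "extension": "jpg"},  # invented row
    {"label": "EXR(exr)", "index": 40, "extension": "exr"},
]

def get_extension(value):
    """Resolve an extension from an index (int) or a label (str)."""
    key = "index" if isinstance(value, int) else "label"
    for fmt in image_formats:
        if fmt[key] == value:
            return fmt["extension"]
    return None

assert get_extension(40) == "exr"
assert get_extension("EXR(exr)") == "exr"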
@@ -1293,7 +1414,12 @@ class RenderProductsMayaHardware(ARenderProducts):

         products = []
         for cam in self.get_renderable_cameras():
-            product = RenderProduct(productName="beauty", ext=ext, camera=cam)
+            product = RenderProduct(
+                productName="beauty",
+                ext=ext,
+                camera=cam,
+                colorspace=lib.get_color_management_output_transform()
+            )
             products.append(product)

         return products

@@ -14,7 +14,7 @@ from openpype.settings import (
 from openpype.pipeline import legacy_io
 from openpype.pipeline import CreatorError
 from openpype.pipeline.context_tools import get_current_project_asset
-from openpype.hosts.maya.api.commands import reset_frame_range
+from openpype.hosts.maya.api.lib import reset_frame_range


 class RenderSettings(object):
@@ -22,17 +22,26 @@ class RenderSettings(object):
     _image_prefix_nodes = {
         'vray': 'vraySettings.fileNamePrefix',
         'arnold': 'defaultRenderGlobals.imageFilePrefix',
-        'renderman': 'defaultRenderGlobals.imageFilePrefix',
-        'redshift': 'defaultRenderGlobals.imageFilePrefix'
+        'renderman': 'rmanGlobals.imageFileFormat',
+        'redshift': 'defaultRenderGlobals.imageFilePrefix',
+        'mayahardware2': 'defaultRenderGlobals.imageFilePrefix'
     }

     _image_prefixes = {
         'vray': get_current_project_settings()["maya"]["RenderSettings"]["vray_renderer"]["image_prefix"],  # noqa
         'arnold': get_current_project_settings()["maya"]["RenderSettings"]["arnold_renderer"]["image_prefix"],  # noqa
-        'renderman': '<Scene>/<layer>/<layer>{aov_separator}<aov>',
+        'renderman': get_current_project_settings()["maya"]["RenderSettings"]["renderman_renderer"]["image_prefix"],  # noqa
         'redshift': get_current_project_settings()["maya"]["RenderSettings"]["redshift_renderer"]["image_prefix"]  # noqa
     }

+    # Renderman only
+    _image_dir = {
+        'renderman': get_current_project_settings()["maya"]["RenderSettings"]["renderman_renderer"]["image_dir"],  # noqa
+        'cryptomatte': get_current_project_settings()["maya"]["RenderSettings"]["renderman_renderer"]["cryptomatte_dir"],  # noqa
+        'imageDisplay': get_current_project_settings()["maya"]["RenderSettings"]["renderman_renderer"]["imageDisplay_dir"],  # noqa
+        "watermark": get_current_project_settings()["maya"]["RenderSettings"]["renderman_renderer"]["watermark_dir"]  # noqa
+    }
+
     _aov_chars = {
         "dot": ".",
         "dash": "-",
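Each of these tables reads straight out of the nested project settings at class-definition time. A sketch of the lookup shape with a stand-in settings payload (keys mirror the diff; the image_dir value is a placeholder):

def get_current_project_settings():
    # Stand-in for openpype.settings.get_current_project_settings().
    return {
        "maya": {
            "RenderSettings": {
                "renderman_renderer": {
                    "image_prefix": "<Scene>/<layer>/<layer>{aov_separator}<aov>",
                    "image_dir": "renders/renderman",  # placeholder
                }
            }
        }
    }

renderman = (
    get_current_project_settings()
    ["maya"]
    ["RenderSettings"]
    ["renderman_renderer"]
)
print(renderman["image_prefix"], renderman["image_dir"])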
@@ -81,7 +90,6 @@ class RenderSettings(object):
                          prefix, type="string")  # noqa
         else:
             print("{0} isn't a supported renderer to autoset settings.".format(renderer))  # noqa
-
         # TODO: handle not having res values in the doc
         width = asset_doc["data"].get("resolutionWidth")
         height = asset_doc["data"].get("resolutionHeight")
@@ -97,6 +105,13 @@ class RenderSettings(object):
             self._set_redshift_settings(width, height)
             mel.eval("redshiftUpdateActiveAovList")

+        if renderer == "renderman":
+            image_dir = self._image_dir["renderman"]
+            cmds.setAttr("rmanGlobals.imageOutputDir",
+                         image_dir, type="string")
+            self._set_renderman_settings(width, height,
+                                         aov_separator)
+
     def _set_arnold_settings(self, width, height):
         """Sets settings for Arnold."""
         from mtoa.core import createOptions  # noqa
@@ -202,6 +217,66 @@ class RenderSettings(object):
         cmds.setAttr("defaultResolution.height", height)
         self._additional_attribs_setter(additional_options)

+    def _set_renderman_settings(self, width, height, aov_separator):
+        """Sets settings for Renderman"""
+        rman_render_presets = (
+            self._project_settings
+            ["maya"]
+            ["RenderSettings"]
+            ["renderman_renderer"]
+        )
+        display_filters = rman_render_presets["display_filters"]
+        d_filters_number = len(display_filters)
+        for i in range(d_filters_number):
+            d_node = cmds.ls(typ=display_filters[i])
+            if len(d_node) > 0:
+                filter_nodes = d_node[0]
+            else:
+                filter_nodes = cmds.createNode(display_filters[i])
+
+            cmds.connectAttr(filter_nodes + ".message",
+                             "rmanGlobals.displayFilters[%i]" % i,
+                             force=True)
+            if filter_nodes.startswith("PxrImageDisplayFilter"):
+                imageDisplay_dir = self._image_dir["imageDisplay"]
+                imageDisplay_dir = imageDisplay_dir.replace("{aov_separator}",
+                                                            aov_separator)
+                cmds.setAttr(filter_nodes + ".filename",
+                             imageDisplay_dir, type="string")
+
+        sample_filters = rman_render_presets["sample_filters"]
+        s_filters_number = len(sample_filters)
+        for n in range(s_filters_number):
+            s_node = cmds.ls(typ=sample_filters[n])
+            if len(s_node) > 0:
+                filter_nodes = s_node[0]
+            else:
+                filter_nodes = cmds.createNode(sample_filters[n])
+
+            cmds.connectAttr(filter_nodes + ".message",
+                             "rmanGlobals.sampleFilters[%i]" % n,
+                             force=True)
+
+            if filter_nodes.startswith("PxrCryptomatte"):
+                matte_dir = self._image_dir["cryptomatte"]
+                matte_dir = matte_dir.replace("{aov_separator}",
+                                              aov_separator)
+                cmds.setAttr(filter_nodes + ".filename",
+                             matte_dir, type="string")
+            elif filter_nodes.startswith("PxrWatermarkFilter"):
+                watermark_dir = self._image_dir["watermark"]
+                watermark_dir = watermark_dir.replace("{aov_separator}",
+                                                      aov_separator)
+                cmds.setAttr(filter_nodes + ".filename",
+                             watermark_dir, type="string")
+
+        additional_options = rman_render_presets["additional_options"]
+
+        self._set_global_output_settings()
+        cmds.setAttr("defaultResolution.width", width)
+        cmds.setAttr("defaultResolution.height", height)
+        self._additional_attribs_setter(additional_options)
+
     def _set_vray_settings(self, aov_separator, width, height):
         # type: (str, int, int) -> None
         """Sets important settings for Vray."""
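Both filter loops in _set_renderman_settings use the same reuse-or-create idiom before wiring the node into rmanGlobals. Extracted as a helper it would look roughly like this (Maya-only sketch; the node type argument is an example):

from maya import cmds

def get_or_create(node_type):
    """Return an existing node of the given type, else create one."""
    existing = cmds.ls(typ=node_type)
    if existing:
        return existing[0]
    return cmds.createNode(node_type)

node = get_or_create("PxrCryptomatte")
cmds.connectAttr(node + ".message",
                 "rmanGlobals.sampleFilters[0]",
                 force=True)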
@@ -12,7 +12,6 @@ from openpype.pipeline.workfile import BuildWorkfile
 from openpype.tools.utils import host_tools
 from openpype.hosts.maya.api import lib, lib_rendersettings
 from .lib import get_main_window, IS_HEADLESS
-from .commands import reset_frame_range

 from .workfile_template_builder import (
     create_placeholder,
@@ -50,7 +49,6 @@ def install():
         parent="MayaWindow"
     )

-    renderer = cmds.getAttr('defaultRenderGlobals.currentRenderer').lower()
     # Create context menu
     context_label = "{}, {}".format(
         legacy_io.Session["AVALON_ASSET"],
@@ -114,7 +112,7 @@ def install():

     cmds.menuItem(
         "Reset Frame Range",
-        command=lambda *args: reset_frame_range()
+        command=lambda *args: lib.reset_frame_range()
     )

     cmds.menuItem(
@@ -514,6 +514,9 @@ def check_lock_on_current_file():

     # add the lock file when opening the file
     filepath = current_file()
+    # Skip if current file is 'untitled'
+    if not filepath:
+        return

     if is_workfile_locked(filepath):
         # add lockfile dialog
@@ -680,10 +683,12 @@ def before_workfile_save(event):

 def after_workfile_save(event):
     workfile_name = event["filename"]
-    if handle_workfile_locks():
-        if workfile_name:
-            if not is_workfile_locked(workfile_name):
-                create_workfile_lock(workfile_name)
+    if (
+        handle_workfile_locks()
+        and workfile_name
+        and not is_workfile_locked(workfile_name)
+    ):
+        create_workfile_lock(workfile_name)


 class MayaDirmap(HostDirmap):
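The after_workfile_save rewrite collapses three nested ifs into a single condition; `and` short-circuits left to right, so the checks still run in the original order. A quick equivalence sketch with stand-in predicates:

def handle_workfile_locks():
    return True  # stand-in predicate

def is_workfile_locked(path):
    return False  # stand-in predicate

def create_workfile_lock(path):
    print("locking", path)

workfile_name = "shot010_v001.ma"  # illustrative filename

# Equivalent to: if a: if b: if not c: act()
if (
    handle_workfile_locks()
    and workfile_name
    and not is_workfile_locked(workfile_name)
):
    create_workfile_lock(workfile_name)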
Some files were not shown because too many files have changed in this diff.