Merge branch 'develop' into bugfix/OP-2834_Fix-extract-playblast
Commit 224fbba95b
462 changed files with 20058 additions and 9972 deletions
15  .github/pull_request_template.md (vendored)

@@ -1,16 +1,9 @@
## Brief description

First sentence is brief description.

## Description

Next paragraph is more elaborate text with more info. This will be displayed for example in collapsed form under the first sentence in a changelog.

## Changelog Description

Paragraphs contain detailed information on the changes made to the product or service, providing an in-depth description of the updates and enhancements. They can be used to explain the reasoning behind the changes, or to highlight the importance of the new features. Paragraphs can often include links to further information or support documentation.

## Additional info

The rest will be ignored in changelog and should contain any additional
technical information.

## Documentation (add _"type: documentation"_ label)

[feature_documentation](future_url_after_it_will_be_merged)

Paragraphs of text giving context of additional technical information or code examples.

## Testing notes:

1. start with this step
2. follow this step
47  .github/workflows/miletone_release_trigger.yml (vendored, new file)

@@ -0,0 +1,47 @@
name: Milestone Release [trigger]

on:
  workflow_dispatch:
    inputs:
      milestone:
        required: true
      release-type:
        type: choice
        description: What release should be created
        options:
          - release
          - pre-release
  milestone:
    types: closed


jobs:
  milestone-title:
    runs-on: ubuntu-latest
    outputs:
      milestone: ${{ steps.milestoneTitle.outputs.value }}
    steps:
      - name: Switch input milestone
        uses: haya14busa/action-cond@v1
        id: milestoneTitle
        with:
          cond: ${{ inputs.milestone == '' }}
          if_true: ${{ github.event.milestone.title }}
          if_false: ${{ inputs.milestone }}
      - name: Print resulted milestone
        run: |
          echo "${{ steps.milestoneTitle.outputs.value }}"

  call-ci-tools-milestone-release:
    needs: milestone-title
    uses: ynput/ci-tools/.github/workflows/milestone_release_ref.yml@main
    with:
      milestone: ${{ needs.milestone-title.outputs.milestone }}
      repo-owner: ${{ github.event.repository.owner.login }}
      repo-name: ${{ github.event.repository.name }}
      version-py-path: "./openpype/version.py"
      pyproject-path: "./pyproject.toml"
    secrets:
      token: ${{ secrets.YNPUT_BOT_TOKEN }}
      user_email: ${{ secrets.CI_EMAIL }}
      user_name: ${{ secrets.CI_USER }}
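Note: the action-cond step above only picks a milestone title, falling back to the title of the milestone that closed when the workflow_dispatch input is empty. A pure-Python sketch of that fallback logic (function name and version strings are illustrative):

def resolve_milestone(input_milestone, event_milestone_title):
    # Mirrors the action-cond step: prefer the manual input when given,
    # otherwise fall back to the closing milestone's title.
    if input_milestone == "":
        return event_milestone_title
    return input_milestone

print(resolve_milestone("", "3.14.8"))        # -> 3.14.8
print(resolve_milestone("3.15.0", "3.14.8"))  # -> 3.15.0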
76  .github/workflows/release.yml (vendored)

@@ -1,76 +0,0 @@
name: Stable Release

on:
  release:
    types:
      - prereleased

jobs:
  create_release:
    runs-on: ubuntu-latest
    if: github.actor != 'pypebot'

    steps:
      - name: 🚛 Checkout Code
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install Python requirements
        run: pip install gitpython semver PyGithub

      - name: 💉 Inject new version into files
        id: version
        run: |
          NEW_VERSION=$(python ./tools/ci_tools.py --finalize ${GITHUB_REF#refs/*/})
          LAST_VERSION=$(python ./tools/ci_tools.py --lastversion release)

          echo "current_version=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT
          echo "last_release=${LAST_VERSION}" >> $GITHUB_OUTPUT
          echo "release_tag=${NEW_VERSION}" >> $GITHUB_OUTPUT

      - name: 💾 Commit and Tag
        id: git_commit
        if: steps.version.outputs.release_tag != 'skip'
        run: |
          git config user.email ${{ secrets.CI_EMAIL }}
          git config user.name ${{ secrets.CI_USER }}
          git add .
          git commit -m "[Automated] Release"
          tag_name="${{ steps.version.outputs.release_tag }}"
          git tag -a $tag_name -m "stable release"

      - name: 🔏 Push to protected main branch
        if: steps.version.outputs.release_tag != 'skip'
        uses: CasperWA/push-protected@v2.10.0
        with:
          token: ${{ secrets.YNPUT_BOT_TOKEN }}
          branch: main
          tags: true
          unprotect_reviews: true

      - name: 🚀 Github Release
        if: steps.version.outputs.release_tag != 'skip'
        uses: ncipollo/release-action@v1
        with:
          tag: ${{ steps.version.outputs.release_tag }}
          token: ${{ secrets.YNPUT_BOT_TOKEN }}

      - name: ☠ Delete Pre-release
        if: steps.version.outputs.release_tag != 'skip'
        uses: cb80/delrel@latest
        with:
          tag: "${{ steps.version.outputs.current_version }}"

      - name: 🔁 Merge main back to develop
        if: steps.version.outputs.release_tag != 'skip'
        uses: everlytic/branch-merge@1.1.0
        with:
          github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
          source_ref: 'main'
          target_branch: 'develop'
          commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}'
2031  CHANGELOG.md

File diff suppressed because it is too large.
@@ -367,11 +367,15 @@ def run(script):
              "--timeout",
              help="Provide specific timeout value for test case",
              default=None)
@click.option("-so",
              "--setup_only",
              help="Only create dbs, do not run tests",
              default=None)
def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant,
             timeout):
             timeout, setup_only):
    """Run all automatic tests after proper initialization via start.py"""
    PypeCommands().run_tests(folder, mark, pyargs, test_data_folder,
                             persist, app_variant, timeout)
                             persist, app_variant, timeout, setup_only)


@main.command()
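The hunk above adds a new `--setup_only` option and threads it through to `run_tests`. A self-contained sketch of the same click pattern (the command body is illustrative; the real option uses `default=None` rather than acting as a boolean flag):

import click

@click.command()
@click.option("-so",
              "--setup_only",
              help="Only create dbs, do not run tests",
              default=None)
def runtests(setup_only):
    """Illustrative stand-in for the real `runtests` command."""
    if setup_only:
        click.echo("Creating test databases only, skipping test run.")
        return
    click.echo("Running tests...")

if __name__ == "__main__":
    runtests()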
@@ -14,6 +14,7 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
    # Execute after workfile template copy
    order = 10
    app_groups = [
        "3dsmax",
        "maya",
        "nuke",
        "nukex",
@@ -13,7 +13,7 @@ class LaunchFoundryAppsWindows(PreLaunchHook):

    # Should be as last hook because must change launch arguments to string
    order = 1000
    app_groups = ["nuke", "nukex", "hiero", "nukestudio"]
    app_groups = ["nuke", "nukeassist", "nukex", "hiero", "nukestudio"]
    platforms = ["windows"]

    def execute(self):
@@ -39,7 +39,6 @@ class HostDirmap(object):
        self._project_settings = project_settings
        self._sync_module = sync_module  # to limit reinit of Modules
        self._log = None
        self._mapping = None  # cache mapping

    @property
    def sync_module(self):

@@ -70,29 +69,28 @@ class HostDirmap(object):
        """Run host dependent remapping from source_path to destination_path"""
        pass

    def process_dirmap(self):
    def process_dirmap(self, mapping=None):
        # type: (dict) -> None
        """Go through all paths in Settings and set them using `dirmap`.

        If artists has Site Sync enabled, take dirmap mapping directly from
        Local Settings when artist is syncing workfile locally.

        Args:
            project_settings (dict): Settings for current project.
        """

        if not self._mapping:
            self._mapping = self.get_mappings(self.project_settings)
        if not self._mapping:
        if not mapping:
            mapping = self.get_mappings()
        if not mapping:
            return

        self.log.info("Processing directory mapping ...")
        self.on_enable_dirmap()
        self.log.info("mapping:: {}".format(self._mapping))

        for k, sp in enumerate(self._mapping["source-path"]):
            dst = self._mapping["destination-path"][k]
        for k, sp in enumerate(mapping["source-path"]):
            dst = mapping["destination-path"][k]
            try:
                # add trailing slash if missing
                sp = os.path.join(sp, '')
                dst = os.path.join(dst, '')
                print("{} -> {}".format(sp, dst))
                self.dirmap_routine(sp, dst)
            except IndexError:

@@ -110,28 +108,24 @@ class HostDirmap(object):
            )
            continue

    def get_mappings(self, project_settings):
    def get_mappings(self):
        """Get translation from source-path to destination-path.

        It checks if Site Sync is enabled and user chose to use local
        site, in that case configuration in Local Settings takes precedence
        """

        local_mapping = self._get_local_sync_dirmap(project_settings)
        dirmap_label = "{}-dirmap".format(self.host_name)
        if (
            not self.project_settings[self.host_name].get(dirmap_label)
            and not local_mapping
        ):
            return {}
        mapping_settings = self.project_settings[self.host_name][dirmap_label]
        mapping_enabled = mapping_settings["enabled"] or bool(local_mapping)
        mapping_sett = self.project_settings[self.host_name].get(dirmap_label,
                                                                 {})
        local_mapping = self._get_local_sync_dirmap()
        mapping_enabled = mapping_sett.get("enabled") or bool(local_mapping)
        if not mapping_enabled:
            return {}

        mapping = (
            local_mapping
            or mapping_settings["paths"]
            or mapping_sett["paths"]
            or {}
        )

@@ -141,28 +135,27 @@ class HostDirmap(object):
            or not mapping.get("source-path")
        ):
            return {}
        self.log.info("Processing directory mapping ...")
        self.log.info("mapping:: {}".format(mapping))
        return mapping

    def _get_local_sync_dirmap(self, project_settings):
    def _get_local_sync_dirmap(self):
        """
        Returns dirmap if synch to local project is enabled.

        Only valid mapping is from roots of remote site to local site set
        in Local Settings.

        Args:
            project_settings (dict)
        Returns:
            dict : { "source-path": [XXX], "destination-path": [YYYY]}
        """
        project_name = os.getenv("AVALON_PROJECT")

        mapping = {}

        if not project_settings["global"]["sync_server"]["enabled"]:
        if (not self.sync_module.enabled or
                project_name not in self.sync_module.get_enabled_projects()):
            return mapping

        project_name = os.getenv("AVALON_PROJECT")

        active_site = self.sync_module.get_local_normalized_site(
            self.sync_module.get_active_site(project_name))
        remote_site = self.sync_module.get_local_normalized_site(

@@ -171,11 +164,7 @@ class HostDirmap(object):
            "active {} - remote {}".format(active_site, remote_site)
        )

        if (
            active_site == "local"
            and project_name in self.sync_module.get_enabled_projects()
            and active_site != remote_site
        ):
        if active_site == "local" and active_site != remote_site:
            sync_settings = self.sync_module.get_sync_project_setting(
                project_name,
                exclude_locals=False,

@@ -188,7 +177,15 @@ class HostDirmap(object):

            self.log.debug("local overrides {}".format(active_overrides))
            self.log.debug("remote overrides {}".format(remote_overrides))

            current_platform = platform.system().lower()
            remote_provider = self.sync_module.get_provider_for_site(
                project_name, remote_site
            )
            # dirmap has sense only with regular disk provider, in the workfile
            # wont be root on cloud or sftp provider
            if remote_provider != "local_drive":
                remote_site = "studio"
            for root_name, active_site_dir in active_overrides.items():
                remote_site_dir = (
                    remote_overrides.get(root_name)
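For context, `get_mappings()` returns parallel lists under "source-path" and "destination-path" (see the docstring above). A standalone sketch of how such a mapping remaps one path; the paths and the helper name are hypothetical, and the real remapping is delegated to the host-specific `dirmap_routine`:

import os

def apply_dirmap(path, mapping):
    """Remap `path` using a dirmap dict of parallel source/destination lists."""
    for index, source in enumerate(mapping.get("source-path", [])):
        destination = mapping["destination-path"][index]
        # Append a trailing separator, mirroring os.path.join(sp, '')
        # in process_dirmap, so "/proj" cannot match "/projects".
        source = os.path.join(source, "")
        destination = os.path.join(destination, "")
        if path.startswith(source):
            return path.replace(source, destination, 1)
    return path

mapping = {
    "source-path": ["/mnt/studio/projects/"],
    "destination-path": ["C:/local/projects/"],
}
print(apply_dirmap("/mnt/studio/projects/sh010/scene.ma", mapping))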
@@ -31,10 +31,13 @@ from .lib import (
    lsattrs,
    read,
    maintained_selection,
    maintained_time,
    get_selection,
    # unique_name,
)

from .capture import capture


__all__ = [
    "install",

@@ -56,9 +59,11 @@ __all__ = [

    # Utility functions
    "maintained_selection",
    "maintained_time",
    "lsattr",
    "lsattrs",
    "read",
    "get_selection",
    "capture",
    # "unique_name",
]
278  openpype/hosts/blender/api/capture.py (new file)

@@ -0,0 +1,278 @@
"""Blender Capture
|
||||
Playblasting with independent viewport, camera and display options
|
||||
"""
|
||||
import contextlib
|
||||
import bpy
|
||||
|
||||
from .lib import maintained_time
|
||||
from .plugin import deselect_all, create_blender_context
|
||||
|
||||
|
||||
def capture(
|
||||
camera=None,
|
||||
width=None,
|
||||
height=None,
|
||||
filename=None,
|
||||
start_frame=None,
|
||||
end_frame=None,
|
||||
step_frame=None,
|
||||
sound=None,
|
||||
isolate=None,
|
||||
maintain_aspect_ratio=True,
|
||||
overwrite=False,
|
||||
image_settings=None,
|
||||
display_options=None
|
||||
):
|
||||
"""Playblast in an independent windows
|
||||
Arguments:
|
||||
camera (str, optional): Name of camera, defaults to "Camera"
|
||||
width (int, optional): Width of output in pixels
|
||||
height (int, optional): Height of output in pixels
|
||||
filename (str, optional): Name of output file path. Defaults to current
|
||||
render output path.
|
||||
start_frame (int, optional): Defaults to current start frame.
|
||||
end_frame (int, optional): Defaults to current end frame.
|
||||
step_frame (int, optional): Defaults to 1.
|
||||
sound (str, optional): Specify the sound node to be used during
|
||||
playblast. When None (default) no sound will be used.
|
||||
isolate (list): List of nodes to isolate upon capturing
|
||||
maintain_aspect_ratio (bool, optional): Modify height in order to
|
||||
maintain aspect ratio.
|
||||
overwrite (bool, optional): Whether or not to overwrite if file
|
||||
already exists. If disabled and file exists and error will be
|
||||
raised.
|
||||
image_settings (dict, optional): Supplied image settings for render,
|
||||
using `ImageSettings`
|
||||
display_options (dict, optional): Supplied display options for render
|
||||
"""
|
||||
|
||||
scene = bpy.context.scene
|
||||
camera = camera or "Camera"
|
||||
|
||||
# Ensure camera exists.
|
||||
if camera not in scene.objects and camera != "AUTO":
|
||||
raise RuntimeError("Camera does not exist: {0}".format(camera))
|
||||
|
||||
# Ensure resolution.
|
||||
if width and height:
|
||||
maintain_aspect_ratio = False
|
||||
width = width or scene.render.resolution_x
|
||||
height = height or scene.render.resolution_y
|
||||
if maintain_aspect_ratio:
|
||||
ratio = scene.render.resolution_x / scene.render.resolution_y
|
||||
height = round(width / ratio)
|
||||
|
||||
# Get frame range.
|
||||
if start_frame is None:
|
||||
start_frame = scene.frame_start
|
||||
if end_frame is None:
|
||||
end_frame = scene.frame_end
|
||||
if step_frame is None:
|
||||
step_frame = 1
|
||||
frame_range = (start_frame, end_frame, step_frame)
|
||||
|
||||
if filename is None:
|
||||
filename = scene.render.filepath
|
||||
|
||||
render_options = {
|
||||
"filepath": "{}.".format(filename.rstrip(".")),
|
||||
"resolution_x": width,
|
||||
"resolution_y": height,
|
||||
"use_overwrite": overwrite,
|
||||
}
|
||||
|
||||
with _independent_window() as window:
|
||||
|
||||
applied_view(window, camera, isolate, options=display_options)
|
||||
|
||||
with contextlib.ExitStack() as stack:
|
||||
stack.enter_context(maintain_camera(window, camera))
|
||||
stack.enter_context(applied_frame_range(window, *frame_range))
|
||||
stack.enter_context(applied_render_options(window, render_options))
|
||||
stack.enter_context(applied_image_settings(window, image_settings))
|
||||
stack.enter_context(maintained_time())
|
||||
|
||||
bpy.ops.render.opengl(
|
||||
animation=True,
|
||||
render_keyed_only=False,
|
||||
sequencer=False,
|
||||
write_still=False,
|
||||
view_context=True
|
||||
)
|
||||
|
||||
return filename
|
||||
|
||||
|
||||
ImageSettings = {
|
||||
"file_format": "FFMPEG",
|
||||
"color_mode": "RGB",
|
||||
"ffmpeg": {
|
||||
"format": "QUICKTIME",
|
||||
"use_autosplit": False,
|
||||
"codec": "H264",
|
||||
"constant_rate_factor": "MEDIUM",
|
||||
"gopsize": 18,
|
||||
"use_max_b_frames": False,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def isolate_objects(window, objects):
|
||||
"""Isolate selection"""
|
||||
deselect_all()
|
||||
|
||||
for obj in objects:
|
||||
obj.select_set(True)
|
||||
|
||||
context = create_blender_context(selected=objects, window=window)
|
||||
|
||||
bpy.ops.view3d.view_axis(context, type="FRONT")
|
||||
bpy.ops.view3d.localview(context)
|
||||
|
||||
deselect_all()
|
||||
|
||||
|
||||
def _apply_options(entity, options):
|
||||
for option, value in options.items():
|
||||
if isinstance(value, dict):
|
||||
_apply_options(getattr(entity, option), value)
|
||||
else:
|
||||
setattr(entity, option, value)
|
||||
|
||||
|
||||
def applied_view(window, camera, isolate=None, options=None):
|
||||
"""Apply view options to window."""
|
||||
area = window.screen.areas[0]
|
||||
space = area.spaces[0]
|
||||
|
||||
area.ui_type = "VIEW_3D"
|
||||
|
||||
meshes = [obj for obj in window.scene.objects if obj.type == "MESH"]
|
||||
|
||||
if camera == "AUTO":
|
||||
space.region_3d.view_perspective = "ORTHO"
|
||||
isolate_objects(window, isolate or meshes)
|
||||
else:
|
||||
isolate_objects(window, isolate or meshes)
|
||||
space.camera = window.scene.objects.get(camera)
|
||||
space.region_3d.view_perspective = "CAMERA"
|
||||
|
||||
if isinstance(options, dict):
|
||||
_apply_options(space, options)
|
||||
else:
|
||||
space.shading.type = "SOLID"
|
||||
space.shading.color_type = "MATERIAL"
|
||||
space.show_gizmo = False
|
||||
space.overlay.show_overlays = False
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def applied_frame_range(window, start, end, step):
|
||||
"""Context manager for setting frame range."""
|
||||
# Store current frame range
|
||||
current_frame_start = window.scene.frame_start
|
||||
current_frame_end = window.scene.frame_end
|
||||
current_frame_step = window.scene.frame_step
|
||||
# Apply frame range
|
||||
window.scene.frame_start = start
|
||||
window.scene.frame_end = end
|
||||
window.scene.frame_step = step
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
# Restore frame range
|
||||
window.scene.frame_start = current_frame_start
|
||||
window.scene.frame_end = current_frame_end
|
||||
window.scene.frame_step = current_frame_step
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def applied_render_options(window, options):
|
||||
"""Context manager for setting render options."""
|
||||
render = window.scene.render
|
||||
|
||||
# Store current settings
|
||||
original = {}
|
||||
for opt in options.copy():
|
||||
try:
|
||||
original[opt] = getattr(render, opt)
|
||||
except ValueError:
|
||||
options.pop(opt)
|
||||
|
||||
# Apply settings
|
||||
_apply_options(render, options)
|
||||
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
# Restore previous settings
|
||||
_apply_options(render, original)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def applied_image_settings(window, options):
|
||||
"""Context manager to override image settings."""
|
||||
|
||||
options = options or ImageSettings.copy()
|
||||
ffmpeg = options.pop("ffmpeg", {})
|
||||
render = window.scene.render
|
||||
|
||||
# Store current image settings
|
||||
original = {}
|
||||
for opt in options.copy():
|
||||
try:
|
||||
original[opt] = getattr(render.image_settings, opt)
|
||||
except ValueError:
|
||||
options.pop(opt)
|
||||
|
||||
# Store current ffmpeg settings
|
||||
original_ffmpeg = {}
|
||||
for opt in ffmpeg.copy():
|
||||
try:
|
||||
original_ffmpeg[opt] = getattr(render.ffmpeg, opt)
|
||||
except ValueError:
|
||||
ffmpeg.pop(opt)
|
||||
|
||||
# Apply image settings
|
||||
for opt, value in options.items():
|
||||
setattr(render.image_settings, opt, value)
|
||||
|
||||
# Apply ffmpeg settings
|
||||
for opt, value in ffmpeg.items():
|
||||
setattr(render.ffmpeg, opt, value)
|
||||
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
# Restore previous settings
|
||||
for opt, value in original.items():
|
||||
setattr(render.image_settings, opt, value)
|
||||
for opt, value in original_ffmpeg.items():
|
||||
setattr(render.ffmpeg, opt, value)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def maintain_camera(window, camera):
|
||||
"""Context manager to override camera."""
|
||||
current_camera = window.scene.camera
|
||||
if camera in window.scene.objects:
|
||||
window.scene.camera = window.scene.objects.get(camera)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
window.scene.camera = current_camera
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _independent_window():
|
||||
"""Create capture-window context."""
|
||||
context = create_blender_context()
|
||||
current_windows = set(bpy.context.window_manager.windows)
|
||||
bpy.ops.wm.window_new(context)
|
||||
window = list(set(bpy.context.window_manager.windows) - current_windows)[0]
|
||||
context["window"] = window
|
||||
try:
|
||||
yield window
|
||||
finally:
|
||||
bpy.ops.wm.window_close(context)
|
||||
|
|
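A hypothetical call from Blender's Python console, put together from the `capture()` docstring above; the camera name and output path are illustrative:

from openpype.hosts.blender.api.capture import capture

output = capture(
    camera="Camera",               # must exist in the scene, or "AUTO"
    width=1920,
    height=1080,
    filename="/tmp/review/sh010",  # extension is added by the image settings
    start_frame=1001,
    end_frame=1050,
    overwrite=True,
    # image_settings=None falls back to the QuickTime ImageSettings above
)
print("Playblast written to:", output)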
@@ -284,3 +284,13 @@ def maintained_selection():
            # This could happen if the active node was deleted during the
            # context.
            log.exception("Failed to set active object.")


@contextlib.contextmanager
def maintained_time():
    """Maintain current frame during context."""
    current_time = bpy.context.scene.frame_current
    try:
        yield
    finally:
        bpy.context.scene.frame_current = current_time
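Minimal usage sketch of the new context manager (runs only inside Blender; the frame number is illustrative): whatever scrubbing happens inside the block, the original frame is restored on exit, even if an exception is raised.

import bpy
from openpype.hosts.blender.api.lib import maintained_time

with maintained_time():
    # Temporarily jump around the timeline...
    bpy.context.scene.frame_current = 1001
# ...and the original current frame is restored here.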
@@ -382,8 +382,8 @@ class TOPBAR_MT_avalon(bpy.types.Menu):
        layout.operator(LaunchLibrary.bl_idname, text="Library...")
        layout.separator()
        layout.operator(LaunchWorkFiles.bl_idname, text="Work Files...")
        # TODO (jasper): maybe add 'Reload Pipeline', 'Reset Frame Range' and
        # 'Reset Resolution'?
        # TODO (jasper): maybe add 'Reload Pipeline', 'Set Frame Range' and
        # 'Set Resolution'?


def draw_avalon_menu(self, context):
@@ -62,7 +62,8 @@ def prepare_data(data, container_name=None):


def create_blender_context(active: Optional[bpy.types.Object] = None,
                           selected: Optional[bpy.types.Object] = None,):
                           selected: Optional[bpy.types.Object] = None,
                           window: Optional[bpy.types.Window] = None):
    """Create a new Blender context. If an object is passed as
    parameter, it is set as selected and active.
    """

@@ -72,7 +73,9 @@ def create_blender_context(active: Optional[bpy.types.Object] = None,

    override_context = bpy.context.copy()

    for win in bpy.context.window_manager.windows:
    windows = [window] if window else bpy.context.window_manager.windows

    for win in windows:
        for area in win.screen.areas:
            if area.type == 'VIEW_3D':
                for region in area.regions:
47  openpype/hosts/blender/plugins/create/create_review.py (new file)

@@ -0,0 +1,47 @@
"""Create review."""
|
||||
|
||||
import bpy
|
||||
|
||||
from openpype.pipeline import legacy_io
|
||||
from openpype.hosts.blender.api import plugin, lib, ops
|
||||
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
|
||||
|
||||
|
||||
class CreateReview(plugin.Creator):
|
||||
"""Single baked camera"""
|
||||
|
||||
name = "reviewDefault"
|
||||
label = "Review"
|
||||
family = "review"
|
||||
icon = "video-camera"
|
||||
|
||||
def process(self):
|
||||
""" Run the creator on Blender main thread"""
|
||||
mti = ops.MainThreadItem(self._process)
|
||||
ops.execute_in_main_thread(mti)
|
||||
|
||||
def _process(self):
|
||||
# Get Instance Container or create it if it does not exist
|
||||
instances = bpy.data.collections.get(AVALON_INSTANCES)
|
||||
if not instances:
|
||||
instances = bpy.data.collections.new(name=AVALON_INSTANCES)
|
||||
bpy.context.scene.collection.children.link(instances)
|
||||
|
||||
# Create instance object
|
||||
asset = self.data["asset"]
|
||||
subset = self.data["subset"]
|
||||
name = plugin.asset_name(asset, subset)
|
||||
asset_group = bpy.data.collections.new(name=name)
|
||||
instances.children.link(asset_group)
|
||||
self.data['task'] = legacy_io.Session.get('AVALON_TASK')
|
||||
lib.imprint(asset_group, self.data)
|
||||
|
||||
if (self.options or {}).get("useSelection"):
|
||||
selected = lib.get_selection()
|
||||
for obj in selected:
|
||||
asset_group.objects.link(obj)
|
||||
elif (self.options or {}).get("asset_group"):
|
||||
obj = (self.options or {}).get("asset_group")
|
||||
asset_group.objects.link(obj)
|
||||
|
||||
return asset_group
|
||||
64  openpype/hosts/blender/plugins/publish/collect_review.py (new file)

@@ -0,0 +1,64 @@
import bpy

import pyblish.api
from openpype.pipeline import legacy_io


class CollectReview(pyblish.api.InstancePlugin):
    """Collect Review data

    """

    order = pyblish.api.CollectorOrder + 0.3
    label = "Collect Review Data"
    families = ["review"]

    def process(self, instance):

        self.log.debug(f"instance: {instance}")

        # get cameras
        cameras = [
            obj
            for obj in instance
            if isinstance(obj, bpy.types.Object) and obj.type == "CAMERA"
        ]

        assert len(cameras) == 1, (
            f"Not a single camera found in extraction: {cameras}"
        )
        camera = cameras[0].name
        self.log.debug(f"camera: {camera}")

        # get isolate objects list from meshes instance members .
        isolate_objects = [
            obj
            for obj in instance
            if isinstance(obj, bpy.types.Object) and obj.type == "MESH"
        ]

        if not instance.data.get("remove"):

            task = legacy_io.Session.get("AVALON_TASK")

            instance.data.update({
                "subset": f"{task}Review",
                "review_camera": camera,
                "frameStart": instance.context.data["frameStart"],
                "frameEnd": instance.context.data["frameEnd"],
                "fps": instance.context.data["fps"],
                "isolate": isolate_objects,
            })

            self.log.debug(f"instance data: {instance.data}")

            # TODO : Collect audio
            audio_tracks = []
            instance.data["audio"] = []
            for track in audio_tracks:
                instance.data["audio"].append(
                    {
                        "offset": track.offset.get(),
                        "filename": track.filename.get(),
                    }
                )
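For reference, this is the shape of the data the collector writes onto a review instance; the keys come from the plugin above, the values are made up:

instance_data = {
    "subset": "animationReview",  # f"{task}Review"
    "review_camera": "Camera",
    "frameStart": 1001,
    "frameEnd": 1050,
    "fps": 24,
    "isolate": [],                # MESH objects gathered from the instance
    "audio": [],                  # audio collection is still a TODO above
}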
123  openpype/hosts/blender/plugins/publish/extract_playblast.py (new file)

@@ -0,0 +1,123 @@
import os
import clique

import bpy

import pyblish.api
from openpype.pipeline import publish
from openpype.hosts.blender.api import capture
from openpype.hosts.blender.api.lib import maintained_time


class ExtractPlayblast(publish.Extractor):
    """
    Extract viewport playblast.

    Takes review camera and creates review Quicktime video based on viewport
    capture.
    """

    label = "Extract Playblast"
    hosts = ["blender"]
    families = ["review"]
    optional = True
    order = pyblish.api.ExtractorOrder + 0.01

    def process(self, instance):
        self.log.info("Extracting capture..")

        self.log.info(instance.data)

        # get scene fps
        fps = instance.data.get("fps")
        if fps is None:
            fps = bpy.context.scene.render.fps
            instance.data["fps"] = fps

        self.log.info(f"fps: {fps}")

        # If start and end frames cannot be determined,
        # get them from Blender timeline.
        start = instance.data.get("frameStart", bpy.context.scene.frame_start)
        end = instance.data.get("frameEnd", bpy.context.scene.frame_end)

        self.log.info(f"start: {start}, end: {end}")
        assert end > start, "Invalid time range !"

        # get cameras
        camera = instance.data("review_camera", None)

        # get isolate objects list
        isolate = instance.data("isolate", None)

        # get ouput path
        stagingdir = self.staging_dir(instance)
        filename = instance.name
        path = os.path.join(stagingdir, filename)

        self.log.info(f"Outputting images to {path}")

        project_settings = instance.context.data["project_settings"]["blender"]
        presets = project_settings["publish"]["ExtractPlayblast"]["presets"]
        preset = presets.get("default")
        preset.update({
            "camera": camera,
            "start_frame": start,
            "end_frame": end,
            "filename": path,
            "overwrite": True,
            "isolate": isolate,
        })
        preset.setdefault(
            "image_settings",
            {
                "file_format": "PNG",
                "color_mode": "RGB",
                "color_depth": "8",
                "compression": 15,
            },
        )

        with maintained_time():
            path = capture(**preset)

        self.log.debug(f"playblast path {path}")

        collected_files = os.listdir(stagingdir)
        collections, remainder = clique.assemble(
            collected_files,
            patterns=[f"{filename}\\.{clique.DIGITS_PATTERN}\\.png$"],
        )

        if len(collections) > 1:
            raise RuntimeError(
                f"More than one collection found in stagingdir: {stagingdir}"
            )
        elif len(collections) == 0:
            raise RuntimeError(
                f"No collection found in stagingdir: {stagingdir}"
            )

        frame_collection = collections[0]

        self.log.info(f"We found collection of interest {frame_collection}")

        instance.data.setdefault("representations", [])

        tags = ["review"]
        if not instance.data.get("keepImages"):
            tags.append("delete")

        representation = {
            "name": "png",
            "ext": "png",
            "files": list(frame_collection),
            "stagingDir": stagingdir,
            "frameStart": start,
            "frameEnd": end,
            "fps": fps,
            "preview": True,
            "tags": tags,
            "camera_name": camera
        }
        instance.data["representations"].append(representation)
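The extractor leans on `clique` to gather the rendered frames back into a single sequence. A standalone sketch of that call with illustrative file names:

import clique

files = ["sh010.1001.png", "sh010.1002.png", "sh010.1003.png"]
collections, remainder = clique.assemble(
    files,
    patterns=[f"sh010\\.{clique.DIGITS_PATTERN}\\.png$"],
)
frames = collections[0]
print(frames)        # e.g. sh010.%04d.png [1001-1003]
print(list(frames))  # the individual frame file names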
99  openpype/hosts/blender/plugins/publish/extract_thumbnail.py (new file)

@@ -0,0 +1,99 @@
import os
import glob

import pyblish.api
from openpype.pipeline import publish
from openpype.hosts.blender.api import capture
from openpype.hosts.blender.api.lib import maintained_time

import bpy


class ExtractThumbnail(publish.Extractor):
    """Extract viewport thumbnail.

    Takes review camera and creates a thumbnail based on viewport
    capture.

    """

    label = "Extract Thumbnail"
    hosts = ["blender"]
    families = ["review"]
    order = pyblish.api.ExtractorOrder + 0.01
    presets = {}

    def process(self, instance):
        self.log.info("Extracting capture..")

        stagingdir = self.staging_dir(instance)
        filename = instance.name
        path = os.path.join(stagingdir, filename)

        self.log.info(f"Outputting images to {path}")

        camera = instance.data.get("review_camera", "AUTO")
        start = instance.data.get("frameStart", bpy.context.scene.frame_start)
        family = instance.data.get("family")
        isolate = instance.data("isolate", None)

        preset = self.presets.get(family, {})

        preset.update({
            "camera": camera,
            "start_frame": start,
            "end_frame": start,
            "filename": path,
            "overwrite": True,
            "isolate": isolate,
        })
        preset.setdefault(
            "image_settings",
            {
                "file_format": "JPEG",
                "color_mode": "RGB",
                "quality": 100,
            },
        )

        with maintained_time():
            path = capture(**preset)

        thumbnail = os.path.basename(self._fix_output_path(path))

        self.log.info(f"thumbnail: {thumbnail}")

        instance.data.setdefault("representations", [])

        representation = {
            "name": "thumbnail",
            "ext": "jpg",
            "files": thumbnail,
            "stagingDir": stagingdir,
            "thumbnail": True
        }
        instance.data["representations"].append(representation)

    def _fix_output_path(self, filepath):
        """Workaround to return correct filepath.

        To workaround this we just glob.glob() for any file extensions and
        assume the latest modified file is the correct file and return it.

        """
        # Catch cancelled playblast
        if filepath is None:
            self.log.warning(
                "Playblast did not result in output path. "
                "Playblast is probably interrupted."
            )
            return None

        if not os.path.exists(filepath):
            files = glob.glob(f"{filepath}.*.jpg")

            if not files:
                raise RuntimeError(f"Couldn't find playblast from: {filepath}")
            filepath = max(files, key=os.path.getmtime)

        return filepath
@@ -702,6 +702,37 @@ class ClipLoader(LoaderPlugin):

    _mapping = None

    def apply_settings(cls, project_settings, system_settings):

        plugin_type_settings = (
            project_settings
            .get("flame", {})
            .get("load", {})
        )

        if not plugin_type_settings:
            return

        plugin_name = cls.__name__

        plugin_settings = None
        # Look for plugin settings in host specific settings
        if plugin_name in plugin_type_settings:
            plugin_settings = plugin_type_settings[plugin_name]

        if not plugin_settings:
            return

        print(">>> We have preset for {}".format(plugin_name))
        for option, value in plugin_settings.items():
            if option == "enabled" and value is False:
                print("  - is disabled by preset")
            elif option == "representations":
                continue
            else:
                print("  - setting `{}`: `{}`".format(option, value))
                setattr(cls, option, value)

    def get_colorspace(self, context):
        """Get colorspace name
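`apply_settings` above simply copies matching project-settings values onto the plugin class as attributes. A stripped-down sketch of that mechanism (the class and settings here are hypothetical):

class MyLoader:
    enabled = True
    label = "Load as clip"

plugin_settings = {"enabled": True, "label": "Load as clip (studio)"}
for option, value in plugin_settings.items():
    if option == "representations":
        continue  # handled separately by the real implementation
    setattr(MyLoader, option, value)

print(MyLoader.label)  # -> Load as clip (studio)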
@@ -4,6 +4,10 @@ import flame
from pprint import pformat
import openpype.hosts.flame.api as opfapi
from openpype.lib import StringTemplate
from openpype.lib.transcoding import (
    VIDEO_EXTENSIONS,
    IMAGE_EXTENSIONS
)


class LoadClip(opfapi.ClipLoader):

@@ -14,7 +18,10 @@ class LoadClip(opfapi.ClipLoader):
    """

    families = ["render2d", "source", "plate", "render", "review"]
    representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264"]
    representations = ["*"]
    extensions = set(
        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
    )

    label = "Load as clip"
    order = -10
@@ -4,7 +4,10 @@ import flame
from pprint import pformat
import openpype.hosts.flame.api as opfapi
from openpype.lib import StringTemplate

from openpype.lib.transcoding import (
    VIDEO_EXTENSIONS,
    IMAGE_EXTENSIONS
)

class LoadClipBatch(opfapi.ClipLoader):
    """Load a subset to timeline as clip

@@ -14,7 +17,10 @@ class LoadClipBatch(opfapi.ClipLoader):
    """

    families = ["render2d", "source", "plate", "render", "review"]
    representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264"]
    representations = ["*"]
    extensions = set(
        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
    )

    label = "Load as clip to current batch"
    order = -10
@@ -1,20 +1,11 @@
from .pipeline import (
    install,
    uninstall,

    FusionHost,
    ls,

    imprint_container,
    parse_container
)

from .workio import (
    open_file,
    save_file,
    current_file,
    has_unsaved_changes,
    file_extensions,
    work_root
    parse_container,
    list_instances,
    remove_instance
)

from .lib import (

@@ -30,21 +21,11 @@ from .menu import launch_openpype_menu

__all__ = [
    # pipeline
    "install",
    "uninstall",
    "ls",

    "imprint_container",
    "parse_container",

    # workio
    "open_file",
    "save_file",
    "current_file",
    "has_unsaved_changes",
    "file_extensions",
    "work_root",

    # lib
    "maintained_selection",
    "update_frame_range",
54  openpype/hosts/fusion/api/action.py (new file)

@@ -0,0 +1,54 @@
import pyblish.api


from openpype.hosts.fusion.api.lib import get_current_comp
from openpype.pipeline.publish import get_errored_instances_from_context


class SelectInvalidAction(pyblish.api.Action):
    """Select invalid nodes in Maya when plug-in failed.

    To retrieve the invalid nodes this assumes a static `get_invalid()`
    method is available on the plugin.

    """
    label = "Select invalid"
    on = "failed"  # This action is only available on a failed plug-in
    icon = "search"  # Icon from Awesome Icon

    def process(self, context, plugin):
        errored_instances = get_errored_instances_from_context(context)

        # Apply pyblish.logic to get the instances for the plug-in
        instances = pyblish.api.instances_by_plugin(errored_instances, plugin)

        # Get the invalid nodes for the plug-ins
        self.log.info("Finding invalid nodes..")
        invalid = list()
        for instance in instances:
            invalid_nodes = plugin.get_invalid(instance)
            if invalid_nodes:
                if isinstance(invalid_nodes, (list, tuple)):
                    invalid.extend(invalid_nodes)
                else:
                    self.log.warning("Plug-in returned to be invalid, "
                                     "but has no selectable nodes.")

        if not invalid:
            # Assume relevant comp is current comp and clear selection
            self.log.info("No invalid tools found.")
            comp = get_current_comp()
            flow = comp.CurrentFrame.FlowView
            flow.Select()  # No args equals clearing selection
            return

        # Assume a single comp
        first_tool = invalid[0]
        comp = first_tool.Comp()
        flow = comp.CurrentFrame.FlowView
        flow.Select()  # No args equals clearing selection
        names = set()
        for tool in invalid:
            flow.Select(tool, True)
            names.add(tool.Name)
        self.log.info("Selecting invalid tools: %s" % ", ".join(sorted(names)))
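To use the new action, a validator attaches it through its `actions` list and exposes the static `get_invalid()` the action's docstring expects. A hypothetical sketch (the validator class and import path are illustrative):

import pyblish.api
from openpype.hosts.fusion.api.action import SelectInvalidAction

class ValidateExample(pyblish.api.InstancePlugin):
    """Hypothetical validator wired up with SelectInvalidAction."""
    order = pyblish.api.ValidatorOrder
    families = ["render"]
    actions = [SelectInvalidAction]

    @classmethod
    def get_invalid(cls, instance):
        return []  # return the offending Fusion tools here

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Found invalid tools: %s" % invalid)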
@@ -210,7 +210,8 @@ def switch_item(container,
    if any(not x for x in [asset_name, subset_name, representation_name]):
        repre_id = container["representation"]
        representation = get_representation_by_id(project_name, repre_id)
        repre_parent_docs = get_representation_parents(representation)
        repre_parent_docs = get_representation_parents(
            project_name, representation)
        if repre_parent_docs:
            version, subset, asset, _ = repre_parent_docs
        else:
@@ -7,11 +7,11 @@ from openpype.style import load_stylesheet
from openpype.lib import register_event_callback
from openpype.hosts.fusion.scripts import (
    set_rendermode,
    duplicate_with_inputs
    duplicate_with_inputs,
)
from openpype.hosts.fusion.api.lib import (
    set_asset_framerange,
    set_asset_resolution
    set_asset_resolution,
)
from openpype.pipeline import legacy_io
from openpype.resources import get_openpype_icon_filepath

@@ -45,17 +45,19 @@ class OpenPypeMenu(QtWidgets.QWidget):
        self.setWindowTitle("OpenPype")

        asset_label = QtWidgets.QLabel("Context", self)
        asset_label.setStyleSheet("""QLabel {
        asset_label.setStyleSheet(
            """QLabel {
            font-size: 14px;
            font-weight: 600;
            color: #5f9fb8;
        }""")
            }"""
        )
        asset_label.setAlignment(QtCore.Qt.AlignHCenter)

        workfiles_btn = QtWidgets.QPushButton("Workfiles...", self)
        create_btn = QtWidgets.QPushButton("Create...", self)
        publish_btn = QtWidgets.QPushButton("Publish...", self)
        load_btn = QtWidgets.QPushButton("Load...", self)
        publish_btn = QtWidgets.QPushButton("Publish...", self)
        manager_btn = QtWidgets.QPushButton("Manage...", self)
        libload_btn = QtWidgets.QPushButton("Library...", self)
        rendermode_btn = QtWidgets.QPushButton("Set render mode...", self)

@@ -108,7 +110,8 @@ class OpenPypeMenu(QtWidgets.QWidget):
        libload_btn.clicked.connect(self.on_libload_clicked)
        rendermode_btn.clicked.connect(self.on_rendermode_clicked)
        duplicate_with_inputs_btn.clicked.connect(
            self.on_duplicate_with_inputs_clicked)
            self.on_duplicate_with_inputs_clicked
        )
        set_resolution_btn.clicked.connect(self.on_set_resolution_clicked)
        set_framerange_btn.clicked.connect(self.on_set_framerange_clicked)

@@ -130,7 +133,6 @@ class OpenPypeMenu(QtWidgets.QWidget):
        self.asset_label.setText(label)

    def register_callback(self, name, fn):

        # Create a wrapper callback that we only store
        # for as long as we want it to persist as callback
        def _callback(*args):

@@ -146,10 +148,10 @@ class OpenPypeMenu(QtWidgets.QWidget):
        host_tools.show_workfiles()

    def on_create_clicked(self):
        host_tools.show_creator()
        host_tools.show_publisher(tab="create")

    def on_publish_clicked(self):
        host_tools.show_publish()
        host_tools.show_publisher(tab="publish")

    def on_load_clicked(self):
        host_tools.show_loader(use_context=True)
@@ -4,6 +4,7 @@ Basic avalon integration
import os
import sys
import logging
import contextlib

import pyblish.api
from qtpy import QtCore

@@ -17,15 +18,14 @@ from openpype.pipeline import (
    register_loader_plugin_path,
    register_creator_plugin_path,
    register_inventory_action_path,
    deregister_loader_plugin_path,
    deregister_creator_plugin_path,
    deregister_inventory_action_path,
    AVALON_CONTAINER_ID,
)
from openpype.pipeline.load import any_outdated_containers
from openpype.hosts.fusion import FUSION_HOST_DIR
from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost
from openpype.tools.utils import host_tools


from .lib import (
    get_current_comp,
    comp_lock_and_undo_chunk,

@@ -66,94 +66,98 @@ class FusionLogHandler(logging.Handler):
        self.print(entry)


def install():
    """Install fusion-specific functionality of OpenPype.
class FusionHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
    name = "fusion"

    This is where you install menus and register families, data
    and loaders into fusion.
    def install(self):
        """Install fusion-specific functionality of OpenPype.

    It is called automatically when installing via
    `openpype.pipeline.install_host(openpype.hosts.fusion.api)`
        This is where you install menus and register families, data
        and loaders into fusion.

    See the Maya equivalent for inspiration on how to implement this.
        It is called automatically when installing via
        `openpype.pipeline.install_host(openpype.hosts.fusion.api)`

    """
    # Remove all handlers associated with the root logger object, because
    # that one always logs as "warnings" incorrectly.
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
        See the Maya equivalent for inspiration on how to implement this.

    # Attach default logging handler that prints to active comp
    logger = logging.getLogger()
    formatter = logging.Formatter(fmt="%(message)s\n")
    handler = FusionLogHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
        """
        # Remove all handlers associated with the root logger object, because
        # that one always logs as "warnings" incorrectly.
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)

    pyblish.api.register_host("fusion")
    pyblish.api.register_plugin_path(PUBLISH_PATH)
    log.info("Registering Fusion plug-ins..")
        # Attach default logging handler that prints to active comp
        logger = logging.getLogger()
        formatter = logging.Formatter(fmt="%(message)s\n")
        handler = FusionLogHandler()
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)

    register_loader_plugin_path(LOAD_PATH)
    register_creator_plugin_path(CREATE_PATH)
    register_inventory_action_path(INVENTORY_PATH)
        pyblish.api.register_host("fusion")
        pyblish.api.register_plugin_path(PUBLISH_PATH)
        log.info("Registering Fusion plug-ins..")

    pyblish.api.register_callback(
        "instanceToggled", on_pyblish_instance_toggled
    )
        register_loader_plugin_path(LOAD_PATH)
        register_creator_plugin_path(CREATE_PATH)
        register_inventory_action_path(INVENTORY_PATH)

    # Register events
    register_event_callback("open", on_after_open)
    register_event_callback("save", on_save)
    register_event_callback("new", on_new)
        # Register events
        register_event_callback("open", on_after_open)
        register_event_callback("save", on_save)
        register_event_callback("new", on_new)

    # region workfile io api
    def has_unsaved_changes(self):
        comp = get_current_comp()
        return comp.GetAttrs()["COMPB_Modified"]


def uninstall():
    """Uninstall all that was installed
    def get_workfile_extensions(self):
        return [".comp"]

    This is where you undo everything that was done in `install()`.
    That means, removing menus, deregistering families and data
    and everything. It should be as though `install()` was never run,
    because odds are calling this function means the user is interested
    in re-installing shortly afterwards. If, for example, he has been
    modifying the menu or registered families.
    def save_workfile(self, dst_path=None):
        comp = get_current_comp()
        comp.Save(dst_path)

    """
    pyblish.api.deregister_host("fusion")
    pyblish.api.deregister_plugin_path(PUBLISH_PATH)
    log.info("Deregistering Fusion plug-ins..")
    def open_workfile(self, filepath):
        # Hack to get fusion, see
        # openpype.hosts.fusion.api.pipeline.get_current_comp()
        fusion = getattr(sys.modules["__main__"], "fusion", None)

    deregister_loader_plugin_path(LOAD_PATH)
    deregister_creator_plugin_path(CREATE_PATH)
    deregister_inventory_action_path(INVENTORY_PATH)
        return fusion.LoadComp(filepath)

    pyblish.api.deregister_callback(
        "instanceToggled", on_pyblish_instance_toggled
    )
    def get_current_workfile(self):
        comp = get_current_comp()
        current_filepath = comp.GetAttrs()["COMPS_FileName"]
        if not current_filepath:
            return None

        return current_filepath


def on_pyblish_instance_toggled(instance, old_value, new_value):
    """Toggle saver tool passthrough states on instance toggles."""
    comp = instance.context.data.get("currentComp")
    if not comp:
        return
    def work_root(self, session):
        work_dir = session["AVALON_WORKDIR"]
        scene_dir = session.get("AVALON_SCENEDIR")
        if scene_dir:
            return os.path.join(work_dir, scene_dir)
        else:
            return work_dir
    # endregion

    savers = [tool for tool in instance if
              getattr(tool, "ID", None) == "Saver"]
    if not savers:
        return
    @contextlib.contextmanager
    def maintained_selection(self):
        from .lib import maintained_selection
        return maintained_selection()

    # Whether instances should be passthrough based on new value
    passthrough = not new_value
    with comp_lock_and_undo_chunk(comp,
                                  undo_queue_name="Change instance "
                                                  "active state"):
        for tool in savers:
            attrs = tool.GetAttrs()
            current = attrs["TOOLB_PassThrough"]
            if current != passthrough:
                tool.SetAttrs({"TOOLB_PassThrough": passthrough})
    def get_containers(self):
        return ls()

    def update_context_data(self, data, changes):
        comp = get_current_comp()
        comp.SetData("openpype", data)

    def get_context_data(self):
        comp = get_current_comp()
        return comp.GetData("openpype") or {}


def on_new(event):

@@ -283,9 +287,51 @@ def parse_container(tool):
    return container


# TODO: Function below is currently unused prototypes
def list_instances(creator_id=None):
    """Return created instances in current workfile which will be published.
    Returns:
        (list) of dictionaries matching instances format
    """

    comp = get_current_comp()
    tools = comp.GetToolList(False).values()

    instance_signature = {
        "id": "pyblish.avalon.instance",
        "identifier": creator_id
    }
    instances = []
    for tool in tools:

        data = tool.GetData('openpype')
        if not isinstance(data, dict):
            continue

        if data.get("id") != instance_signature["id"]:
            continue

        if creator_id and data.get("identifier") != creator_id:
            continue

        instances.append(tool)

    return instances


# TODO: Function below is currently unused prototypes
def remove_instance(instance):
    """Remove instance from current workfile.

    Args:
        instance (dict): instance representation from subsetmanager model
    """
    # Assume instance is a Fusion tool directly
    instance["tool"].Delete()


class FusionEventThread(QtCore.QThread):
    """QThread which will periodically ping Fusion app for any events.

    The fusion.UIManager must be set up to be notified of events before they'll
    be reported by this thread, for example:
        fusion.UIManager.AddNotify("Comp_Save", None)
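The new IPublishHost methods above store publisher context data on the composition itself. A sketch of that round trip from Fusion's scripting console (the data values are illustrative):

# Inside Fusion's Python console:
comp = fusion.GetCurrentComp()
comp.SetData("openpype", {"project": "demo", "asset": "sh010"})
data = comp.GetData("openpype") or {}
print(data)  # round-trips the dict stored on the comp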
@@ -1,45 +0,0 @@
"""Host API required Work Files tool"""
import sys
import os

from .lib import get_current_comp


def file_extensions():
    return [".comp"]


def has_unsaved_changes():
    comp = get_current_comp()
    return comp.GetAttrs()["COMPB_Modified"]


def save_file(filepath):
    comp = get_current_comp()
    comp.Save(filepath)


def open_file(filepath):
    # Hack to get fusion, see
    # openpype.hosts.fusion.api.pipeline.get_current_comp()
    fusion = getattr(sys.modules["__main__"], "fusion", None)

    return fusion.LoadComp(filepath)


def current_file():
    comp = get_current_comp()
    current_filepath = comp.GetAttrs()["COMPS_FileName"]
    if not current_filepath:
        return None

    return current_filepath


def work_root(session):
    work_dir = session["AVALON_WORKDIR"]
    scene_dir = session.get("AVALON_SCENEDIR")
    if scene_dir:
        return os.path.join(work_dir, scene_dir)
    else:
        return work_dir
@@ -13,11 +13,11 @@ def main(env):
    # However the contents of that folder can conflict with Qt library dlls
    # so we make sure to move out of it to avoid DLL Load Failed errors.
    os.chdir("..")
    from openpype.hosts.fusion import api
    from openpype.hosts.fusion.api import FusionHost
    from openpype.hosts.fusion.api import menu

    # activate resolve from pype
    install_host(api)
    install_host(FusionHost())

    log = Logger.get_logger(__name__)
    log.info(f"Registered host: {registered_host()}")
@@ -1,7 +1,7 @@
import os
import platform
from openpype.lib import PreLaunchHook

from openpype.lib import PreLaunchHook, ApplicationLaunchFailed
from openpype.pipeline.colorspace import get_imageio_config
from openpype.pipeline.template_data import get_template_data_with_names


class FusionPreLaunchOCIO(PreLaunchHook):
@@ -11,24 +11,22 @@ class FusionPreLaunchOCIO(PreLaunchHook):
    def execute(self):
        """Hook entry method."""

        # get image io
        project_settings = self.data["project_settings"]
        template_data = get_template_data_with_names(
            project_name=self.data["project_name"],
            asset_name=self.data["asset_name"],
            task_name=self.data["task_name"],
            host_name=self.host_name,
            system_settings=self.data["system_settings"]
        )

        # make sure anatomy settings are having flame key
        imageio_fusion = project_settings["fusion"]["imageio"]

        ocio = imageio_fusion.get("ocio")
        enabled = ocio.get("enabled", False)
        if not enabled:
            return

        platform_key = platform.system().lower()
        ocio_path = ocio["configFilePath"][platform_key]
        if not ocio_path:
            raise ApplicationLaunchFailed(
                "Fusion OCIO is enabled in project settings but no OCIO config"
                f"path is set for your current platform: {platform_key}"
            )
        config_data = get_imageio_config(
            project_name=self.data["project_name"],
            host_name=self.host_name,
            project_settings=self.data["project_settings"],
            anatomy_data=template_data,
            anatomy=self.data["anatomy"]
        )
        ocio_path = config_data["path"]

        self.log.info(f"Setting OCIO config path: {ocio_path}")
        self.launch_context.env["OCIO"] = os.pathsep.join(ocio_path)
        self.launch_context.env["OCIO"] = ocio_path
@@ -36,7 +36,7 @@ class FusionPrelaunch(PreLaunchHook):
                "Make sure the environment in fusion settings has "
                "'FUSION_PYTHON3_HOME' set correctly and make sure "
                "Python 3 is installed in the given path."
                f"\n\nPYTHON36: {fusion_python3_home}"
                f"\n\nPYTHON PATH: {fusion_python3_home}"
            )

        self.log.info(f"Setting {py3_var}: '{py3_dir}'...")
@@ -1,49 +0,0 @@
import os

from openpype.pipeline import (
    LegacyCreator,
    legacy_io
)
from openpype.hosts.fusion.api import (
    get_current_comp,
    comp_lock_and_undo_chunk
)


class CreateOpenEXRSaver(LegacyCreator):

    name = "openexrDefault"
    label = "Create OpenEXR Saver"
    hosts = ["fusion"]
    family = "render"
    defaults = ["Main"]

    def process(self):

        file_format = "OpenEXRFormat"

        comp = get_current_comp()

        workdir = os.path.normpath(legacy_io.Session["AVALON_WORKDIR"])

        filename = "{}..exr".format(self.name)
        filepath = os.path.join(workdir, "render", filename)

        with comp_lock_and_undo_chunk(comp):
            args = (-32768, -32768)  # Magical position numbers
            saver = comp.AddTool("Saver", *args)
            saver.SetAttrs({"TOOLS_Name": self.name})

            # Setting input attributes is different from basic attributes
            # Not confused with "MainInputAttributes" which
            saver["Clip"] = filepath
            saver["OutputFormat"] = file_format

            # Check file format settings are available
            if saver[file_format] is None:
                raise RuntimeError("File format is not set to {}, "
                                   "this is a bug".format(file_format))

            # Set file format attributes
            saver[file_format]["Depth"] = 1  # int8 | int16 | float32 | other
            saver[file_format]["SaveAlpha"] = 0
openpype/hosts/fusion/plugins/create/create_saver.py (new file, 215 lines)
@@ -0,0 +1,215 @@
+import os
+
+import qtawesome
+
+from openpype.hosts.fusion.api import (
+    get_current_comp,
+    comp_lock_and_undo_chunk
+)
+
+from openpype.lib import BoolDef
+from openpype.pipeline import (
+    legacy_io,
+    Creator,
+    CreatedInstance
+)
+from openpype.client import get_asset_by_name
+
+
+class CreateSaver(Creator):
+    identifier = "io.openpype.creators.fusion.saver"
+    name = "saver"
+    label = "Saver"
+    family = "render"
+    default_variants = ["Main"]
+
+    description = "Fusion Saver to generate image sequence"
+
+    def create(self, subset_name, instance_data, pre_create_data):
+
+        # TODO: Add pre_create attributes to choose file format?
+        file_format = "OpenEXRFormat"
+
+        comp = get_current_comp()
+        with comp_lock_and_undo_chunk(comp):
+            args = (-32768, -32768)  # Magical position numbers
+            saver = comp.AddTool("Saver", *args)
+
+            instance_data["subset"] = subset_name
+            self._update_tool_with_data(saver, data=instance_data)
+
+            saver["OutputFormat"] = file_format
+
+            # Check file format settings are available
+            if saver[file_format] is None:
+                raise RuntimeError(
+                    f"File format is not set to {file_format}, this is a bug"
+                )
+
+            # Set file format attributes
+            saver[file_format]["Depth"] = 0  # Auto | float16 | float32
+            # TODO Is this needed?
+            saver[file_format]["SaveAlpha"] = 1
+
+        self._imprint(saver, instance_data)
+
+        # Register the CreatedInstance
+        instance = CreatedInstance(
+            family=self.family,
+            subset_name=subset_name,
+            data=instance_data,
+            creator=self)
+
+        # Insert the transient data
+        instance.transient_data["tool"] = saver
+
+        self._add_instance_to_context(instance)
+
+        return instance
+
+    def collect_instances(self):
+
+        comp = get_current_comp()
+        tools = comp.GetToolList(False, "Saver").values()
+        for tool in tools:
+
+            data = self.get_managed_tool_data(tool)
+            if not data:
+                data = self._collect_unmanaged_saver(tool)
+
+            # Add instance
+            created_instance = CreatedInstance.from_existing(data, self)
+
+            # Collect transient data
+            created_instance.transient_data["tool"] = tool
+
+            self._add_instance_to_context(created_instance)
+
+    def get_icon(self):
+        return qtawesome.icon("fa.eye", color="white")
+
+    def update_instances(self, update_list):
+        for created_inst, _changes in update_list:
+
+            new_data = created_inst.data_to_store()
+            tool = created_inst.transient_data["tool"]
+            self._update_tool_with_data(tool, new_data)
+            self._imprint(tool, new_data)
+
+    def remove_instances(self, instances):
+        for instance in instances:
+            # Remove the tool from the scene
+
+            tool = instance.transient_data["tool"]
+            if tool:
+                tool.Delete()
+
+            # Remove the collected CreatedInstance to remove from UI directly
+            self._remove_instance_from_context(instance)
+
+    def _imprint(self, tool, data):
+        # Save all data in a "openpype.{key}" = value data
+
+        active = data.pop("active", None)
+        if active is not None:
+            # Use active value to set the passthrough state
+            tool.SetAttrs({"TOOLB_PassThrough": not active})
+
+        for key, value in data.items():
+            tool.SetData(f"openpype.{key}", value)
+
+    def _update_tool_with_data(self, tool, data):
+        """Update tool node name and output path based on subset data"""
+        if "subset" not in data:
+            return
+
+        original_subset = tool.GetData("openpype.subset")
+        subset = data["subset"]
+        if original_subset != subset:
+            # Subset change detected
+            # Update output filepath
+            workdir = os.path.normpath(legacy_io.Session["AVALON_WORKDIR"])
+            filename = f"{subset}..exr"
+            filepath = os.path.join(workdir, "render", subset, filename)
+            tool["Clip"] = filepath
+
+            # Rename tool
+            if tool.Name != subset:
+                print(f"Renaming {tool.Name} -> {subset}")
+                tool.SetAttrs({"TOOLS_Name": subset})
+
+    def _collect_unmanaged_saver(self, tool):
+
+        # TODO: this should not be done this way - this should actually
+        #       get the data as stored on the tool explicitly (however)
+        #       that would disallow any 'regular saver' to be collected
+        #       unless the instance data is stored on it to begin with
+
+        print("Collecting unmanaged saver..")
+        comp = tool.Comp()
+
+        # Allow regular non-managed savers to also be picked up
+        project = legacy_io.Session["AVALON_PROJECT"]
+        asset = legacy_io.Session["AVALON_ASSET"]
+        task = legacy_io.Session["AVALON_TASK"]
+
+        asset_doc = get_asset_by_name(project_name=project,
+                                      asset_name=asset)
+
+        path = tool["Clip"][comp.TIME_UNDEFINED]
+        fname = os.path.basename(path)
+        fname, _ext = os.path.splitext(fname)
+        variant = fname.rstrip(".")
+        subset = self.get_subset_name(
+            variant=variant,
+            task_name=task,
+            asset_doc=asset_doc,
+            project_name=project,
+        )
+
+        attrs = tool.GetAttrs()
+        passthrough = attrs["TOOLB_PassThrough"]
+        return {
+            # Required data
+            "project": project,
+            "asset": asset,
+            "subset": subset,
+            "task": task,
+            "variant": variant,
+            "active": not passthrough,
+            "family": self.family,
+
+            # Unique identifier for instance and this creator
+            "id": "pyblish.avalon.instance",
+            "creator_identifier": self.identifier
+        }
+
+    def get_managed_tool_data(self, tool):
+        """Return data of the tool if it matches creator identifier"""
+        data = tool.GetData('openpype')
+        if not isinstance(data, dict):
+            return
+
+        required = {
+            "id": "pyblish.avalon.instance",
+            "creator_identifier": self.identifier
+        }
+        for key, value in required.items():
+            if key not in data or data[key] != value:
+                return
+
+        # Get active state from the actual tool state
+        attrs = tool.GetAttrs()
+        passthrough = attrs["TOOLB_PassThrough"]
+        data["active"] = not passthrough
+
+        return data
+
+    def get_instance_attr_defs(self):
+        return [
+            BoolDef(
+                "review",
+                default=True,
+                label="Review"
+            )
+        ]
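A note on the imprint pattern above: `_imprint` namespaces every key under "openpype." via `SetData`, and `get_managed_tool_data` later reads that table back with `GetData("openpype")` to decide whether a Saver belongs to this creator. A minimal sketch of the round trip, using a stand-in object instead of a real Fusion tool (the `StubTool` class is illustrative, not part of the Fusion API):

    class StubTool:
        """Stand-in for a Fusion tool: stores SetData/GetData values in a dict."""

        def __init__(self):
            self._data = {}

        def SetData(self, key, value):
            self._data[key] = value

        def GetData(self, key):
            # Fusion returns the nested table for a prefix; emulate that here.
            if key in self._data:
                return self._data[key]
            prefix = key + "."
            sub = {k[len(prefix):]: v for k, v in self._data.items()
                   if k.startswith(prefix)}
            return sub or None

    tool = StubTool()
    for key, value in {"id": "pyblish.avalon.instance",
                       "subset": "renderMain"}.items():
        tool.SetData(f"openpype.{key}", value)

    # Reading the namespace back returns the full imprinted data
    print(tool.GetData("openpype"))
    # {'id': 'pyblish.avalon.instance', 'subset': 'renderMain'}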

openpype/hosts/fusion/plugins/create/create_workfile.py (new file, 109 lines)
@@ -0,0 +1,109 @@
+import qtawesome
+
+from openpype.hosts.fusion.api import (
+    get_current_comp
+)
+from openpype.client import get_asset_by_name
+from openpype.pipeline import (
+    AutoCreator,
+    CreatedInstance,
+    legacy_io,
+)
+
+
+class FusionWorkfileCreator(AutoCreator):
+    identifier = "workfile"
+    family = "workfile"
+    label = "Workfile"
+
+    default_variant = "Main"
+
+    create_allow_context_change = False
+
+    data_key = "openpype_workfile"
+
+    def collect_instances(self):
+
+        comp = get_current_comp()
+        data = comp.GetData(self.data_key)
+        if not data:
+            return
+
+        instance = CreatedInstance(
+            family=self.family,
+            subset_name=data["subset"],
+            data=data,
+            creator=self
+        )
+        instance.transient_data["comp"] = comp
+
+        self._add_instance_to_context(instance)
+
+    def update_instances(self, update_list):
+        for created_inst, _changes in update_list:
+            comp = created_inst.transient_data["comp"]
+            if not hasattr(comp, "SetData"):
+                # Comp is not alive anymore, likely closed by the user
+                self.log.error("Workfile comp not found for existing instance."
+                               " Comp might have been closed in the meantime.")
+                continue
+
+            # Imprint data into the comp
+            data = created_inst.data_to_store()
+            comp.SetData(self.data_key, data)
+
+    def create(self, options=None):
+
+        comp = get_current_comp()
+        if not comp:
+            self.log.error("Unable to find current comp")
+            return
+
+        existing_instance = None
+        for instance in self.create_context.instances:
+            if instance.family == self.family:
+                existing_instance = instance
+                break
+
+        project_name = legacy_io.Session["AVALON_PROJECT"]
+        asset_name = legacy_io.Session["AVALON_ASSET"]
+        task_name = legacy_io.Session["AVALON_TASK"]
+        host_name = legacy_io.Session["AVALON_APP"]
+
+        if existing_instance is None:
+            asset_doc = get_asset_by_name(project_name, asset_name)
+            subset_name = self.get_subset_name(
+                self.default_variant, task_name, asset_doc,
+                project_name, host_name
+            )
+            data = {
+                "asset": asset_name,
+                "task": task_name,
+                "variant": self.default_variant
+            }
+            data.update(self.get_dynamic_data(
+                self.default_variant, task_name, asset_doc,
+                project_name, host_name, None
+            ))
+
+            new_instance = CreatedInstance(
+                self.family, subset_name, data, self
+            )
+            new_instance.transient_data["comp"] = comp
+            self._add_instance_to_context(new_instance)
+
+        elif (
+            existing_instance["asset"] != asset_name
+            or existing_instance["task"] != task_name
+        ):
+            asset_doc = get_asset_by_name(project_name, asset_name)
+            subset_name = self.get_subset_name(
+                self.default_variant, task_name, asset_doc,
+                project_name, host_name
+            )
+            existing_instance["asset"] = asset_name
+            existing_instance["task"] = task_name
+            existing_instance["subset"] = subset_name
+
+    def get_icon(self):
+        return qtawesome.icon("fa.file-o", color="white")
@@ -15,6 +15,7 @@ class FusionSetFrameRangeLoader(load.LoaderPlugin):
                 "pointcache",
                 "render"]
     representations = ["*"]
+    extensions = {"*"}
 
     label = "Set frame range"
     order = 11
@@ -13,7 +13,8 @@ class FusionLoadAlembicMesh(load.LoaderPlugin):
     """Load Alembic mesh into Fusion"""
 
     families = ["pointcache", "model"]
-    representations = ["abc"]
+    representations = ["*"]
+    extensions = {"abc"}
 
     label = "Load alembic mesh"
     order = -10
@@ -14,7 +14,8 @@ class FusionLoadFBXMesh(load.LoaderPlugin):
     """Load FBX mesh into Fusion"""
 
     families = ["*"]
-    representations = ["fbx"]
+    representations = ["*"]
+    extensions = {"fbx"}
 
     label = "Load FBX mesh"
     order = -10
@@ -1,17 +1,19 @@
 import os
 import contextlib
 
-from openpype.client import get_version_by_id
-from openpype.pipeline import (
-    load,
-    legacy_io,
-    get_representation_path,
-)
+import openpype.pipeline.load as load
+from openpype.pipeline.load import (
+    get_representation_context,
+    get_representation_path_from_context
+)
 from openpype.hosts.fusion.api import (
     imprint_container,
     get_current_comp,
     comp_lock_and_undo_chunk
 )
+from openpype.lib.transcoding import (
+    IMAGE_EXTENSIONS,
+    VIDEO_EXTENSIONS
+)
 
 comp = get_current_comp()
@@ -129,6 +131,9 @@ class FusionLoadSequence(load.LoaderPlugin):
 
     families = ["imagesequence", "review", "render", "plate"]
     representations = ["*"]
+    extensions = set(
+        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
+    )
 
     label = "Load sequence"
     order = -10
@@ -141,7 +146,7 @@ class FusionLoadSequence(load.LoaderPlugin):
         namespace = context['asset']['name']
 
         # Use the first file for now
-        path = self._get_first_image(os.path.dirname(self.fname))
+        path = get_representation_path_from_context(context)
 
         # Create the Loader with the filename path set
         comp = get_current_comp()
@@ -210,13 +215,11 @@ class FusionLoadSequence(load.LoaderPlugin):
         assert tool.ID == "Loader", "Must be Loader"
         comp = tool.Comp()
 
-        root = os.path.dirname(get_representation_path(representation))
-        path = self._get_first_image(root)
+        context = get_representation_context(representation)
+        path = get_representation_path_from_context(context)
 
         # Get start frame from version data
-        project_name = legacy_io.active_project()
-        version = get_version_by_id(project_name, representation["parent"])
-        start = self._get_start(version, tool)
+        start = self._get_start(context["version"], tool)
 
         with comp_lock_and_undo_chunk(comp, "Update Loader"):
@@ -249,11 +252,6 @@ class FusionLoadSequence(load.LoaderPlugin):
         with comp_lock_and_undo_chunk(comp, "Remove Loader"):
             tool.Delete()
 
-    def _get_first_image(self, root):
-        """Get first file in representation root"""
-        files = sorted(os.listdir(root))
-        return os.path.join(root, files[0])
-
     def _get_start(self, version_doc, tool):
         """Return real start frame of published files (incl. handles)"""
         data = version_doc["data"]
@@ -1,5 +1,3 @@
-import os
-
 import pyblish.api
 
 from openpype.hosts.fusion.api import get_current_comp
@@ -0,0 +1,43 @@
+import pyblish.api
+
+
+def get_comp_render_range(comp):
+    """Return comp's start-end render range and global start-end range."""
+    comp_attrs = comp.GetAttrs()
+    start = comp_attrs["COMPN_RenderStart"]
+    end = comp_attrs["COMPN_RenderEnd"]
+    global_start = comp_attrs["COMPN_GlobalStart"]
+    global_end = comp_attrs["COMPN_GlobalEnd"]
+
+    # Whenever render ranges are undefined fall back
+    # to the comp's global start and end
+    if start == -1000000000:
+        start = global_start
+    if end == -1000000000:
+        end = global_end
+
+    return start, end, global_start, global_end
+
+
+class CollectFusionCompFrameRanges(pyblish.api.ContextPlugin):
+    """Collect current comp"""
+
+    # We run this after CollectorOrder - 0.1 otherwise it gets
+    # overridden by global plug-in `CollectContextEntities`
+    order = pyblish.api.CollectorOrder - 0.05
+    label = "Collect Comp Frame Ranges"
+    hosts = ["fusion"]
+
+    def process(self, context):
+        """Collect all image sequence tools"""
+
+        comp = context.data["currentComp"]
+
+        # Store comp render ranges
+        start, end, global_start, global_end = get_comp_render_range(comp)
+        context.data["frameStart"] = int(start)
+        context.data["frameEnd"] = int(end)
+        context.data["frameStartHandle"] = int(global_start)
+        context.data["frameEndHandle"] = int(global_end)
+        context.data["handleStart"] = int(start) - int(global_start)
+        context.data["handleEnd"] = int(global_end) - int(end)
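The handle math in this collector follows directly from the two ranges: the render range is the frame span without handles, the global range includes them, so the handle sizes are simply the differences. A small worked example with made-up frame numbers:

    # Comp render range (without handles) and global range (with handles)
    start, end = 1001, 1100
    global_start, global_end = 996, 1105

    handle_start = start - global_start  # 5 frames of head handles
    handle_end = global_end - end        # 5 frames of tail handles

    assert (handle_start, handle_end) == (5, 5)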

@@ -1,5 +1,3 @@
-from bson.objectid import ObjectId
-
 import pyblish.api
 
 from openpype.pipeline import registered_host
@@ -97,10 +95,15 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
     label = "Collect Inputs"
     order = pyblish.api.CollectorOrder + 0.2
     hosts = ["fusion"]
+    families = ["render"]
 
     def process(self, instance):
 
         # Get all upstream and include itself
+        if not any(instance[:]):
+            self.log.debug("No tool found in instance, skipping..")
+            return
+
         tool = instance[0]
         nodes = list(iter_upstream(tool))
         nodes.append(tool)
@@ -108,7 +111,6 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
         # Collect containers for the given set of nodes
         containers = collect_input_containers(nodes)
 
-        inputs = [ObjectId(c["representation"]) for c in containers]
+        inputs = [c["representation"] for c in containers]
         instance.data["inputRepresentations"] = inputs
 
         self.log.info("Collected inputs: %s" % inputs)
@@ -3,25 +3,7 @@ import os
 import pyblish.api
 
 
-def get_comp_render_range(comp):
-    """Return comp's start-end render range and global start-end range."""
-    comp_attrs = comp.GetAttrs()
-    start = comp_attrs["COMPN_RenderStart"]
-    end = comp_attrs["COMPN_RenderEnd"]
-    global_start = comp_attrs["COMPN_GlobalStart"]
-    global_end = comp_attrs["COMPN_GlobalEnd"]
-
-    # Whenever render ranges are undefined fall back
-    # to the comp's global start and end
-    if start == -1000000000:
-        start = global_start
-    if end == -1000000000:
-        end = global_end
-
-    return start, end, global_start, global_end
-
-
-class CollectInstances(pyblish.api.ContextPlugin):
+class CollectInstanceData(pyblish.api.InstancePlugin):
     """Collect Fusion saver instances
 
     This additionally stores the Comp start and end render range in the
@@ -30,76 +12,68 @@ class CollectInstances(pyblish.api.ContextPlugin):
     """
 
     order = pyblish.api.CollectorOrder
-    label = "Collect Instances"
+    label = "Collect Instances Data"
     hosts = ["fusion"]
 
-    def process(self, context):
+    def process(self, instance):
         """Collect all image sequence tools"""
 
-        from openpype.hosts.fusion.api.lib import get_frame_path
-
-        comp = context.data["currentComp"]
-
-        # Get all savers in the comp
-        tools = comp.GetToolList(False).values()
-        savers = [tool for tool in tools if tool.ID == "Saver"]
-
-        start, end, global_start, global_end = get_comp_render_range(comp)
-        context.data["frameStart"] = int(start)
-        context.data["frameEnd"] = int(end)
-        context.data["frameStartHandle"] = int(global_start)
-        context.data["frameEndHandle"] = int(global_end)
-
-        for tool in savers:
-            path = tool["Clip"][comp.TIME_UNDEFINED]
-
-            tool_attrs = tool.GetAttrs()
-            active = not tool_attrs["TOOLB_PassThrough"]
-
-            if not path:
-                self.log.warning("Skipping saver because it "
-                                 "has no path set: {}".format(tool.Name))
-                continue
-
-            filename = os.path.basename(path)
-            head, padding, tail = get_frame_path(filename)
-            ext = os.path.splitext(path)[1]
-            assert tail == ext, ("Tail does not match %s" % ext)
-            subset = head.rstrip("_. ")  # subset is head of the filename
-
-            # Include start and end render frame in label
-            label = "{subset} ({start}-{end})".format(subset=subset,
-                                                      start=int(start),
-                                                      end=int(end))
-
-            instance = context.create_instance(subset)
-            instance.data.update({
-                "asset": os.environ["AVALON_ASSET"],  # todo: not a constant
-                "subset": subset,
-                "path": path,
-                "outputDir": os.path.dirname(path),
-                "ext": ext,  # todo: should be redundant
-                "label": label,
-                "frameStart": context.data["frameStart"],
-                "frameEnd": context.data["frameEnd"],
-                "frameStartHandle": context.data["frameStartHandle"],
-                "frameEndHandle": context.data["frameStartHandle"],
-                "fps": context.data["fps"],
-                "families": ["render", "review"],
-                "family": "render",
-                "active": active,
-                "publish": active  # backwards compatibility
-            })
-
-            # Add tool itself as member
-            instance.append(tool)
-
-            self.log.info("Found: \"%s\" " % path)
-
-        # Sort/grouped by family (preserving local index)
-        context[:] = sorted(context, key=self.sort_by_family)
-
-        return context
-
-    def sort_by_family(self, instance):
-        """Sort by family"""
-        return instance.data.get("families", instance.data.get("family"))
+        context = instance.context
+
+        # Include creator attributes directly as instance data
+        creator_attributes = instance.data["creator_attributes"]
+        instance.data.update(creator_attributes)
+
+        # Include start and end render frame in label
+        subset = instance.data["subset"]
+        start = context.data["frameStart"]
+        end = context.data["frameEnd"]
+        label = "{subset} ({start}-{end})".format(subset=subset,
+                                                  start=int(start),
+                                                  end=int(end))
+        instance.data.update({
+            "label": label,
+
+            # todo: Allow custom frame range per instance
+            "frameStart": context.data["frameStart"],
+            "frameEnd": context.data["frameEnd"],
+            "frameStartHandle": context.data["frameStartHandle"],
+            "frameEndHandle": context.data["frameStartHandle"],
+            "handleStart": context.data["handleStart"],
+            "handleEnd": context.data["handleEnd"],
+            "fps": context.data["fps"],
+        })
+
+        # Add review family if the instance is marked as 'review'
+        # This could be done through a 'review' Creator attribute.
+        if instance.data.get("review", False):
+            self.log.info("Adding review family..")
+            instance.data["families"].append("review")
+
+        if instance.data["family"] == "render":
+            # TODO: This should probably move into a collector of
+            #       its own for the "render" family
+            from openpype.hosts.fusion.api.lib import get_frame_path
+            comp = context.data["currentComp"]
+
+            # This is only the case for savers currently but not
+            # for workfile instances. So we assume saver here.
+            tool = instance.data["transientData"]["tool"]
+            path = tool["Clip"][comp.TIME_UNDEFINED]
+
+            filename = os.path.basename(path)
+            head, padding, tail = get_frame_path(filename)
+            ext = os.path.splitext(path)[1]
+            assert tail == ext, ("Tail does not match %s" % ext)
+
+            instance.data.update({
+                "path": path,
+                "outputDir": os.path.dirname(path),
+                "ext": ext,  # todo: should be redundant?
+
+                # Backwards compatibility: embed tool in instance.data
+                "tool": tool
+            })
+
+            # Add tool itself as member
+            instance.append(tool)
openpype/hosts/fusion/plugins/publish/collect_workfile.py (new file, 26 lines)
@@ -0,0 +1,26 @@
+import os
+
+import pyblish.api
+
+
+class CollectFusionWorkfile(pyblish.api.InstancePlugin):
+    """Collect Fusion workfile representation."""
+
+    order = pyblish.api.CollectorOrder + 0.1
+    label = "Collect Workfile"
+    hosts = ["fusion"]
+    families = ["workfile"]
+
+    def process(self, instance):
+
+        current_file = instance.context.data["currentFile"]
+
+        folder, file = os.path.split(current_file)
+        filename, ext = os.path.splitext(file)
+
+        instance.data['representations'] = [{
+            'name': ext.lstrip("."),
+            'ext': ext.lstrip("."),
+            'files': file,
+            "stagingDir": folder,
+        }]
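For reference, the split above turns the current comp path into a single-file representation. Assuming a workfile saved as /projects/demo/work/shot010_v001.comp (a hypothetical path), the resulting entry would be:

    import os

    current_file = "/projects/demo/work/shot010_v001.comp"  # hypothetical example
    folder, file = os.path.split(current_file)
    filename, ext = os.path.splitext(file)

    representation = {
        "name": ext.lstrip("."),   # "comp"
        "ext": ext.lstrip("."),    # "comp"
        "files": file,             # "shot010_v001.comp"
        "stagingDir": folder,      # "/projects/demo/work"
    }
    print(representation)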

@@ -11,7 +11,7 @@ class FusionIncrementCurrentFile(pyblish.api.ContextPlugin):
     label = "Increment current file"
     order = pyblish.api.IntegratorOrder + 9.0
     hosts = ["fusion"]
-    families = ["render.farm"]
+    families = ["workfile"]
     optional = True
 
     def process(self, context):
@@ -1,11 +1,11 @@
 import os
-from pprint import pformat
 
 import pyblish.api
+from openpype.pipeline import publish
 from openpype.hosts.fusion.api import comp_lock_and_undo_chunk
 
 
-class Fusionlocal(pyblish.api.InstancePlugin):
+class Fusionlocal(pyblish.api.InstancePlugin,
+                  publish.ColormanagedPyblishPluginMixin):
     """Render the current Fusion composition locally.
 
     Extract the result of savers by starting a comp render
@@ -19,55 +19,82 @@ class Fusionlocal(pyblish.api.InstancePlugin):
     families = ["render.local"]
 
     def process(self, instance):
-
-        # This plug-in runs only once and thus assumes all instances
-        # currently will render the same frame range
         context = instance.context
-        key = "__hasRun{}".format(self.__class__.__name__)
-        if context.data.get(key, False):
-            return
-        else:
-            context.data[key] = True
-
-        current_comp = context.data["currentComp"]
+
+        # Start render
+        self.render_once(context)
+
+        # Log render status
+        self.log.info(
+            "Rendered '{nm}' for asset '{ast}' under the task '{tsk}'".format(
+                nm=instance.data["name"],
+                ast=instance.data["asset"],
+                tsk=instance.data["task"],
+            )
+        )
+
         frame_start = context.data["frameStartHandle"]
         frame_end = context.data["frameEndHandle"]
         path = instance.data["path"]
         output_dir = instance.data["outputDir"]
 
-        ext = os.path.splitext(os.path.basename(path))[-1]
-
-        self.log.info("Starting render")
-        self.log.info("Start frame: {}".format(frame_start))
-        self.log.info("End frame: {}".format(frame_end))
-
-        with comp_lock_and_undo_chunk(current_comp):
-            result = current_comp.Render({
-                "Start": frame_start,
-                "End": frame_end,
-                "Wait": True
-            })
-
-        if not result:
-            raise RuntimeError("Comp render failed")
+        basename = os.path.basename(path)
+        head, ext = os.path.splitext(basename)
+        files = [
+            f"{head}{str(frame).zfill(4)}{ext}"
+            for frame in range(frame_start, frame_end + 1)
+        ]
+        repre = {
+            "name": ext[1:],
+            "ext": ext[1:],
+            "frameStart": f"%0{len(str(frame_end))}d" % frame_start,
+            "files": files,
+            "stagingDir": output_dir,
+        }
+
+        self.set_representation_colorspace(
+            representation=repre,
+            context=context,
+        )
 
         if "representations" not in instance.data:
             instance.data["representations"] = []
-
-        collected_frames = os.listdir(output_dir)
-        repre = {
-            'name': ext[1:],
-            'ext': ext[1:],
-            'frameStart': "%0{}d".format(len(str(frame_end))) % frame_start,
-            'files': collected_frames,
-            "stagingDir": output_dir,
-        }
         instance.data["representations"].append(repre)
 
-        # review representation
-        repre_preview = repre.copy()
-        repre_preview["name"] = repre_preview["ext"] = "mp4"
-        repre_preview["tags"] = ["review", "preview", "ftrackreview", "delete"]
-        instance.data["representations"].append(repre_preview)
-
-        self.log.debug(f"_ instance.data: {pformat(instance.data)}")
+        if instance.data.get("review", False):
+            repre["tags"] = ["review", "ftrackreview"]
+
+    def render_once(self, context):
+        """Render context comp only once, even with more render instances"""
+
+        # This plug-in assumes all render nodes get rendered at the same time
+        # to speed up the rendering. The check below makes sure that we only
+        # execute the rendering once and not for each instance.
+        key = f"__hasRun{self.__class__.__name__}"
+        if key not in context.data:
+            # We initialize as false to indicate it wasn't successful yet
+            # so we can keep track of whether Fusion succeeded
+            context.data[key] = False
+
+            current_comp = context.data["currentComp"]
+            frame_start = context.data["frameStartHandle"]
+            frame_end = context.data["frameEndHandle"]
+
+            self.log.info("Starting Fusion render")
+            self.log.info(f"Start frame: {frame_start}")
+            self.log.info(f"End frame: {frame_end}")
+
+            with comp_lock_and_undo_chunk(current_comp):
+                result = current_comp.Render(
+                    {
+                        "Start": frame_start,
+                        "End": frame_end,
+                        "Wait": True,
+                    }
+                )
+
+            context.data[key] = bool(result)
+
+        if context.data[key] is False:
+            raise RuntimeError("Comp render failed")
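The `render_once` refactor above replaces the old early-return guard with a tri-state context key: absent means not yet attempted, False means attempted but failed, True means rendered. Every instance still calls it, but only the first call triggers the render; later calls just re-check the stored result and can still fail the publish. A minimal sketch of that pattern outside of Fusion (names are illustrative):

    def run_once(context_data, key, action):
        """Run `action` once per context; every caller re-validates the result."""
        if key not in context_data:
            context_data[key] = False  # attempted, not yet successful
            context_data[key] = bool(action())
        if context_data[key] is False:
            raise RuntimeError("Action failed")

    context = {}
    calls = []
    for _ in range(3):  # three instances sharing one context
        run_once(context, "__hasRenderRun", lambda: calls.append(1) or True)

    assert calls == [1]  # the action itself ran only once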

@@ -7,7 +7,7 @@ class FusionSaveComp(pyblish.api.ContextPlugin):
     label = "Save current file"
     order = pyblish.api.ExtractorOrder - 0.49
     hosts = ["fusion"]
-    families = ["render"]
+    families = ["render", "workfile"]
 
     def process(self, context):
@@ -1,6 +1,9 @@
 import pyblish.api
 
 from openpype.pipeline.publish import RepairAction
+from openpype.pipeline import PublishValidationError
+
+from openpype.hosts.fusion.api.action import SelectInvalidAction
 
 
 class ValidateBackgroundDepth(pyblish.api.InstancePlugin):
@@ -8,11 +11,12 @@ class ValidateBackgroundDepth(pyblish.api.InstancePlugin):
 
     order = pyblish.api.ValidatorOrder
     label = "Validate Background Depth 32 bit"
-    actions = [RepairAction]
     hosts = ["fusion"]
     families = ["render"]
     optional = True
 
+    actions = [SelectInvalidAction, RepairAction]
+
     @classmethod
     def get_invalid(cls, instance):
@@ -29,8 +33,10 @@ class ValidateBackgroundDepth(pyblish.api.InstancePlugin):
     def process(self, instance):
         invalid = self.get_invalid(instance)
         if invalid:
-            raise RuntimeError("Found %i nodes which are not set to float32"
-                               % len(invalid))
+            raise PublishValidationError(
+                "Found {} Backgrounds tools which"
+                " are not set to float32".format(len(invalid)),
+                title=self.label)
 
     @classmethod
     def repair(cls, instance):
@@ -1,6 +1,7 @@
 import os
 
 import pyblish.api
+from openpype.pipeline import PublishValidationError
 
 
 class ValidateFusionCompSaved(pyblish.api.ContextPlugin):
@@ -19,10 +20,12 @@ class ValidateFusionCompSaved(pyblish.api.ContextPlugin):
 
         filename = attrs["COMPS_FileName"]
         if not filename:
-            raise RuntimeError("Comp is not saved.")
+            raise PublishValidationError("Comp is not saved.",
+                                         title=self.label)
 
         if not os.path.exists(filename):
-            raise RuntimeError("Comp file does not exist: %s" % filename)
+            raise PublishValidationError(
+                "Comp file does not exist: %s" % filename, title=self.label)
 
         if attrs["COMPB_Modified"]:
             self.log.warning("Comp is modified. Save your comp to ensure your "
@@ -1,6 +1,9 @@
 import pyblish.api
 
 from openpype.pipeline.publish import RepairAction
+from openpype.pipeline import PublishValidationError
+
+from openpype.hosts.fusion.api.action import SelectInvalidAction
 
 
 class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
@@ -15,6 +18,7 @@ class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
     label = "Validate Create Folder Checked"
     families = ["render"]
     hosts = ["fusion"]
+    actions = [SelectInvalidAction]
 
     @classmethod
     def get_invalid(cls, instance):
@@ -31,8 +35,9 @@ class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
     def process(self, instance):
         invalid = self.get_invalid(instance)
         if invalid:
-            raise RuntimeError("Found Saver with Create Folder During "
-                               "Render checked off")
+            raise PublishValidationError(
+                "Found Saver with Create Folder During Render checked off",
+                title=self.label)
 
     @classmethod
     def repair(cls, instance):
@@ -1,6 +1,9 @@
 import os
 
 import pyblish.api
+from openpype.pipeline import PublishValidationError
+
+from openpype.hosts.fusion.api.action import SelectInvalidAction
 
 
 class ValidateFilenameHasExtension(pyblish.api.InstancePlugin):
@@ -16,11 +19,13 @@ class ValidateFilenameHasExtension(pyblish.api.InstancePlugin):
     label = "Validate Filename Has Extension"
     families = ["render"]
     hosts = ["fusion"]
+    actions = [SelectInvalidAction]
 
     def process(self, instance):
         invalid = self.get_invalid(instance)
         if invalid:
-            raise RuntimeError("Found Saver without an extension")
+            raise PublishValidationError("Found Saver without an extension",
+                                         title=self.label)
 
     @classmethod
     def get_invalid(cls, instance):
@@ -1,4 +1,7 @@
 import pyblish.api
+from openpype.pipeline import PublishValidationError
+
+from openpype.hosts.fusion.api.action import SelectInvalidAction
 
 
 class ValidateSaverHasInput(pyblish.api.InstancePlugin):
@@ -12,6 +15,7 @@ class ValidateSaverHasInput(pyblish.api.InstancePlugin):
     label = "Validate Saver Has Input"
     families = ["render"]
     hosts = ["fusion"]
+    actions = [SelectInvalidAction]
 
     @classmethod
     def get_invalid(cls, instance):
@@ -25,5 +29,8 @@ class ValidateSaverHasInput(pyblish.api.InstancePlugin):
     def process(self, instance):
         invalid = self.get_invalid(instance)
         if invalid:
-            raise RuntimeError("Saver has no incoming connection: "
-                               "{} ({})".format(instance, invalid[0].Name))
+            saver_name = invalid[0].Name
+            raise PublishValidationError(
+                "Saver has no incoming connection: {} ({})".format(instance,
+                                                                   saver_name),
+                title=self.label)
@@ -1,4 +1,7 @@
 import pyblish.api
+from openpype.pipeline import PublishValidationError
+
+from openpype.hosts.fusion.api.action import SelectInvalidAction
 
 
 class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
@@ -8,6 +11,7 @@ class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
     label = "Validate Saver Passthrough"
     families = ["render"]
     hosts = ["fusion"]
+    actions = [SelectInvalidAction]
 
     def process(self, context):
 
@@ -27,8 +31,9 @@ class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
         if invalid_instances:
             self.log.info("Reset pyblish to collect your current scene state, "
                           "that should fix error.")
-            raise RuntimeError("Invalid instances: "
-                               "{0}".format(invalid_instances))
+            raise PublishValidationError(
+                "Invalid instances: {0}".format(invalid_instances),
+                title=self.label)
 
     def is_invalid(self, instance):
 
@@ -36,7 +41,7 @@ class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
         attr = saver.GetAttrs()
         active = not attr["TOOLB_PassThrough"]
 
-        if active != instance.data["publish"]:
+        if active != instance.data.get("publish", True):
             self.log.info("Saver has different passthrough state than "
                           "Pyblish: {} ({})".format(instance, saver.Name))
             return [saver]
@@ -0,0 +1,55 @@
+from collections import defaultdict
+
+import pyblish.api
+from openpype.pipeline import PublishValidationError
+
+from openpype.hosts.fusion.api.action import SelectInvalidAction
+
+
+class ValidateUniqueSubsets(pyblish.api.ContextPlugin):
+    """Ensure all instances have a unique subset name"""
+
+    order = pyblish.api.ValidatorOrder
+    label = "Validate Unique Subsets"
+    families = ["render"]
+    hosts = ["fusion"]
+    actions = [SelectInvalidAction]
+
+    @classmethod
+    def get_invalid(cls, context):
+
+        # Collect instances per subset per asset
+        instances_per_subset_asset = defaultdict(lambda: defaultdict(list))
+        for instance in context:
+            asset = instance.data.get("asset", context.data.get("asset"))
+            subset = instance.data.get("subset", context.data.get("subset"))
+            instances_per_subset_asset[asset][subset].append(instance)
+
+        # Find which asset + subset combination has more than one instance
+        # Those are considered invalid because they'd integrate to the same
+        # destination.
+        invalid = []
+        for asset, instances_per_subset in instances_per_subset_asset.items():
+            for subset, instances in instances_per_subset.items():
+                if len(instances) > 1:
+                    cls.log.warning(
+                        "{asset} > {subset} used by more than "
+                        "one instance: {instances}".format(
+                            asset=asset,
+                            subset=subset,
+                            instances=instances
+                        )
+                    )
+                    invalid.extend(instances)
+
+        # Return tools for the invalid instances so they can be selected
+        invalid = [instance.data["tool"] for instance in invalid]
+
+        return invalid
+
+    def process(self, context):
+        invalid = self.get_invalid(context)
+        if invalid:
+            raise PublishValidationError("Multiple instances are set to "
+                                         "the same asset > subset.",
+                                         title=self.label)
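The nested `defaultdict` above groups instances by asset and then by subset, so any bucket with more than one member is flagged as a collision. The same grouping in isolation, with plain dicts standing in for pyblish instances:

    from collections import defaultdict

    instances = [
        {"asset": "shot010", "subset": "renderMain"},
        {"asset": "shot010", "subset": "renderMain"},  # duplicate target
        {"asset": "shot010", "subset": "renderCrypto"},
    ]

    per_asset_subset = defaultdict(lambda: defaultdict(list))
    for inst in instances:
        per_asset_subset[inst["asset"]][inst["subset"]].append(inst)

    duplicates = [
        (asset, subset)
        for asset, per_subset in per_asset_subset.items()
        for subset, grouped in per_subset.items()
        if len(grouped) > 1
    ]
    assert duplicates == [("shot010", "renderMain")]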

@@ -126,10 +126,6 @@ def check_inventory():
 
 def application_launch(event):
     """Event that is executed after Harmony is launched."""
-    # FIXME: This is breaking server <-> client communication.
-    # It is now moved so it it manually called.
-    # ensure_scene_settings()
-    # check_inventory()
     # fills OPENPYPE_HARMONY_JS
     pype_harmony_path = Path(__file__).parent.parent / "js" / "PypeHarmony.js"
     pype_harmony_js = pype_harmony_path.read_text()
@@ -146,6 +142,9 @@ def application_launch(event):
     harmony.send({"script": script})
     inject_avalon_js()
 
+    ensure_scene_settings()
+    check_inventory()
+
 
 def export_template(backdrops, nodes, filepath):
     """Export Template to file.
@@ -20,8 +20,9 @@ class ImageSequenceLoader(load.LoaderPlugin):
     Stores the imported asset in a container named after the asset.
     """
 
-    families = ["shot", "render", "image", "plate", "reference"]
-    representations = ["jpeg", "png", "jpg"]
+    families = ["shot", "render", "image", "plate", "reference", "review"]
+    representations = ["*"]
+    extensions = {"jpeg", "png", "jpg"}
 
     def load(self, context, name=None, namespace=None, data=None):
         """Plugin entry point.
@@ -6,6 +6,10 @@ from openpype.pipeline import (
     legacy_io,
     get_representation_path,
 )
+from openpype.lib.transcoding import (
+    VIDEO_EXTENSIONS,
+    IMAGE_EXTENSIONS
+)
 import openpype.hosts.hiero.api as phiero
 
 
@@ -17,7 +21,10 @@ class LoadClip(phiero.SequenceLoader):
     """
 
     families = ["render2d", "source", "plate", "render", "review"]
-    representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264"]
+    representations = ["*"]
+    extensions = set(
+        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
+    )
 
     label = "Load as clip"
     order = -10
@@ -34,6 +41,38 @@ class LoadClip(phiero.SequenceLoader):
 
     clip_name_template = "{asset}_{subset}_{representation}"
 
+    def apply_settings(cls, project_settings, system_settings):
+
+        plugin_type_settings = (
+            project_settings
+            .get("hiero", {})
+            .get("load", {})
+        )
+
+        if not plugin_type_settings:
+            return
+
+        plugin_name = cls.__name__
+
+        plugin_settings = None
+        # Look for plugin settings in host specific settings
+        if plugin_name in plugin_type_settings:
+            plugin_settings = plugin_type_settings[plugin_name]
+
+        if not plugin_settings:
+            return
+
+        print(">>> We have preset for {}".format(plugin_name))
+        for option, value in plugin_settings.items():
+            if option == "enabled" and value is False:
+                print("  - is disabled by preset")
+            elif option == "representations":
+                continue
+            else:
+                print("  - setting `{}`: `{}`".format(option, value))
+                setattr(cls, option, value)
+
     def load(self, context, name, namespace, options):
         # add clip name template to options
         options.update({
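The `apply_settings` hook above copies matching keys from project settings onto the plug-in class itself, so studio presets can override class attributes such as `order` or `clip_name_template` without touching code. A stripped-down illustration of that mechanism (the settings dict below is a made-up example mirroring project_settings["hiero"]["load"]):

    class LoadClipSketch:
        order = -10
        clip_name_template = "{asset}_{subset}_{representation}"

    project_settings = {  # hypothetical studio preset
        "LoadClipSketch": {"enabled": True, "order": -5}
    }

    plugin_settings = project_settings.get(LoadClipSketch.__name__, {})
    for option, value in plugin_settings.items():
        if option in ("enabled", "representations"):
            continue
        setattr(LoadClipSketch, option, value)  # class attribute override

    assert LoadClipSketch.order == -5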

@@ -19,8 +19,9 @@ from openpype.lib import Logger
 class LoadEffects(load.LoaderPlugin):
     """Loading colorspace soft effect exported from nukestudio"""
 
-    representations = ["effectJson"]
     families = ["effect"]
+    representations = ["*"]
+    extension = {"json"}
 
     label = "Load Effects"
     order = 0
@@ -120,13 +120,9 @@ class CollectClipEffects(pyblish.api.InstancePlugin):
         track = sitem.parentTrack().name()
         # node serialization
         node = sitem.node()
-        node_serialized = self.node_serialisation(node)
+        node_serialized = self.node_serialization(node)
         node_name = sitem.name()
-
-        if "_" in node_name:
-            node_class = re.sub(r"(?:_)[_0-9]+", "", node_name)  # more numbers
-        else:
-            node_class = re.sub(r"\d+", "", node_name)  # one number
+        node_class = node.Class()
 
         # collect timelineIn/Out
         effect_t_in = int(sitem.timelineIn())
@@ -148,7 +144,7 @@ class CollectClipEffects(pyblish.api.InstancePlugin):
             "node": node_serialized
         }}
 
-    def node_serialisation(self, node):
+    def node_serialization(self, node):
        node_serialized = {}
 
         # adding ignoring knob keys
@@ -1,5 +1,3 @@
-from bson.objectid import ObjectId
-
 import pyblish.api
 
 from openpype.pipeline import registered_host
@@ -117,7 +115,6 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
         # Collect containers for the given set of nodes
         containers = collect_input_containers(nodes)
 
-        inputs = [ObjectId(c["representation"]) for c in containers]
+        inputs = [c["representation"] for c in containers]
         instance.data["inputRepresentations"] = inputs
 
         self.log.info("Collected inputs: %s" % inputs)
@@ -10,7 +10,7 @@
 import hou
 from openpype.tools.utils import host_tools
 parent = hou.qt.mainWindow()
-host_tools.show_creator(parent)
+host_tools.show_publisher(parent, tab="create")
 ]]></scriptCode>
   </scriptItem>
 
@@ -30,7 +30,7 @@ host_tools.show_loader(parent=parent, use_context=True)
 import hou
 from openpype.tools.utils import host_tools
 parent = hou.qt.mainWindow()
-host_tools.show_publisher(parent)
+host_tools.show_publisher(parent, tab="publish")
 ]]></scriptCode>
   </scriptItem>
 
@@ -66,8 +66,8 @@ host_tools.show_workfiles(parent)
 ]]></scriptCode>
   </scriptItem>
 
-  <scriptItem id="reset_frame_range">
-    <label>Reset Frame Range</label>
+  <scriptItem id="set_frame_range">
+    <label>Set Frame Range</label>
     <scriptCode><![CDATA[
 import openpype.hosts.houdini.api.lib
 openpype.hosts.houdini.api.lib.reset_framerange()
@@ -120,3 +120,51 @@ def get_all_children(parent, node_type=None):
 
     return ([x for x in child_list if rt.superClassOf(x) == node_type]
             if node_type else child_list)
+
+
+def get_current_renderer():
+    """get current renderer"""
+    return rt.renderers.production
+
+
+def get_default_render_folder(project_setting=None):
+    return (project_setting["max"]
+                           ["RenderSettings"]
+                           ["default_render_image_folder"])
+
+
+def set_framerange(start_frame, end_frame):
+    """
+    Note:
+        Frame range can be specified in different types. Possible values are:
+        * `1` - Single frame.
+        * `2` - Active time segment ( animationRange ).
+        * `3` - User specified Range.
+        * `4` - User specified Frame pickup string (for example `1,3,5-12`).
+
+    Todo:
+        Current type is hard-coded, there should be a custom setting for this.
+    """
+    rt.rendTimeType = 4
+    if start_frame is not None and end_frame is not None:
+        frame_range = "{0}-{1}".format(start_frame, end_frame)
+        rt.rendPickupFrames = frame_range
+
+
+def get_multipass_setting(project_setting=None):
+    return (project_setting["max"]
+                           ["RenderSettings"]
+                           ["multipass"])
+
+
+def get_max_version():
+    """
+    Args:
+        get max version date for deadline
+
+    Returns:
+        #(25000, 62, 0, 25, 0, 0, 997, 2023, "")
+        max_info[7] = max version date
+    """
+    max_info = rt.maxversion()
+    return max_info[7]
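Note on `set_framerange`: `rendTimeType = 4` switches the 3ds Max render dialog to "user specified frame pickup string" mode, so the range must be expressed as text. A small sketch of the string it produces (the helper below only mirrors the formatting logic, it is not part of the module):

    def pickup_string(start_frame, end_frame):
        """Mirror set_framerange's formatting of rt.rendPickupFrames."""
        if start_frame is not None and end_frame is not None:
            return "{0}-{1}".format(start_frame, end_frame)
        return None  # set_framerange leaves the range untouched in this case

    assert pickup_string(1001, 1100) == "1001-1100"
    assert pickup_string(None, 1100) is None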

openpype/hosts/max/api/lib_renderproducts.py (new file, 114 lines)
@@ -0,0 +1,114 @@
+# Render Element Example : For scanline render, VRay
+# https://help.autodesk.com/view/MAXDEV/2022/ENU/?guid=GUID-E8F75D47-B998-4800-A3A5-610E22913CFC
+# arnold
+# https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_3ds_max_ax_maxscript_commands_ax_renderview_commands_html
+import os
+from pymxs import runtime as rt
+from openpype.hosts.max.api.lib import (
+    get_current_renderer,
+    get_default_render_folder
+)
+from openpype.settings import get_project_settings
+from openpype.pipeline import legacy_io
+
+
+class RenderProducts(object):
+
+    def __init__(self, project_settings=None):
+        self._project_settings = project_settings
+        if not self._project_settings:
+            self._project_settings = get_project_settings(
+                legacy_io.Session["AVALON_PROJECT"]
+            )
+
+    def render_product(self, container):
+        folder = rt.maxFilePath
+        file = rt.maxFileName
+        folder = folder.replace("\\", "/")
+        setting = self._project_settings
+        render_folder = get_default_render_folder(setting)
+        filename, ext = os.path.splitext(file)
+
+        output_file = os.path.join(folder,
+                                   render_folder,
+                                   filename,
+                                   container)
+
+        img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
+        full_render_list = []
+        beauty = self.beauty_render_product(output_file, img_fmt)
+        full_render_list.append(beauty)
+
+        renderer_class = get_current_renderer()
+        renderer = str(renderer_class).split(":")[0]
+
+        if renderer == "VUE_File_Renderer":
+            return full_render_list
+
+        if renderer in [
+            "ART_Renderer",
+            "Redshift_Renderer",
+            "V_Ray_6_Hotfix_3",
+            "V_Ray_GPU_6_Hotfix_3",
+            "Default_Scanline_Renderer",
+            "Quicksilver_Hardware_Renderer",
+        ]:
+            render_elem_list = self.render_elements_product(output_file,
+                                                            img_fmt)
+            if render_elem_list:
+                full_render_list.extend(iter(render_elem_list))
+            return full_render_list
+
+        if renderer == "Arnold":
+            aov_list = self.arnold_render_product(output_file,
+                                                  img_fmt)
+            if aov_list:
+                full_render_list.extend(iter(aov_list))
+            return full_render_list
+
+    def beauty_render_product(self, folder, fmt):
+        beauty_output = f"{folder}.####.{fmt}"
+        beauty_output = beauty_output.replace("\\", "/")
+        return beauty_output
+
+    # TODO: Get the arnold render product
+    def arnold_render_product(self, folder, fmt):
+        """Get all the Arnold AOVs"""
+        aovs = []
+
+        amw = rt.MaxtoAOps.AOVsManagerWindow()
+        aov_mgr = rt.renderers.current.AOVManager
+        # Check if there is any aov group set in AOV manager
+        aov_group_num = len(aov_mgr.drivers)
+        if aov_group_num < 1:
+            return
+        for i in range(aov_group_num):
+            # get the specific AOV group
+            for aov in aov_mgr.drivers[i].aov_list:
+                render_element = f"{folder}_{aov.name}.####.{fmt}"
+                render_element = render_element.replace("\\", "/")
+                aovs.append(render_element)
+        # close the AOVs manager window
+        amw.close()
+
+        return aovs
+
+    def render_elements_product(self, folder, fmt):
+        """Get all the render element output files. """
+        render_dirname = []
+
+        render_elem = rt.maxOps.GetCurRenderElementMgr()
+        render_elem_num = render_elem.NumRenderElements()
+        # get render elements from the renders
+        for i in range(render_elem_num):
+            renderlayer_name = render_elem.GetRenderElement(i)
+            target, renderpass = str(renderlayer_name).split(":")
+            if renderlayer_name.enabled:
+                render_element = f"{folder}_{renderpass}.####.{fmt}"
+                render_element = render_element.replace("\\", "/")
+                render_dirname.append(render_element)
+
+        return render_dirname
+
+    def image_format(self):
+        return self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
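The `####` token in the product paths above is 3ds Max frame padding, so each render product is a pattern rather than a file on disk. Expanding such a pattern into concrete frame names could look like this (the helper is illustrative, not part of the module):

    def expand_product(pattern, start, end):
        """Expand a '####' padded pattern into per-frame file names."""
        pad = pattern.count("#")
        head, tail = pattern.split("#" * pad)
        return ["{0}{1:0{2}d}{3}".format(head, frame, pad, tail)
                for frame in range(start, end + 1)]

    files = expand_product("renders/shot010/beauty.####.exr", 1001, 1003)
    assert files == [
        "renders/shot010/beauty.1001.exr",
        "renders/shot010/beauty.1002.exr",
        "renders/shot010/beauty.1003.exr",
    ]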

openpype/hosts/max/api/lib_rendersettings.py (new file, 168 lines)
@@ -0,0 +1,168 @@
+import os
+from pymxs import runtime as rt
+from openpype.lib import Logger
+from openpype.settings import get_project_settings
+from openpype.pipeline import legacy_io
+from openpype.pipeline.context_tools import get_current_project_asset
+
+from openpype.hosts.max.api.lib import (
+    set_framerange,
+    get_current_renderer,
+    get_default_render_folder
+)
+
+
+class RenderSettings(object):
+
+    log = Logger.get_logger("RenderSettings")
+
+    _aov_chars = {
+        "dot": ".",
+        "dash": "-",
+        "underscore": "_"
+    }
+
+    def __init__(self, project_settings=None):
+        """
+        Set up the naming convention for the render
+        elements for the deadline submission
+        """
+
+        self._project_settings = project_settings
+        if not self._project_settings:
+            self._project_settings = get_project_settings(
+                legacy_io.Session["AVALON_PROJECT"]
+            )
+
+    def set_render_camera(self, selection):
+        for sel in selection:
+            # to avoid Attribute Error from pymxs wrapper
+            found = False
+            if rt.classOf(sel) in rt.Camera.classes:
+                found = True
+                rt.viewport.setCamera(sel)
+                break
+        if not found:
+            raise RuntimeError("Camera not found")
+
+    def render_output(self, container):
+        folder = rt.maxFilePath
+        # hard-coded, should be customized in the setting
+        file = rt.maxFileName
+        folder = folder.replace("\\", "/")
+        # hard-coded, set the renderoutput path
+        setting = self._project_settings
+        render_folder = get_default_render_folder(setting)
+        filename, ext = os.path.splitext(file)
+        output_dir = os.path.join(folder,
+                                  render_folder,
+                                  filename)
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir)
+        # hard-coded, should be customized in the setting
+        context = get_current_project_asset()
+
+        # get project resolution
+        width = context["data"].get("resolutionWidth")
+        height = context["data"].get("resolutionHeight")
+        # Set Frame Range
+        frame_start = context["data"].get("frame_start")
+        frame_end = context["data"].get("frame_end")
+        set_framerange(frame_start, frame_end)
+        # get the production render
+        renderer_class = get_current_renderer()
+        renderer = str(renderer_class).split(":")[0]
+
+        img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
+        output = os.path.join(output_dir, container)
+        try:
+            aov_separator = self._aov_chars[(
+                self._project_settings["maya"]
+                                      ["RenderSettings"]
+                                      ["aov_separator"]
+            )]
+        except KeyError:
+            aov_separator = "."
+        output_filename = "{0}..{1}".format(output, img_fmt)
+        output_filename = output_filename.replace("{aov_separator}",
+                                                  aov_separator)
+        rt.rendOutputFilename = output_filename
+        if renderer == "VUE_File_Renderer":
+            return
+        # TODO: Finish the arnold render setup
+        if renderer == "Arnold":
+            self.arnold_setup()
+
+        if renderer in [
+            "ART_Renderer",
+            "Redshift_Renderer",
+            "V_Ray_6_Hotfix_3",
+            "V_Ray_GPU_6_Hotfix_3",
+            "Default_Scanline_Renderer",
+            "Quicksilver_Hardware_Renderer",
+        ]:
+            self.render_element_layer(output, width, height, img_fmt)
+
+        rt.rendSaveFile = True
+
+    def arnold_setup(self):
+        # get Arnold RenderView run in the background
+        # for setting up renderable camera
+        arv = rt.MAXToAOps.ArnoldRenderView()
+        render_camera = rt.viewport.GetCamera()
+        arv.setOption("Camera", str(render_camera))
+
+        # TODO: add AOVs and extension
+        img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
+        setup_cmd = (
+            f"""
+        amw = MaxtoAOps.AOVsManagerWindow()
+        amw.close()
+        aovmgr = renderers.current.AOVManager
+        aovmgr.drivers = #()
+        img_fmt = "{img_fmt}"
+        if img_fmt == "png" then driver = ArnoldPNGDriver()
+        if img_fmt == "jpg" then driver = ArnoldJPEGDriver()
+        if img_fmt == "exr" then driver = ArnoldEXRDriver()
+        if img_fmt == "tif" then driver = ArnoldTIFFDriver()
+        if img_fmt == "tiff" then driver = ArnoldTIFFDriver()
+        append aovmgr.drivers driver
+        aovmgr.drivers[1].aov_list = #()
+        """)
+
+        rt.execute(setup_cmd)
+        arv.close()
+
+    def render_element_layer(self, dir, width, height, ext):
+        """For Renderers with render elements"""
+        rt.renderWidth = width
+        rt.renderHeight = height
+        render_elem = rt.maxOps.GetCurRenderElementMgr()
+        render_elem_num = render_elem.NumRenderElements()
+        if render_elem_num < 0:
+            return
+
+        for i in range(render_elem_num):
+            renderlayer_name = render_elem.GetRenderElement(i)
+            target, renderpass = str(renderlayer_name).split(":")
+            aov_name = "{0}_{1}..{2}".format(dir, renderpass, ext)
+            render_elem.SetRenderElementFileName(i, aov_name)
+
+    def get_render_output(self, container, output_dir):
+        output = os.path.join(output_dir, container)
+        img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
+        output_filename = "{0}..{1}".format(output, img_fmt)
+        return output_filename
+
+    def get_render_element(self):
+        orig_render_elem = []
+        render_elem = rt.maxOps.GetCurRenderElementMgr()
+        render_elem_num = render_elem.NumRenderElements()
+        if render_elem_num < 0:
+            return
+
+        for i in range(render_elem_num):
+            render_element = render_elem.GetRenderElementFilename(i)
+            orig_render_elem.append(render_element)
+
+        return orig_render_elem
@@ -101,7 +101,9 @@ class MaxCreator(Creator, MaxCreatorBase):
             instance_node = rt.getNodeByName(
                 instance.data.get("instance_node"))
             if instance_node:
-                rt.delete(rt.getNodeByName(instance_node))
+                rt.select(instance_node)
+                rt.execute(f'for o in selection do for c in o.children do c.parent = undefined')  # noqa
+                rt.delete(instance_node)
 
             self._remove_instance_from_context(instance)
 
openpype/hosts/max/plugins/create/create_render.py (new file, 33 lines)
@@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating render."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.hosts.max.api.lib_rendersettings import RenderSettings


class CreateRender(plugin.MaxCreator):
    identifier = "io.openpype.creators.max.render"
    label = "Render"
    family = "maxrender"
    icon = "gear"

    def create(self, subset_name, instance_data, pre_create_data):
        from pymxs import runtime as rt
        sel_obj = list(rt.selection)
        instance = super(CreateRender, self).create(
            subset_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
        container_name = instance.data.get("instance_node")
        container = rt.getNodeByName(container_name)
        # TODO: Disable "Add to Containers?" Panel
        # parent the selected cameras into the container
        for obj in sel_obj:
            obj.parent = container
        # for additional work on the node:
        # instance_node = rt.getNodeByName(instance.get("instance_node"))

        # set viewport camera for rendering (mandatory for deadline)
        RenderSettings().set_render_camera(sel_obj)
        # set output paths for rendering (mandatory for deadline)
        RenderSettings().render_output(container_name)
@@ -1,7 +1,10 @@
import os
from openpype.pipeline import (
    load
    load,
    get_representation_path
)
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib


class FbxLoader(load.LoaderPlugin):

@@ -36,14 +39,26 @@ importFile @"{filepath}" #noPrompt using:FBXIMP
        container_name = f"{name}_CON"

        asset = rt.getNodeByName(f"{name}")
        # rename the container with "_CON"
        container = rt.container(name=container_name)
        asset.Parent = container

        return container
        return containerise(
            name, [asset], context, loader=self.__class__.__name__)

    def update(self, container, representation):
        from pymxs import runtime as rt

        path = get_representation_path(representation)
        node = rt.getNodeByName(container["instance_node"])

        fbx_objects = self.get_container_children(node)
        for fbx_object in fbx_objects:
            fbx_object.source = path

        lib.imprint(container["instance_node"], {
            "representation": str(representation["_id"])
        })

    def remove(self, container):
        from pymxs import runtime as rt

        node = container["node"]
        node = rt.getNodeByName(container["instance_node"])
        rt.delete(node)
@@ -1,7 +1,9 @@
import os
from openpype.pipeline import (
    load
    load, get_representation_path
)
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib


class MaxSceneLoader(load.LoaderPlugin):

@@ -35,16 +37,26 @@ class MaxSceneLoader(load.LoaderPlugin):
            self.log.error("Something failed when loading.")

        max_container = max_containers.pop()
        container_name = f"{name}_CON"
        # rename the container with "_CON"
        # get the original container
        container = rt.container(name=container_name)
        max_container.Parent = container

        return container
        return containerise(
            name, [max_container], context, loader=self.__class__.__name__)

    def update(self, container, representation):
        from pymxs import runtime as rt

        path = get_representation_path(representation)
        node = rt.getNodeByName(container["instance_node"])

        max_objects = self.get_container_children(node)
        for max_object in max_objects:
            max_object.source = path

        lib.imprint(container["instance_node"], {
            "representation": str(representation["_id"])
        })

    def remove(self, container):
        from pymxs import runtime as rt

        node = container["node"]
        node = rt.getNodeByName(container["instance_node"])
        rt.delete(node)
@@ -80,7 +80,7 @@ importFile @"{file_path}" #noPrompt
    def remove(self, container):
        from pymxs import runtime as rt

        node = container["node"]
        node = rt.getNodeByName(container["instance_node"])
        rt.delete(node)

    @staticmethod
openpype/hosts/max/plugins/publish/collect_render.py (new file, 67 lines)
@@ -0,0 +1,67 @@
# -*- coding: utf-8 -*-
"""Collect Render"""
import os
import pyblish.api

from pymxs import runtime as rt
from openpype.pipeline import get_current_asset_name
from openpype.hosts.max.api.lib import get_max_version
from openpype.hosts.max.api.lib_renderproducts import RenderProducts
from openpype.client import get_last_version_by_subset_name


class CollectRender(pyblish.api.InstancePlugin):
    """Collect Render for Deadline"""

    order = pyblish.api.CollectorOrder + 0.01
    label = "Collect 3dsmax Render Layers"
    hosts = ['max']
    families = ["maxrender"]

    def process(self, instance):
        context = instance.context
        folder = rt.maxFilePath
        file = rt.maxFileName
        current_file = os.path.join(folder, file)
        filepath = current_file.replace("\\", "/")

        context.data['currentFile'] = current_file
        asset = get_current_asset_name()

        render_layer_files = RenderProducts().render_product(instance.name)
        folder = folder.replace("\\", "/")

        img_format = RenderProducts().image_format()
        project_name = context.data["projectName"]
        asset_doc = context.data["assetEntity"]
        asset_id = asset_doc["_id"]
        version_doc = get_last_version_by_subset_name(project_name,
                                                      instance.name,
                                                      asset_id)

        self.log.debug("version_doc: {0}".format(version_doc))
        version_int = 1
        if version_doc:
            version_int += int(version_doc["name"])

        self.log.debug(f"Setting {version_int} to context.")
        context.data["version"] = version_int

        # setup the plugin as 3dsmax for the internal renderer
        data = {
            "subset": instance.name,
            "asset": asset,
            "publish": True,
            "maxversion": str(get_max_version()),
            "imageFormat": img_format,
            "family": 'maxrender',
            "families": ['maxrender'],
            "source": filepath,
            "expectedFiles": render_layer_files,
            "plugin": "3dsmax",
            "frameStart": context.data['frameStart'],
            "frameEnd": context.data['frameEnd'],
            "version": version_int
        }
        self.log.info("data: {0}".format(data))
        instance.data.update(data)
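The version lookup above bumps the last published version of the subset by one, defaulting to 1 when nothing was published yet. The logic in isolation, assuming the version document stores its number under `"name"` as queried above:

```python
# Version bump used by CollectRender, isolated for illustration.
def next_version(version_doc):
    version_int = 1
    if version_doc:
        version_int += int(version_doc["name"])
    return version_int

print(next_version(None))          # 1  (first publish)
print(next_version({"name": 12}))  # 13 (bump the latest version)
```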
@@ -4,7 +4,6 @@ from maya import cmds

from openpype.client import get_asset_by_name, get_project
from openpype.pipeline import legacy_io
from . import lib


class ToolWindows:

@@ -58,54 +57,6 @@ def edit_shader_definitions():
    window.show()


def reset_frame_range():
    """Set frame range to current asset"""

    fps = lib.convert_to_maya_fps(
        float(legacy_io.Session.get("AVALON_FPS", 25))
    )
    lib.set_scene_fps(fps)

    # Set frame start/end
    project_name = legacy_io.active_project()
    asset_name = legacy_io.Session["AVALON_ASSET"]
    asset = get_asset_by_name(project_name, asset_name)

    frame_start = asset["data"].get("frameStart")
    frame_end = asset["data"].get("frameEnd")
    # Backwards compatibility
    if frame_start is None or frame_end is None:
        frame_start = asset["data"].get("edit_in")
        frame_end = asset["data"].get("edit_out")

    if frame_start is None or frame_end is None:
        cmds.warning("No edit information found for %s" % asset_name)
        return

    handles = asset["data"].get("handles") or 0
    handle_start = asset["data"].get("handleStart")
    if handle_start is None:
        handle_start = handles

    handle_end = asset["data"].get("handleEnd")
    if handle_end is None:
        handle_end = handles

    frame_start -= int(handle_start)
    frame_end += int(handle_end)

    cmds.playbackOptions(minTime=frame_start)
    cmds.playbackOptions(maxTime=frame_end)
    cmds.playbackOptions(animationStartTime=frame_start)
    cmds.playbackOptions(animationEndTime=frame_end)
    cmds.playbackOptions(minTime=frame_start)
    cmds.playbackOptions(maxTime=frame_end)
    cmds.currentTime(frame_start)

    cmds.setAttr("defaultRenderGlobals.startFrame", frame_start)
    cmds.setAttr("defaultRenderGlobals.endFrame", frame_end)


def _resolution_from_document(doc):
    if not doc or "data" not in doc:
        print("Entered document is not valid. \"{}\"".format(str(doc)))
@@ -11,6 +11,7 @@ import maya.mel as mel
from openpype import resources
from openpype.tools.utils import host_tools
from .lib import get_main_window
from ..tools import show_look_assigner

log = logging.getLogger(__name__)


@@ -112,7 +113,7 @@ def override_toolbox_ui():
        annotation="Look Manager",
        label="Look Manager",
        image=os.path.join(icons, "lookmanager.png"),
        command=host_tools.show_look_assigner,
        command=show_look_assigner,
        width=icon_size,
        height=icon_size,
        parent=parent
@@ -4,7 +4,6 @@ import os
import sys
import platform
import uuid
import math
import re

import json

@@ -15,7 +14,7 @@ from math import ceil
from six import string_types

from maya import cmds, mel
import maya.api.OpenMaya as om
from maya.api import OpenMaya

from openpype.client import (
    get_project,

@@ -34,7 +33,6 @@ from openpype.pipeline import (
    registered_host,
)
from openpype.pipeline.context_tools import get_current_project_asset
from .commands import reset_frame_range


self = sys.modules[__name__]
@@ -404,9 +402,9 @@ def lsattrs(attrs):

    """

    dep_fn = om.MFnDependencyNode()
    dag_fn = om.MFnDagNode()
    selection_list = om.MSelectionList()
    dep_fn = OpenMaya.MFnDependencyNode()
    dag_fn = OpenMaya.MFnDagNode()
    selection_list = OpenMaya.MSelectionList()

    first_attr = next(iter(attrs))

@@ -420,7 +418,7 @@ def lsattrs(attrs):
    matches = set()
    for i in range(selection_list.length()):
        node = selection_list.getDependNode(i)
        if node.hasFn(om.MFn.kDagNode):
        if node.hasFn(OpenMaya.MFn.kDagNode):
            fn_node = dag_fn.setObject(node)
            full_path_names = [path.fullPathName()
                               for path in fn_node.getAllPaths()]

@@ -869,11 +867,11 @@ def maintained_selection_api():
    Warning: This is *not* added to the undo stack.

    """
    original = om.MGlobal.getActiveSelectionList()
    original = OpenMaya.MGlobal.getActiveSelectionList()
    try:
        yield
    finally:
        om.MGlobal.setActiveSelectionList(original)
        OpenMaya.MGlobal.setActiveSelectionList(original)


@contextlib.contextmanager

@@ -1283,11 +1281,11 @@ def get_id(node):
    if node is None:
        return

    sel = om.MSelectionList()
    sel = OpenMaya.MSelectionList()
    sel.add(node)

    api_node = sel.getDependNode(0)
    fn = om.MFnDependencyNode(api_node)
    fn = OpenMaya.MFnDependencyNode(api_node)

    if not fn.hasAttribute("cbId"):
        return

@@ -1998,7 +1996,7 @@ def set_scene_fps(fps, update=True):
        '48000': '48000fps'
    }

    unit = fps_mapping.get(str(fps), None)
    unit = fps_mapping.get(str(convert_to_maya_fps(fps)), None)
    if unit is None:
        raise ValueError("Unsupported FPS value: `%s`" % fps)

@@ -2065,6 +2063,78 @@ def set_scene_resolution(width, height, pixelAspect):
    cmds.setAttr("%s.pixelAspect" % control_node, pixelAspect)


def get_frame_range():
    """Get the current asset's frame range and handles."""

    # Set frame start/end
    project_name = legacy_io.active_project()
    asset_name = legacy_io.Session["AVALON_ASSET"]
    asset = get_asset_by_name(project_name, asset_name)

    frame_start = asset["data"].get("frameStart")
    frame_end = asset["data"].get("frameEnd")
    # Backwards compatibility
    if frame_start is None or frame_end is None:
        frame_start = asset["data"].get("edit_in")
        frame_end = asset["data"].get("edit_out")

    if frame_start is None or frame_end is None:
        cmds.warning("No edit information found for %s" % asset_name)
        return

    handles = asset["data"].get("handles") or 0
    handle_start = asset["data"].get("handleStart")
    if handle_start is None:
        handle_start = handles

    handle_end = asset["data"].get("handleEnd")
    if handle_end is None:
        handle_end = handles

    return {
        "frameStart": frame_start,
        "frameEnd": frame_end,
        "handleStart": handle_start,
        "handleEnd": handle_end
    }


def reset_frame_range(playback=True, render=True, fps=True):
    """Set frame range to current asset

    Args:
        playback (bool, Optional): Whether to set the maya timeline playback
            frame range. Defaults to True.
        render (bool, Optional): Whether to set the maya render frame range.
            Defaults to True.
        fps (bool, Optional): Whether to set scene FPS. Defaults to True.
    """

    if fps:
        fps = convert_to_maya_fps(
            float(legacy_io.Session.get("AVALON_FPS", 25))
        )
        set_scene_fps(fps)

    frame_range = get_frame_range()

    frame_start = frame_range["frameStart"] - int(frame_range["handleStart"])
    frame_end = frame_range["frameEnd"] + int(frame_range["handleEnd"])

    if playback:
        cmds.playbackOptions(minTime=frame_start)
        cmds.playbackOptions(maxTime=frame_end)
        cmds.playbackOptions(animationStartTime=frame_start)
        cmds.playbackOptions(animationEndTime=frame_end)
        cmds.playbackOptions(minTime=frame_start)
        cmds.playbackOptions(maxTime=frame_end)
        cmds.currentTime(frame_start)

    if render:
        cmds.setAttr("defaultRenderGlobals.startFrame", frame_start)
        cmds.setAttr("defaultRenderGlobals.endFrame", frame_end)


def reset_scene_resolution():
    """Apply the scene resolution from the project definition
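With the new keyword switches, callers can limit `reset_frame_range` to a single target; the render settings change later in this diff uses exactly that to sync only `defaultRenderGlobals`. A sketch of the calls, which assume a running Maya session with an active OpenPype context:

```python
from openpype.hosts.maya.api.lib import reset_frame_range

reset_frame_range()                           # timeline, render range and FPS
reset_frame_range(playback=False, fps=False)  # only the render frame range
```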
@@ -3294,15 +3364,15 @@ def iter_visible_nodes_in_range(nodes, start, end):
    @memodict
    def get_visibility_mplug(node):
        """Return api 2.0 MPlug with cached memoize decorator"""
        sel = om.MSelectionList()
        sel = OpenMaya.MSelectionList()
        sel.add(node)
        dag = sel.getDagPath(0)
        return om.MFnDagNode(dag).findPlug("visibility", True)
        return OpenMaya.MFnDagNode(dag).findPlug("visibility", True)

    @contextlib.contextmanager
    def dgcontext(mtime):
        """MDGContext context manager"""
        context = om.MDGContext(mtime)
        context = OpenMaya.MDGContext(mtime)
        try:
            previous = context.makeCurrent()
            yield context

@@ -3311,9 +3381,9 @@ def iter_visible_nodes_in_range(nodes, start, end):

    # We skip the first frame as we already used that frame to check for
    # overall visibilities. And end+1 to include the end frame.
    scene_units = om.MTime.uiUnit()
    scene_units = OpenMaya.MTime.uiUnit()
    for frame in range(start + 1, end + 1):
        mtime = om.MTime(frame, unit=scene_units)
        mtime = OpenMaya.MTime(frame, unit=scene_units)

        # Build little cache so we don't query the same MPlug's value
        # again if it was checked on this frame and also is a dependency

@@ -3407,11 +3477,11 @@ def convert_to_maya_fps(fps):
    # If input fps is a whole number we'll return.
    if float(fps).is_integer():
        # Validate fps is part of Maya's fps selection.
        if fps not in int_framerates:
        if int(fps) not in int_framerates:
            raise ValueError(
                "Framerate \"{}\" is not supported in Maya".format(fps)
            )
        return fps
        return int(fps)
    else:
        # Differences to supported float frame rates.
        differences = []
@@ -3462,3 +3532,146 @@ def write_xgen_file(data, filepath):

    with open(filepath, "w") as f:
        f.writelines(lines)


def get_color_management_preferences():
    """Get and resolve OCIO preferences."""
    data = {
        # Is color management enabled.
        "enabled": cmds.colorManagementPrefs(
            query=True, cmEnabled=True
        ),
        "rendering_space": cmds.colorManagementPrefs(
            query=True, renderingSpaceName=True
        ),
        "output_transform": cmds.colorManagementPrefs(
            query=True, outputTransformName=True
        ),
        "output_transform_enabled": cmds.colorManagementPrefs(
            query=True, outputTransformEnabled=True
        ),
        "view_transform": cmds.colorManagementPrefs(
            query=True, viewTransformName=True
        )
    }

    # Split view and display from view_transform. view_transform comes in
    # format of "{view} ({display})".
    regex = re.compile(r"^(?P<view>.+) \((?P<display>.+)\)$")
    match = regex.match(data["view_transform"])
    data.update({
        "display": match.group("display"),
        "view": match.group("view")
    })

    # Get config absolute path.
    path = cmds.colorManagementPrefs(
        query=True, configFilePath=True
    )

    # The OCIO config supports a custom <MAYA_RESOURCES> token.
    maya_resources_token = "<MAYA_RESOURCES>"
    maya_resources_path = OpenMaya.MGlobal.getAbsolutePathToResources()
    path = path.replace(maya_resources_token, maya_resources_path)

    data["config"] = path

    return data


def get_color_management_output_transform():
    preferences = get_color_management_preferences()
    colorspace = preferences["rendering_space"]
    if preferences["output_transform_enabled"]:
        colorspace = preferences["output_transform"]
    return colorspace
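Maya reports the view transform as a single `"{view} ({display})"` string; the regex above splits it back apart. A standalone check of that split, using a sample string in Maya's formatting:

```python
import re

# Same pattern as get_color_management_preferences() above.
regex = re.compile(r"^(?P<view>.+) \((?P<display>.+)\)$")
match = regex.match("ACES 1.0 SDR-video (sRGB)")
print(match.group("view"))     # ACES 1.0 SDR-video
print(match.group("display"))  # sRGB
```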
def image_info(file_path):
    # type: (str) -> dict
    """Based on the texture path, get its bit depth and format information.
    Take reference from makeTx.py in Arnold:
        ImageInfo(filename): Get Image Information for colorspace
        AiTextureGetFormat(filename): Get Texture Format
        AiTextureGetBitDepth(filename): Get Texture bit depth
    Args:
        file_path (str): Path to the texture file.
    Returns:
        dict: Dictionary with the information about the texture file.
    """
    from arnold import (
        AiTextureGetBitDepth,
        AiTextureGetFormat
    )
    # Get Texture Information
    img_info = {'filename': file_path}
    if os.path.isfile(file_path):
        img_info['bit_depth'] = AiTextureGetBitDepth(file_path)  # noqa
        img_info['format'] = AiTextureGetFormat(file_path)  # noqa
    else:
        img_info['bit_depth'] = 8
        img_info['format'] = "unknown"
    return img_info


def guess_colorspace(img_info):
    # type: (dict) -> str
    """Guess the colorspace of the input image filename.
    Note:
        Reference from makeTx.py
    Args:
        img_info (dict): Image info generated by :func:`image_info`
    Returns:
        str: color space name used in the `--colorconvert`
            option of maketx.
    """
    from arnold import (
        AiTextureInvalidate,
        # types
        AI_TYPE_BYTE,
        AI_TYPE_INT,
        AI_TYPE_UINT
    )
    try:
        if img_info['bit_depth'] <= 16:
            if img_info['format'] in (AI_TYPE_BYTE, AI_TYPE_INT, AI_TYPE_UINT):  # noqa
                return 'sRGB'
            else:
                return 'linear'
        # now discard the image file as AiTextureGetFormat has loaded it
        AiTextureInvalidate(img_info['filename'])  # noqa
    except ValueError:
        print(("[maketx] Error: Could not guess "
               "colorspace for {}").format(img_info["filename"]))
        return "linear"
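The guess boils down to one rule: integer formats at 16 bits or less are treated as sRGB, everything else as linear. A minimal restatement, where `INT_FORMATS` stands in for Arnold's `AI_TYPE_BYTE`/`AI_TYPE_INT`/`AI_TYPE_UINT` constants:

```python
# Pure-logic restatement of guess_colorspace(); INT_FORMATS is a
# placeholder for the Arnold integer type constants.
INT_FORMATS = {"byte", "int", "uint"}

def guess(bit_depth, fmt):
    if bit_depth <= 16 and fmt in INT_FORMATS:
        return "sRGB"
    return "linear"

print(guess(8, "byte"))    # sRGB   (typical PNG/JPEG)
print(guess(32, "float"))  # linear (typical EXR)
```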
def len_flattened(components):
    """Return the length of the list as if it was flattened.

    Maya will return consecutive components as a single entry
    when requesting with `maya.cmds.ls` without the `flatten`
    flag. Though enabling `flatten` on a large list (e.g. millions)
    will result in a slow result. This command will return the amount
    of entries in a non-flattened list by parsing the result with
    regex.

    Args:
        components (list): The non-flattened components.

    Returns:
        int: The amount of entries.

    """
    assert isinstance(components, (list, tuple))
    n = 0

    pattern = re.compile(r"\[(\d+):(\d+)\]")
    for c in components:
        match = pattern.search(c)
        if match:
            start, end = match.groups()
            n += int(end) - int(start) + 1
        else:
            n += 1
    return n
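Quick usage check for `len_flattened`, assuming an environment where the OpenPype Maya API is importable; a `[start:end]` range counts inclusively, a plain index counts as one:

```python
from openpype.hosts.maya.api.lib import len_flattened

# "[0:7]" covers vertices 0 through 7, i.e. 8 entries.
print(len_flattened(["pCube1.vtx[0:7]"]))                   # 8
print(len_flattened(["pCube1.vtx[0:7]", "pCube1.vtx[9]"]))  # 9
```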
@@ -46,6 +46,7 @@ import attr

from . import lib
from . import lib_rendersetup
from openpype.pipeline.colorspace import get_ocio_config_views

from maya import cmds, mel


@@ -127,6 +128,7 @@ class RenderProduct(object):
    """
    productName = attr.ib()
    ext = attr.ib()  # extension
    colorspace = attr.ib()  # colorspace
    aov = attr.ib(default=None)  # source aov
    driver = attr.ib(default=None)  # source driver
    multipart = attr.ib(default=False)  # multichannel file

@@ -196,12 +198,18 @@ class ARenderProducts:
        """Constructor."""
        self.layer = layer
        self.render_instance = render_instance
        self.multipart = False
        self.multipart = self.get_multipart()

        # Initialize
        self.layer_data = self._get_layer_data()
        self.layer_data.products = self.get_render_products()

    def get_multipart(self):
        raise NotImplementedError(
            "The render product implementation does not have a "
            "\"get_multipart\" method."
        )

    def has_camera_token(self):
        # type: () -> bool
        """Check if camera token is in image prefix.

@@ -344,7 +352,6 @@ class ARenderProducts:
        separator = file_prefix[matches[0].end(1):matches[1].start(1)]
        return separator


    def _get_layer_data(self):
        # type: () -> LayerMetadata
        # ______________________________________________

@@ -531,16 +538,20 @@ class RenderProductsArnold(ARenderProducts):

        return prefix

    def _get_aov_render_products(self, aov, cameras=None):
        """Return all render products for the AOV"""

        products = []
        aov_name = self._get_attr(aov, "name")
    def get_multipart(self):
        multipart = False
        multilayer = bool(self._get_attr("defaultArnoldDriver.multipart"))
        merge_AOVs = bool(self._get_attr("defaultArnoldDriver.mergeAOVs"))
        if multilayer or merge_AOVs:
            multipart = True

        return multipart

    def _get_aov_render_products(self, aov, cameras=None):
        """Return all render products for the AOV"""

        products = []
        aov_name = self._get_attr(aov, "name")
        ai_drivers = cmds.listConnections("{}.outputs".format(aov),
                                          source=True,
                                          destination=False,

@@ -553,6 +564,9 @@ class RenderProductsArnold(ARenderProducts):
        ]

        for ai_driver in ai_drivers:
            colorspace = self._get_colorspace(
                ai_driver + ".colorManagement"
            )
            # todo: check aiAOVDriver.prefix as it could have
            # a custom path prefix set for this driver

@@ -590,12 +604,15 @@ class RenderProductsArnold(ARenderProducts):
            global_aov = self._get_attr(aov, "globalAov")
            if global_aov:
                for camera in cameras:
                    product = RenderProduct(productName=name,
                                            ext=ext,
                                            aov=aov_name,
                                            driver=ai_driver,
                                            multipart=multipart,
                                            camera=camera)
                    product = RenderProduct(
                        productName=name,
                        ext=ext,
                        aov=aov_name,
                        driver=ai_driver,
                        multipart=self.multipart,
                        camera=camera,
                        colorspace=colorspace
                    )
                    products.append(product)

            all_light_groups = self._get_attr(aov, "lightGroups")

@@ -603,13 +620,16 @@ class RenderProductsArnold(ARenderProducts):
                # All light groups is enabled. A single multipart
                # Render Product
                for camera in cameras:
                    product = RenderProduct(productName=name + "_lgroups",
                                            ext=ext,
                                            aov=aov_name,
                                            driver=ai_driver,
                                            # Always multichannel output
                                            multipart=True,
                                            camera=camera)
                    product = RenderProduct(
                        productName=name + "_lgroups",
                        ext=ext,
                        aov=aov_name,
                        driver=ai_driver,
                        # Always multichannel output
                        multipart=True,
                        camera=camera,
                        colorspace=colorspace
                    )
                    products.append(product)
            else:
                value = self._get_attr(aov, "lightGroupsList")

@@ -625,12 +645,36 @@ class RenderProductsArnold(ARenderProducts):
                        aov=aov_name,
                        driver=ai_driver,
                        ext=ext,
                        camera=camera
                        camera=camera,
                        colorspace=colorspace
                    )
                    products.append(product)

        return products

    def _get_colorspace(self, attribute):
        """Resolve colorspace from Arnold settings."""

        def _view_transform():
            preferences = lib.get_color_management_preferences()
            views_data = get_ocio_config_views(preferences["config"])
            view_data = views_data[
                "{}/{}".format(preferences["display"], preferences["view"])
            ]
            return view_data["colorspace"]

        def _raw():
            preferences = lib.get_color_management_preferences()
            return preferences["rendering_space"]

        resolved_values = {
            "Raw": _raw,
            "Use View Transform": _view_transform,
            # Default. Same as Maya Preferences.
            "Use Output Transform": lib.get_color_management_output_transform
        }
        return resolved_values[self._get_attr(attribute)]()

    def get_render_products(self):
        """Get all AOVs.

@@ -659,11 +703,19 @@ class RenderProductsArnold(ARenderProducts):
        ]

        default_ext = self._get_attr("defaultRenderGlobals.imfPluginKey")
        beauty_products = [RenderProduct(
            productName="beauty",
            ext=default_ext,
            driver="defaultArnoldDriver",
            camera=camera) for camera in cameras]
        colorspace = self._get_colorspace(
            "defaultArnoldDriver.colorManagement"
        )
        beauty_products = [
            RenderProduct(
                productName="beauty",
                ext=default_ext,
                driver="defaultArnoldDriver",
                camera=camera,
                colorspace=colorspace
            ) for camera in cameras
        ]

        # AOVs > Legacy > Maya Render View > Mode
        aovs_enabled = bool(
            self._get_attr("defaultArnoldRenderOptions.aovMode")

@@ -731,6 +783,14 @@ class RenderProductsVray(ARenderProducts):

    renderer = "vray"

    def get_multipart(self):
        multipart = False
        image_format = self._get_attr("vraySettings.imageFormatStr")
        if image_format == "exr (multichannel)":
            multipart = True

        return multipart

    def get_renderer_prefix(self):
        # type: () -> str
        """Get image prefix for V-Ray.

@@ -804,23 +864,29 @@ class RenderProductsVray(ARenderProducts):
        if not dont_save_rgb:
            for camera in cameras:
                products.append(
                    RenderProduct(productName="",
                                  ext=default_ext,
                                  camera=camera))
                    RenderProduct(
                        productName="",
                        ext=default_ext,
                        camera=camera,
                        colorspace=lib.get_color_management_output_transform(),
                        multipart=self.multipart
                    )
                )

        # separate alpha file
        separate_alpha = self._get_attr("vraySettings.separateAlpha")
        if separate_alpha:
            for camera in cameras:
                products.append(
                    RenderProduct(productName="Alpha",
                                  ext=default_ext,
                                  camera=camera)
                    RenderProduct(
                        productName="Alpha",
                        ext=default_ext,
                        camera=camera,
                        multipart=self.multipart
                    )
                )

        if image_format_str == "exr (multichannel)":
        if self.multipart:
            # AOVs are merged in multi-channel file, only main layer is rendered
            self.multipart = True
            return products

        # handle aovs from references

@@ -858,10 +924,13 @@ class RenderProductsVray(ARenderProducts):

        aov_name = self._get_vray_aov_name(aov)
        for camera in cameras:
            product = RenderProduct(productName=aov_name,
                                    ext=default_ext,
                                    aov=aov,
                                    camera=camera)
            product = RenderProduct(
                productName=aov_name,
                ext=default_ext,
                aov=aov,
                camera=camera,
                colorspace=lib.get_color_management_output_transform()
            )
            products.append(product)

        return products

@@ -979,6 +1048,34 @@ class RenderProductsRedshift(ARenderProducts):

    renderer = "redshift"
    unmerged_aovs = {"Cryptomatte"}

    def get_files(self, product):
        # When outputting AOVs we need to replace Redshift specific AOV tokens
        # with Maya render tokens for generating file sequences. We validate to
        # a specific AOV fileprefix so we only need to account for one
        # replacement.
        if not product.multipart and product.driver:
            file_prefix = self._get_attr(product.driver + ".filePrefix")
            self.layer_data.filePrefix = file_prefix.replace(
                "<BeautyPath>/<BeautyFile>",
                "<Scene>/<RenderLayer>/<RenderLayer>"
            )

        return super(RenderProductsRedshift, self).get_files(product)

    def get_multipart(self):
        # For Redshift we don't directly return upon forcing multilayer
        # due to some AOVs still being written into separate files,
        # like Cryptomatte.
        # AOVs are merged in multi-channel file
        multipart = False
        force_layer = bool(
            self._get_attr("redshiftOptions.exrForceMultilayer")
        )
        if force_layer:
            multipart = True

        return multipart

    def get_renderer_prefix(self):
        """Get image prefix for Redshift.
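The token swap in `get_files` turns Redshift's beauty-relative AOV prefix into Maya render tokens so expected file sequences resolve per render layer. An illustration with a typical Redshift AOV `filePrefix` value (the sample prefix is an assumption):

```python
# Illustration of the <BeautyPath>/<BeautyFile> replacement above.
file_prefix = "<BeautyPath>/<BeautyFile>.<RenderPass>"
maya_prefix = file_prefix.replace(
    "<BeautyPath>/<BeautyFile>",
    "<Scene>/<RenderLayer>/<RenderLayer>"
)
print(maya_prefix)  # <Scene>/<RenderLayer>/<RenderLayer>.<RenderPass>
```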
@@ -1018,16 +1115,6 @@ class RenderProductsRedshift(ARenderProducts):
            for c in self.get_renderable_cameras()
        ]

        # For Redshift we don't directly return upon forcing multilayer
        # due to some AOVs still being written into separate files,
        # like Cryptomatte.
        # AOVs are merged in multi-channel file
        multipart = False
        force_layer = bool(self._get_attr("redshiftOptions.exrForceMultilayer"))  # noqa
        exMultipart = bool(self._get_attr("redshiftOptions.exrMultipart"))
        if exMultipart or force_layer:
            multipart = True

        # Get Redshift Extension from image format
        image_format = self._get_attr("redshiftOptions.imageFormat")  # integer
        ext = mel.eval("redshiftGetImageExtension(%i)" % image_format)

@@ -1049,7 +1136,7 @@ class RenderProductsRedshift(ARenderProducts):
                continue

            aov_type = self._get_attr(aov, "aovType")
            if multipart and aov_type not in self.unmerged_aovs:
            if self.multipart and aov_type not in self.unmerged_aovs:
                continue

            # Any AOVs that still get processed, like Cryptomatte

@@ -1084,8 +1171,9 @@ class RenderProductsRedshift(ARenderProducts):
                    productName=aov_light_group_name,
                    aov=aov_name,
                    ext=ext,
                    multipart=multipart,
                    camera=camera)
                    multipart=False,
                    camera=camera,
                    driver=aov)
                products.append(product)

        if light_groups:

@@ -1098,8 +1186,9 @@ class RenderProductsRedshift(ARenderProducts):
            product = RenderProduct(productName=aov_name,
                                    aov=aov_name,
                                    ext=ext,
                                    multipart=multipart,
                                    camera=camera)
                                    multipart=False,
                                    camera=camera,
                                    driver=aov)
            products.append(product)

        # When a Beauty AOV is added manually, it will be rendered as

@@ -1114,7 +1203,7 @@ class RenderProductsRedshift(ARenderProducts):
        products.insert(0,
                        RenderProduct(productName=beauty_name,
                                      ext=ext,
                                      multipart=multipart,
                                      multipart=self.multipart,
                                      camera=camera))

        return products

@@ -1134,6 +1223,10 @@ class RenderProductsRenderman(ARenderProducts):

    renderer = "renderman"
    unmerged_aovs = {"PxrCryptomatte"}

    def get_multipart(self):
        # Implemented as display specific in "get_render_products".
        return False

    def get_render_products(self):
        """Get all AOVs.

@@ -1273,6 +1366,10 @@ class RenderProductsMayaHardware(ARenderProducts):
        {"label": "EXR(exr)", "index": 40, "extension": "exr"}
    ]

    def get_multipart(self):
        # MayaHardware does not support multipart EXRs.
        return False

    def _get_extension(self, value):
        result = None
        if isinstance(value, int):

@@ -1317,7 +1414,12 @@ class RenderProductsMayaHardware(ARenderProducts):

        products = []
        for cam in self.get_renderable_cameras():
            product = RenderProduct(productName="beauty", ext=ext, camera=cam)
            product = RenderProduct(
                productName="beauty",
                ext=ext,
                camera=cam,
                colorspace=lib.get_color_management_output_transform()
            )
            products.append(product)

        return products
@@ -14,7 +14,7 @@ from openpype.settings import (
from openpype.pipeline import legacy_io
from openpype.pipeline import CreatorError
from openpype.pipeline.context_tools import get_current_project_asset
from openpype.hosts.maya.api.commands import reset_frame_range
from openpype.hosts.maya.api.lib import reset_frame_range


class RenderSettings(object):

@@ -23,7 +23,8 @@ class RenderSettings(object):
        'vray': 'vraySettings.fileNamePrefix',
        'arnold': 'defaultRenderGlobals.imageFilePrefix',
        'renderman': 'rmanGlobals.imageFileFormat',
        'redshift': 'defaultRenderGlobals.imageFilePrefix'
        'redshift': 'defaultRenderGlobals.imageFilePrefix',
        'mayahardware2': 'defaultRenderGlobals.imageFilePrefix'
    }

    _image_prefixes = {

@@ -157,7 +158,7 @@ class RenderSettings(object):
        cmds.setAttr(
            "defaultArnoldDriver.mergeAOVs", multi_exr)
        self._additional_attribs_setter(additional_options)
        reset_frame_range()
        reset_frame_range(playback=False, fps=False, render=True)

    def _set_redshift_settings(self, width, height):
        """Sets settings for Redshift."""

@@ -335,7 +336,8 @@ class RenderSettings(object):
        )

        # Set render file format to exr
        cmds.setAttr("{}.imageFormatStr".format(node), "exr", type="string")
        ext = vray_render_presets["image_format"]
        cmds.setAttr("{}.imageFormatStr".format(node), ext, type="string")

        # animType
        cmds.setAttr("{}.animType".format(node), 1)
@@ -12,7 +12,7 @@ from openpype.pipeline.workfile import BuildWorkfile
from openpype.tools.utils import host_tools
from openpype.hosts.maya.api import lib, lib_rendersettings
from .lib import get_main_window, IS_HEADLESS
from .commands import reset_frame_range
from ..tools import show_look_assigner

from .workfile_template_builder import (
    create_placeholder,

@@ -112,12 +112,12 @@ def install():
        )

        cmds.menuItem(
            "Reset Frame Range",
            command=lambda *args: reset_frame_range()
            "Set Frame Range",
            command=lambda *args: lib.reset_frame_range()
        )

        cmds.menuItem(
            "Reset Resolution",
            "Set Resolution",
            command=lambda *args: lib.reset_scene_resolution()
        )

@@ -140,7 +140,7 @@ def install():

        cmds.menuItem(
            "Look assigner...",
            command=lambda *args: host_tools.show_look_assigner(
            command=lambda *args: show_look_assigner(
                parent_widget
            )
        )
@@ -22,6 +22,8 @@ PLACEHOLDER_SET = "PLACEHOLDERS_SET"
class MayaTemplateBuilder(AbstractTemplateBuilder):
    """Concrete implementation of AbstractTemplateBuilder for maya"""

    use_legacy_creators = True

    def import_template(self, path):
        """Import template into current scene.
        Block if a template is already loaded.
@@ -13,6 +13,7 @@ class CreateAnimation(plugin.Creator):
    icon = "male"
    write_color_sets = False
    write_face_sets = False
    include_user_defined_attributes = False

    def __init__(self, *args, **kwargs):
        super(CreateAnimation, self).__init__(*args, **kwargs)

@@ -47,3 +48,6 @@ class CreateAnimation(plugin.Creator):

        # Default to write normals.
        self.data["writeNormals"] = True

        value = self.include_user_defined_attributes
        self.data["includeUserDefinedAttributes"] = value
@@ -15,6 +15,7 @@ class CreatePointCache(plugin.Creator):
    icon = "gears"
    write_color_sets = False
    write_face_sets = False
    include_user_defined_attributes = False

    def __init__(self, *args, **kwargs):
        super(CreatePointCache, self).__init__(*args, **kwargs)

@@ -33,6 +34,8 @@ class CreatePointCache(plugin.Creator):
        self.data["refresh"] = False  # Default to suspend refresh.

        # Add options for custom attributes
        value = self.include_user_defined_attributes
        self.data["includeUserDefinedAttributes"] = value
        self.data["attr"] = ""
        self.data["attrPrefix"] = ""
@@ -25,16 +25,20 @@ class CreateReview(plugin.Creator):
        "depth peeling",
        "alpha cut"
    ]
    useMayaTimeline = True

    def __init__(self, *args, **kwargs):
        super(CreateReview, self).__init__(*args, **kwargs)

        # get basic animation data : start / end / handles / steps
        data = OrderedDict(**self.data)
        animation_data = lib.collect_animation_data(fps=True)
        for key, value in animation_data.items():

        # Option for using Maya or asset frame range in settings.
        frame_range = lib.get_frame_range()
        if self.useMayaTimeline:
            frame_range = lib.collect_animation_data(fps=True)
        for key, value in frame_range.items():
            data[key] = value

        data["fps"] = lib.collect_animation_data(fps=True)["fps"]
        data["review_width"] = self.Width
        data["review_height"] = self.Height
        data["isolate"] = self.isolate
@@ -9,6 +9,9 @@ class CreateVrayProxy(plugin.Creator):
    family = "vrayproxy"
    icon = "gears"

    vrmesh = True
    alembic = True

    def __init__(self, *args, **kwargs):
        super(CreateVrayProxy, self).__init__(*args, **kwargs)

@@ -18,3 +21,6 @@ class CreateVrayProxy(plugin.Creator):

        # Write vertex colors
        self.data["vertexColors"] = False

        self.data["vrmesh"] = self.vrmesh
        self.data["alembic"] = self.alembic
@@ -134,7 +134,7 @@ class ConnectGeometry(InventoryAction):
            bool
        """

        from Qt import QtWidgets
        from qtpy import QtWidgets

        accept = QtWidgets.QMessageBox.Ok
        if show_cancel:
@@ -149,7 +149,7 @@ class ConnectXgen(InventoryAction):
            bool
        """

        from Qt import QtWidgets
        from qtpy import QtWidgets

        accept = QtWidgets.QMessageBox.Ok
        if show_cancel:
openpype/hosts/maya/plugins/inventory/connect_yeti_rig.py (new file, 178 lines)
@@ -0,0 +1,178 @@
import os
import json
from collections import defaultdict

from maya import cmds

from openpype.pipeline import (
    InventoryAction, get_representation_context, get_representation_path
)
from openpype.hosts.maya.api.lib import get_container_members, get_id


class ConnectYetiRig(InventoryAction):
    """Connect Yeti Rig with an animation or pointcache."""

    label = "Connect Yeti Rig"
    icon = "link"
    color = "white"

    def process(self, containers):
        # Validate selection is more than 1.
        message = (
            "Only 1 container selected. 2+ containers needed for this action."
        )
        if len(containers) == 1:
            self.display_warning(message)
            return

        # Categorize containers by family.
        containers_by_family = defaultdict(list)
        for container in containers:
            family = get_representation_context(
                container["representation"]
            )["subset"]["data"]["family"]
            containers_by_family[family].append(container)

        # Validate to only 1 source container.
        source_containers = containers_by_family.get("animation", [])
        source_containers += containers_by_family.get("pointcache", [])
        source_container_namespaces = [
            x["namespace"] for x in source_containers
        ]
        message = (
            "{} animation containers selected:\n\n{}\n\nOnly select 1 of type "
            "\"animation\" or \"pointcache\".".format(
                len(source_containers), source_container_namespaces
            )
        )
        if len(source_containers) != 1:
            self.display_warning(message)
            return

        source_container = source_containers[0]
        source_ids = self.nodes_by_id(source_container)

        # Target containers.
        target_ids = {}
        inputs = []

        yeti_rig_containers = containers_by_family.get("yetiRig")
        if not yeti_rig_containers:
            self.display_warning(
                "Select at least one yetiRig container"
            )
            return

        for container in yeti_rig_containers:
            target_ids.update(self.nodes_by_id(container))

            maya_file = get_representation_path(
                get_representation_context(
                    container["representation"]
                )["representation"]
            )
            _, ext = os.path.splitext(maya_file)
            settings_file = maya_file.replace(ext, ".rigsettings")
            if not os.path.exists(settings_file):
                continue

            with open(settings_file) as f:
                inputs.extend(json.load(f)["inputs"])

        # Compare loaded connections to scene.
        for input in inputs:
            source_node = source_ids.get(input["sourceID"])
            target_node = target_ids.get(input["destinationID"])

            if not source_node or not target_node:
                self.log.debug(
                    "Could not find nodes for input:\n" +
                    json.dumps(input, indent=4, sort_keys=True)
                )
                continue
            source_attr, target_attr = input["connections"]

            if not cmds.attributeQuery(
                source_attr, node=source_node, exists=True
            ):
                self.log.debug(
                    "Could not find attribute {} on node {} for "
                    "input:\n{}".format(
                        source_attr,
                        source_node,
                        json.dumps(input, indent=4, sort_keys=True)
                    )
                )
                continue

            if not cmds.attributeQuery(
                target_attr, node=target_node, exists=True
            ):
                self.log.debug(
                    "Could not find attribute {} on node {} for "
                    "input:\n{}".format(
                        target_attr,
                        target_node,
                        json.dumps(input, indent=4, sort_keys=True)
                    )
                )
                continue

            source_plug = "{}.{}".format(
                source_node, source_attr
            )
            target_plug = "{}.{}".format(
                target_node, target_attr
            )
            if cmds.isConnected(
                source_plug, target_plug, ignoreUnitConversion=True
            ):
                self.log.debug(
                    "Connection already exists: {} -> {}".format(
                        source_plug, target_plug
                    )
                )
                continue

            cmds.connectAttr(source_plug, target_plug, force=True)
            self.log.debug(
                "Connected attributes: {} -> {}".format(
                    source_plug, target_plug
                )
            )

    def nodes_by_id(self, container):
        ids = {}
        for member in get_container_members(container):
            id = get_id(member)
            if not id:
                continue
            ids[id] = member

        return ids

    def display_warning(self, message, show_cancel=False):
        """Show feedback to user.

        Returns:
            bool
        """

        from qtpy import QtWidgets

        accept = QtWidgets.QMessageBox.Ok
        if show_cancel:
            buttons = accept | QtWidgets.QMessageBox.Cancel
        else:
            buttons = accept

        state = QtWidgets.QMessageBox.warning(
            None,
            "",
            message,
            buttons=buttons,
            defaultButton=accept
        )

        return state == accept
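`ConnectYetiRig` matches nodes across containers by their `cbId` and wires the attribute pairs listed in the rig's `.rigsettings` sidecar. A sketch of the structure the loop above expects; the IDs and attribute names are illustrative placeholders, only the keys come from the code:

```python
# Shape of the data read via json.load(f)["inputs"] above.
rigsettings = {
    "inputs": [
        {
            "sourceID": "5f3c9a...",       # cbId on the animated source node
            "destinationID": "7b2d41...",  # cbId on the Yeti rig input node
            "connections": ["outMesh", "inputGeometry"]  # [source, target]
        }
    ]
}
```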
@@ -2,7 +2,6 @@ import os
import clique

import maya.cmds as cmds
import mtoa.ui.arnoldmenu

from openpype.settings import get_project_settings
from openpype.pipeline import (

@@ -36,6 +35,11 @@ class ArnoldStandinLoader(load.LoaderPlugin):
    color = "orange"

    def load(self, context, name, namespace, options):
        # Make sure to load arnold before importing `mtoa.ui.arnoldmenu`
        cmds.loadPlugin("mtoa", quiet=True)
        import mtoa.ui.arnoldmenu

        version = context['version']
        version_data = version.get("data", {})
@@ -26,6 +26,7 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
                "rig",
                "camerarig",
                "staticMesh",
                "skeletalMesh",
                "mvLook"]

    representations = ["ma", "abc", "fbx", "mb"]
@@ -3,7 +3,7 @@ import os
import maya.cmds as cmds
import xgenm

from Qt import QtWidgets
from qtpy import QtWidgets

import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api.lib import (
@@ -1,17 +1,12 @@
import os
from collections import defaultdict
import maya.cmds as cmds

from openpype.settings import get_project_settings
from openpype.settings import get_current_project_settings
import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api import lib


class YetiRigLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
    """
    This loader will load Yeti rig. You can select something in scene and if it
    has same ID as mesh published with rig, their shapes will be linked
    together.
    """
    """This loader will load Yeti rig."""

    families = ["yetiRig"]
    representations = ["ma"]

@@ -22,72 +17,31 @@ class YetiRigLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
    color = "orange"

    def process_reference(
            self, context, name=None, namespace=None, options=None):
            self, context, name=None, namespace=None, options=None
    ):

        import maya.cmds as cmds

        # get roots of selected hierarchies
        selected_roots = []
        for sel in cmds.ls(sl=True, long=True):
            selected_roots.append(sel.split("|")[1])

        # get all objects under those roots
        selected_hierarchy = []
        for root in selected_roots:
            selected_hierarchy.append(cmds.listRelatives(
                root,
                allDescendents=True) or [])

        # flatten the list and filter only shapes
        shapes_flat = []
        for root in selected_hierarchy:
            shapes = cmds.ls(root, long=True, type="mesh") or []
            for shape in shapes:
                shapes_flat.append(shape)

        # create dictionary of cbId and shape nodes
        scene_lookup = defaultdict(list)
        for node in shapes_flat:
            cb_id = lib.get_id(node)
            scene_lookup[cb_id] = node

        # load rig
        group_name = "{}:{}".format(namespace, name)
        with lib.maintained_selection():
            file_url = self.prepare_root_value(self.fname,
                                               context["project"]["name"])
            nodes = cmds.file(file_url,
                              namespace=namespace,
                              reference=True,
                              returnNewNodes=True,
                              groupReference=True,
                              groupName="{}:{}".format(namespace, name))
            file_url = self.prepare_root_value(
                self.fname, context["project"]["name"]
            )
            nodes = cmds.file(
                file_url,
                namespace=namespace,
                reference=True,
                returnNewNodes=True,
                groupReference=True,
                groupName=group_name
            )

        # for every shape node we've just loaded find matching shape by its
        # cbId in selection. If found outMesh of scene shape will connect to
        # inMesh of loaded shape.
        for destination_node in nodes:
            source_node = scene_lookup[lib.get_id(destination_node)]
            if source_node:
                self.log.info("found: {}".format(source_node))
                self.log.info(
                    "creating connection to {}".format(destination_node))

                cmds.connectAttr("{}.outMesh".format(source_node),
                                 "{}.inMesh".format(destination_node),
                                 force=True)

        groupName = "{}:{}".format(namespace, name)

        settings = get_project_settings(os.environ['AVALON_PROJECT'])
        colors = settings['maya']['load']['colors']

        c = colors.get('yetiRig')
        settings = get_current_project_settings()
        colors = settings["maya"]["load"]["colors"]
        c = colors.get("yetiRig")
        if c is not None:
            cmds.setAttr(groupName + ".useOutlinerColor", 1)
            cmds.setAttr(groupName + ".outlinerColor",
                         (float(c[0])/255),
                         (float(c[1])/255),
                         (float(c[2])/255)
            cmds.setAttr(group_name + ".useOutlinerColor", 1)
            cmds.setAttr(
                group_name + ".outlinerColor",
                (float(c[0]) / 255), (float(c[1]) / 255), (float(c[2]) / 255)
            )
        self[:] = nodes
@@ -46,7 +46,6 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin):

        hierarchy = members + descendants


        # Ignore certain node types (e.g. constraints)
        ignore = cmds.ls(hierarchy, type=self.ignore_type, long=True)
        if ignore:

@@ -58,3 +57,18 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin):

        if instance.data.get("farm"):
            instance.data["families"].append("publish.farm")

        # Collect user defined attributes.
        if not instance.data.get("includeUserDefinedAttributes", False):
            return

        user_defined_attributes = set()
        for node in hierarchy:
            attrs = cmds.listAttr(node, userDefined=True) or list()
            shapes = cmds.listRelatives(node, shapes=True) or list()
            for shape in shapes:
                attrs.extend(cmds.listAttr(shape, userDefined=True) or list())

            user_defined_attributes.update(attrs)

        instance.data["userDefinedAttributes"] = list(user_defined_attributes)
@@ -1,5 +1,4 @@
import copy
from bson.objectid import ObjectId

from maya import cmds
import maya.api.OpenMaya as om

@@ -165,9 +164,8 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
            containers = collect_input_containers(scene_containers,
                                                  nodes)

            inputs = [ObjectId(c["representation"]) for c in containers]
            inputs = [c["representation"] for c in containers]
            instance.data["inputRepresentations"] = inputs

            self.log.info("Collected inputs: %s" % inputs)

    def _collect_renderlayer_inputs(self, scene_containers, instance):
@@ -137,6 +137,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
            # Create the instance
            instance = context.create_instance(objset)
            instance[:] = members_hierarchy
            instance.data["objset"] = objset

            # Store the exact members of the object set
            instance.data["setMembers"] = members
@@ -42,3 +42,18 @@ class CollectPointcache(pyblish.api.InstancePlugin):
        if proxy_set:
            instance.remove(proxy_set)
            instance.data["setMembers"].remove(proxy_set)

        # Collect user defined attributes.
        if not instance.data.get("includeUserDefinedAttributes", False):
            return

        user_defined_attributes = set()
        for node in instance:
            attrs = cmds.listAttr(node, userDefined=True) or list()
            shapes = cmds.listRelatives(node, shapes=True) or list()
            for shape in shapes:
                attrs.extend(cmds.listAttr(shape, userDefined=True) or list())

            user_defined_attributes.update(attrs)

        instance.data["userDefinedAttributes"] = list(user_defined_attributes)
@@ -42,6 +42,7 @@ Provides:
import re
import os
import platform
import json

from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup

@@ -183,7 +184,11 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
            self.log.info("multipart: {}".format(
                multipart))
            assert exp_files, "no file names were generated, this is bug"
            self.log.info(exp_files)
            self.log.info(
                "expected files: {}".format(
                    json.dumps(exp_files, indent=4, sort_keys=True)
                )
            )

            # if we want to attach render to subset, check if we have AOV's
            # in expectedFiles. If so, raise error as we cannot attach AOV

@@ -264,7 +269,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
            self.log.info(full_exp_files)
            self.log.info("collecting layer: {}".format(layer_name))
            # Get layer specific settings, might be overrides

            colorspace_data = lib.get_color_management_preferences()
            data = {
                "subset": expected_layer_name,
                "attachTo": attach_to,

@@ -318,6 +323,9 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                "renderSetupIncludeLights": render_instance.data.get(
                    "renderSetupIncludeLights"
                ),
                "colorspaceConfig": colorspace_data["config"],
                "colorspaceDisplay": colorspace_data["display"],
                "colorspaceView": colorspace_data["view"],
                "strict_error_checking": render_instance.data.get(
                    "strict_error_checking", True
                )
@@ -23,6 +23,11 @@ class CollectReview(pyblish.api.InstancePlugin):

        task = legacy_io.Session["AVALON_TASK"]

        # Get panel.
        instance.data["panel"] = cmds.playblast(
            activeEditor=True
        ).split("|")[-1]

        # get cameras
        members = instance.data['setMembers']
        cameras = cmds.ls(members, long=True,
@@ -9,10 +9,16 @@ class CollectVrayProxy(pyblish.api.InstancePlugin):
     Add `pointcache` family for it.
     """
     order = pyblish.api.CollectorOrder + 0.01
-    label = 'Collect Vray Proxy'
+    label = "Collect Vray Proxy"
     families = ["vrayproxy"]

     def process(self, instance):
         """Collector entry point."""
         if not instance.data.get('families'):
             instance.data["families"] = []
+
+        if instance.data.get("vrmesh"):
+            instance.data["families"].append("vrayproxy.vrmesh")
+
+        if instance.data.get("alembic"):
+            instance.data["families"].append("vrayproxy.alembic")
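Note: splitting `vrayproxy` into `vrayproxy.vrmesh` and `vrayproxy.alembic` sub-families lets the extractors further down (`ExtractVRayProxy`, `ExtractAlembic`) match only the formats the user actually enabled, instead of both firing on every vrayproxy instance. A runnable plain-Python sketch of the fan-out:

def expand_families(instance_data):
    """Append per-format sub-families based on creator toggles."""
    families = instance_data.setdefault("families", [])
    if instance_data.get("vrmesh"):
        families.append("vrayproxy.vrmesh")
    if instance_data.get("alembic"):
        families.append("vrayproxy.alembic")
    return families

# Example: only the Alembic extractor should match this instance.
print(expand_families({"family": "vrayproxy", "alembic": True}))
# -> ['vrayproxy.alembic']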
@@ -22,6 +22,8 @@ class ExtractGLB(publish.Extractor):

         self.log.info("Extracting GLB to: {}".format(path))

+        cmds.loadPlugin("maya2glTF", quiet=True)
+
         nodes = instance[:]

         self.log.info("Instance: {0}".format(nodes))
@@ -45,6 +47,7 @@ class ExtractGLB(publish.Extractor):
             "glb": True,
+            "vno": True  # visibleNodeOnly
         }

         with lib.maintained_selection():
             cmds.select(nodes, hi=True, noExpand=True)
             extract_gltf(staging_dir,
@@ -16,6 +16,7 @@ import pyblish.api
 from openpype.lib import source_hash, run_subprocess
 from openpype.pipeline import legacy_io, publish
 from openpype.hosts.maya.api import lib
+from openpype.hosts.maya.api.lib import image_info, guess_colorspace

 # Modes for transfer
 COPY = 1
@@ -367,16 +368,25 @@ class ExtractLook(publish.Extractor):
         for filepath in files_metadata:

             linearize = False
-            if do_maketx and files_metadata[filepath]["color_space"].lower() == "srgb":  # noqa: E501
-                linearize = True
-                # set its file node to 'raw' as tx will be linearized
-                files_metadata[filepath]["color_space"] = "Raw"
+            # if OCIO color management enabled
+            # it won't take the condition of the files_metadata
+
+            ocio_maya = cmds.colorManagementPrefs(q=True,
+                                                  cmConfigFileEnabled=True,
+                                                  cmEnabled=True)
+
+            if do_maketx and not ocio_maya:
+                if files_metadata[filepath]["color_space"].lower() == "srgb":  # noqa: E501
+                    linearize = True
+                    # set its file node to 'raw' as tx will be linearized
+                    files_metadata[filepath]["color_space"] = "Raw"

             # if do_maketx:
             #     color_space = "Raw"

             source, mode, texture_hash = self._process_texture(
                 filepath,
+                resource,
                 do_maketx,
                 staging=staging_dir,
                 linearize=linearize,
@@ -482,7 +492,8 @@ class ExtractLook(publish.Extractor):
             resources_dir, basename + ext
         )

-    def _process_texture(self, filepath, do_maketx, staging, linearize, force):
+    def _process_texture(self, filepath, resource,
+                         do_maketx, staging, linearize, force):
         """Process a single texture file on disk for publishing.

         This will:
         1. Check whether it's already published, if so it will do hardlink
@@ -524,10 +535,47 @@ class ExtractLook(publish.Extractor):
             texture_hash
         ]
         if linearize:
-            self.log.info("tx: converting sRGB -> linear")
-            additional_args.extend(["--colorconvert", "sRGB", "linear"])
+            if cmds.colorManagementPrefs(query=True, cmEnabled=True):
+                render_colorspace = cmds.colorManagementPrefs(query=True,
+                                                              renderingSpaceName=True)  # noqa
+                config_path = cmds.colorManagementPrefs(query=True,
+                                                        configFilePath=True)  # noqa
+                if not os.path.exists(config_path):
+                    raise RuntimeError("No OCIO config path found!")
+
+                color_space_attr = resource["node"] + ".colorSpace"
+                try:
+                    color_space = cmds.getAttr(color_space_attr)
+                except ValueError:
+                    # node doesn't have color space attribute
+                    if cmds.loadPlugin("mtoa", quiet=True):
+                        img_info = image_info(filepath)
+                        color_space = guess_colorspace(img_info)
+                    else:
+                        color_space = "Raw"
+                self.log.info("tx: converting {0} -> {1}".format(color_space, render_colorspace))  # noqa
+
+                additional_args.extend(["--colorconvert",
+                                        color_space,
+                                        render_colorspace])
+            else:
+                if cmds.loadPlugin("mtoa", quiet=True):
+                    img_info = image_info(filepath)
+                    color_space = guess_colorspace(img_info)
+                    if color_space == "sRGB":
+                        self.log.info("tx: converting sRGB -> linear")
+                        additional_args.extend(["--colorconvert",
+                                                "sRGB",
+                                                "Raw"])
+                    else:
+                        self.log.info("tx: texture's colorspace "
+                                      "is already linear")
+                else:
+                    self.log.warning("cannot guess the colorspace, "
+                                     "color conversion won't be available!")  # noqa
+
         config_path = get_ocio_config_path("nuke-default")
         additional_args.extend(["--colorconfig", config_path])
         # Ensure folder exists
         if not os.path.exists(os.path.dirname(converted)):
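Note: the `additional_args` assembled here are command-line flags for OpenImageIO's `maketx`: `--colorconvert <src> <dst>` bakes a colorspace conversion into the generated .tx, and `--colorconfig <path>` points it at an OCIO config. A sketch of assembling such a command (the texture, output, and config paths are placeholders):

# Sketch: assemble a maketx command line with a baked color conversion.
# `maketx` must be on PATH; file paths below are placeholders.
args = ["maketx", "-v", "-u", "--oiio"]
args += ["--colorconvert", "sRGB", "ACEScg"]        # src -> render space
args += ["--colorconfig", "/path/to/config.ocio"]   # OCIO config to use
args += ["texture.png", "-o", "texture.tx"]
print(" ".join(args))
# Run with e.g. subprocess.check_call(args) once the paths are real.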
@@ -1,4 +1,5 @@
 import os
+import json

 import clique
 import capture
@@ -44,10 +45,6 @@ class ExtractPlayblast(publish.Extractor):
         # get cameras
         camera = instance.data['review_camera']

-        override_viewport_options = (
-            self.capture_preset['Viewport Options']
-            ['override_viewport_options']
-        )
         preset = lib.load_capture_preset(data=self.capture_preset)
         # Grab capture presets from the project settings
         capture_presets = self.capture_preset
@@ -137,6 +134,28 @@ class ExtractPlayblast(publish.Extractor):
         pan_zoom = cmds.getAttr("{}.panZoomEnabled".format(preset["camera"]))
         cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), False)

+        # Need to explicitly enable some viewport changes so the viewport is
+        # refreshed ahead of playblasting.
+        keys = [
+            "useDefaultMaterial",
+            "wireframeOnShaded",
+            "xray",
+            "jointXray",
+            "backfaceCulling"
+        ]
+        viewport_defaults = {}
+        for key in keys:
+            viewport_defaults[key] = cmds.modelEditor(
+                instance.data["panel"], query=True, **{key: True}
+            )
+            if preset["viewport_options"][key]:
+                cmds.modelEditor(
+                    instance.data["panel"], edit=True, **{key: True}
+                )
+
+        override_viewport_options = (
+            capture_presets['Viewport Options']['override_viewport_options']
+        )
         with lib.maintained_time():
             filename = preset.get("filename", "%TEMP%")
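Note: the query/edit pairing above is the usual save-and-restore pattern for `cmds.modelEditor` flags, since each flag is passed as a keyword both when reading and when writing. A standalone sketch of the same pattern (Maya session and an existing model panel assumed):

from maya import cmds

def force_viewport_flags(panel, flags):
    """Remember current modelEditor flag values, then switch `flags` on."""
    defaults = {}
    for key in flags:
        # query=True with the flag keyword reads the current value
        defaults[key] = cmds.modelEditor(panel, query=True, **{key: True})
        cmds.modelEditor(panel, edit=True, **{key: True})
    # Restore later with: cmds.modelEditor(panel, edit=True, **defaults)
    return defaults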
@@ -145,18 +164,27 @@ class ExtractPlayblast(publish.Extractor):
         # playblast and viewer
         preset['viewer'] = False

-        self.log.info('using viewport preset: {}'.format(preset))
-
         # Update preset with current panel setting
         # if override_viewport_options is turned off
         if not override_viewport_options:
-            panel = cmds.getPanel(withFocus=True)
-            panel_preset = capture.parse_active_view()
+            panel_preset = capture.parse_view(instance.data["panel"])
+            panel_preset.pop("camera")
             preset.update(panel_preset)
-            cmds.setFocus(panel)

+        self.log.info(
+            "Using preset:\n{}".format(
+                json.dumps(preset, sort_keys=True, indent=4)
+            )
+        )
+
         path = capture.capture(log=self.log, **preset)

+        # Restoring viewport options.
+        if viewport_defaults:
+            cmds.modelEditor(
+                instance.data["panel"], edit=True, **viewport_defaults
+            )
+
         cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), pan_zoom)

         # Restore panel camera.
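Note: `capture.parse_active_view()` reads whichever panel currently has focus, which is fragile during publishing; `capture.parse_view(panel)` (both from the maya-capture package) reads an explicit panel, so the panel collected earlier into `instance.data["panel"]` can be used and the `getPanel`/`setFocus` juggling goes away. Sketch (Maya session assumed; the panel name is hypothetical):

import capture  # the maya-capture package

panel = "modelPanel4"  # hypothetical; normally collected by CollectReview
preset = capture.parse_view(panel)
preset.pop("camera")   # keep the review camera decided elsewhere
print(sorted(preset))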
@@ -1,5 +1,4 @@
 import os
-import copy

 from maya import cmds

@@ -10,7 +9,6 @@ from openpype.hosts.maya.api.lib import (
     maintained_selection,
     iter_visible_nodes_in_range
 )
-from openpype.lib import StringTemplate


 class ExtractAlembic(publish.Extractor):
@@ -25,7 +23,7 @@ class ExtractAlembic(publish.Extractor):

     label = "Extract Pointcache (Alembic)"
     hosts = ["maya"]
-    families = ["pointcache", "model", "vrayproxy"]
+    families = ["pointcache", "model", "vrayproxy.alembic"]
     targets = ["local", "remote"]

     def process(self, instance):
@@ -41,6 +39,7 @@ class ExtractAlembic(publish.Extractor):

         attrs = instance.data.get("attr", "").split(";")
         attrs = [value for value in attrs if value.strip()]
+        attrs += instance.data.get("userDefinedAttributes", [])
         attrs += ["cbId"]

         attr_prefixes = instance.data.get("attrPrefix", "").split(";")
@@ -115,6 +114,7 @@ class ExtractAlembic(publish.Extractor):

         # Extract proxy.
         if not instance.data.get("proxy"):
+            self.log.info("No proxy nodes found. Skipping proxy extraction.")
             return

         path = path.replace(".abc", "_proxy.abc")
@@ -134,26 +134,14 @@ class ExtractAlembic(publish.Extractor):
             **options
         )

-        template_data = copy.deepcopy(instance.data["anatomyData"])
-        template_data.update({"ext": "abc"})
-        templates = instance.context.data["anatomy"].templates["publish"]
-        published_filename_without_extension = StringTemplate(
-            templates["file"]
-        ).format(template_data).replace(".abc", "_proxy")
-        transfers = []
-        destination = os.path.join(
-            instance.data["resourcesDir"],
-            filename.replace(
-                filename.split(".")[0],
-                published_filename_without_extension
-            )
-        )
-        transfers.append((path, destination))
-
-        for source, destination in transfers:
-            self.log.debug("Transfer: {} > {}".format(source, destination))
-
-        instance.data["transfers"] = transfers
+        representation = {
+            "name": "proxy",
+            "ext": "abc",
+            "files": os.path.basename(path),
+            "stagingDir": dirname,
+            "outputName": "proxy"
+        }
+        instance.data["representations"].append(representation)

     def get_members_and_roots(self, instance):
         return instance[:], instance.data.get("setMembers")
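Note: instead of hand-building publish transfers, the proxy Alembic is now registered as a second representation and the integrator resolves the templated destination path. The minimal shape of such an entry (keys as used above; the path is a placeholder, and `outputName` keeps the proxy from clashing with the main abc):

import os

path = "/tmp/pointcacheMain_proxy.abc"  # placeholder staging file
representation = {
    "name": "proxy",                    # representation name
    "ext": "abc",                       # file extension
    "files": os.path.basename(path),    # single file -> plain string
    "stagingDir": os.path.dirname(path),
    "outputName": "proxy",              # suffix to disambiguate outputs
}
instance_data = {"representations": []}
instance_data["representations"].append(representation)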
@@ -134,8 +134,8 @@ class ExtractThumbnail(publish.Extractor):

         # Update preset with current panel setting
         # if override_viewport_options is turned off
-        if not override_viewport_options:
-            panel = cmds.getPanel(withFocus=True)
+        panel = cmds.getPanel(withFocus=True) or ""
+        if not override_viewport_options and "modelPanel" in panel:
             panel_preset = capture.parse_active_view()
             preset.update(panel_preset)
             cmds.setFocus(panel)
@@ -16,7 +16,7 @@ class ExtractVRayProxy(publish.Extractor):

     label = "VRay Proxy (.vrmesh)"
     hosts = ["maya"]
-    families = ["vrayproxy"]
+    families = ["vrayproxy.vrmesh"]

     def process(self, instance):

@@ -57,6 +57,10 @@ class ValidateFrameRange(pyblish.api.InstancePlugin):

         inst_start = int(instance.data.get("frameStartHandle"))
         inst_end = int(instance.data.get("frameEndHandle"))
+        inst_frame_start = int(instance.data.get("frameStart"))
+        inst_frame_end = int(instance.data.get("frameEnd"))
+        inst_handle_start = int(instance.data.get("handleStart"))
+        inst_handle_end = int(instance.data.get("handleEnd"))

         # basic sanity checks
         assert frame_start_handle <= frame_end_handle, (
@@ -69,24 +73,37 @@ class ValidateFrameRange(pyblish.api.InstancePlugin):
         if [ef for ef in self.exclude_families
                 if instance.data["family"] in ef]:
             return
-        if(inst_start != frame_start_handle):
+        if (inst_start != frame_start_handle):
             errors.append("Instance start frame [ {} ] doesn't "
-                          "match the one set on instance [ {} ]: "
+                          "match the one set on asset [ {} ]: "
                           "{}/{}/{}/{} (handle/start/end/handle)".format(
                               inst_start,
                               frame_start_handle,
                               handle_start, frame_start, frame_end, handle_end
                           ))

-        if(inst_end != frame_end_handle):
+        if (inst_end != frame_end_handle):
             errors.append("Instance end frame [ {} ] doesn't "
-                          "match the one set on instance [ {} ]: "
+                          "match the one set on asset [ {} ]: "
                           "{}/{}/{}/{} (handle/start/end/handle)".format(
                               inst_end,
                               frame_end_handle,
                               handle_start, frame_start, frame_end, handle_end
                           ))

+        checks = {
+            "frame start": (frame_start, inst_frame_start),
+            "frame end": (frame_end, inst_frame_end),
+            "handle start": (handle_start, inst_handle_start),
+            "handle end": (handle_end, inst_handle_end)
+        }
+        for label, values in checks.items():
+            if values[0] != values[1]:
+                errors.append(
+                    "{} on instance ({}) does not match with the asset "
+                    "({}).".format(label.title(), values[1], values[0])
+                )
+
         for e in errors:
             self.log.error(e)
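Note: the `checks` dict collapses four copy-pasted comparisons into one table-driven loop. The same pattern in isolation (pure Python, runnable anywhere):

def mismatches(asset, instance):
    """Compare asset vs instance values key by key; return error strings."""
    checks = {
        "frame start": (asset["frameStart"], instance["frameStart"]),
        "frame end": (asset["frameEnd"], instance["frameEnd"]),
    }
    return [
        "{} on instance ({}) does not match with the asset ({}).".format(
            label.title(), inst_value, asset_value)
        for label, (asset_value, inst_value) in checks.items()
        if asset_value != inst_value
    ]

print(mismatches({"frameStart": 1001, "frameEnd": 1100},
                 {"frameStart": 1001, "frameEnd": 1101}))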
openpype/hosts/maya/plugins/publish/validate_glsl_material.py (new file, 207 lines)

@@ -0,0 +1,207 @@
import os
from maya import cmds

import pyblish.api
from openpype.pipeline.publish import (
    RepairAction,
    ValidateContentsOrder
)
from openpype.pipeline import PublishValidationError


class ValidateGLSLMaterial(pyblish.api.InstancePlugin):
    """
    Validate if the asset uses GLSL Shader
    """

    order = ValidateContentsOrder + 0.1
    families = ['gltf']
    hosts = ['maya']
    label = 'GLSL Shader for GLTF'
    actions = [RepairAction]
    optional = True
    active = True

    def process(self, instance):
        shading_grp = self.get_material_from_shapes(instance)
        if not shading_grp:
            raise PublishValidationError("No shading group found")
        invalid = self.get_texture_shader_invalid(instance)
        if invalid:
            raise PublishValidationError("Non GLSL Shader found: "
                                         "{0}".format(invalid))

    def get_material_from_shapes(self, instance):
        shapes = cmds.ls(instance, type="mesh", long=True)
        for shape in shapes:
            shading_grp = cmds.listConnections(shape,
                                               destination=True,
                                               type="shadingEngine")

            return shading_grp or []

    def get_texture_shader_invalid(self, instance):

        invalid = set()
        shading_grp = self.get_material_from_shapes(instance)
        for shading_group in shading_grp:
            material_name = "{}.surfaceShader".format(shading_group)
            material = cmds.listConnections(material_name,
                                            source=True,
                                            destination=False,
                                            type="GLSLShader")

            if not material:
                # add material name
                material = cmds.listConnections(material_name)[0]
                invalid.add(material)

        return list(invalid)

    @classmethod
    def repair(cls, instance):
        """
        Repair instance by assigning GLSL Shader
        to the material
        """
        cls.assign_glsl_shader(instance)
        return

    @classmethod
    def assign_glsl_shader(cls, instance):
        """
        Converting StingrayPBS material to GLSL Shaders
        for the glb export through Maya2GLTF plugin
        """

        meshes = cmds.ls(instance, type="mesh", long=True)
        cls.log.info("meshes: {}".format(meshes))
        # load the glsl shader plugin
        cmds.loadPlugin("glslShader", quiet=True)

        for mesh in meshes:
            # create glsl shader
            glsl = cmds.createNode('GLSLShader')
            glsl_shading_grp = cmds.sets(name=glsl + "SG", empty=True,
                                         renderable=True, noSurfaceShader=True)
            cmds.connectAttr(glsl + ".outColor",
                             glsl_shading_grp + ".surfaceShader")

            # load the maya2gltf shader
            ogsfx_path = instance.context.data["project_settings"]["maya"]["publish"]["ExtractGLB"]["ogsfx_path"]  # noqa
            if not os.path.exists(ogsfx_path):
                if ogsfx_path:
                    # if the custom ogsfx path is not found
                    # the log below warns the user
                    cls.log.warning("ogsfx shader file "
                                    "not found in {}".format(ogsfx_path))

                cls.log.info("Find the ogsfx shader file in "
                             "default maya directory...")
                # re-direct to search the ogsfx path in maya_dir
                ogsfx_path = os.getenv("MAYA_APP_DIR") + ogsfx_path
                if not os.path.exists(ogsfx_path):
                    raise PublishValidationError("The ogsfx shader file does not "  # noqa
                                                 "exist: {}".format(ogsfx_path))  # noqa

            cmds.setAttr(glsl + ".shader", ogsfx_path, typ="string")
            # list the materials used for the assets
            shading_grp = cmds.listConnections(mesh,
                                               destination=True,
                                               type="shadingEngine")

            # get the materials related to the selected assets
            for material in shading_grp:
                pbs_shader = cmds.listConnections(material,
                                                  destination=True,
                                                  type="StingrayPBS")
                if pbs_shader:
                    cls.pbs_shader_conversion(pbs_shader, glsl)
                # setting up to relink the texture if
                # the mesh is with aiStandardSurface
                arnold_shader = cmds.listConnections(material,
                                                     destination=True,
                                                     type="aiStandardSurface")
                if arnold_shader:
                    cls.arnold_shader_conversion(arnold_shader, glsl)

            cmds.sets(mesh, forceElement=str(glsl_shading_grp))

    @classmethod
    def pbs_shader_conversion(cls, main_shader, glsl):

        cls.log.info("StingrayPBS detected "
                     "-> Can do texture conversion")

        for shader in main_shader:
            # get the file textures related to the PBS Shader
            albedo = cmds.listConnections(shader +
                                          ".TEX_color_map")
            if albedo:
                dif_output = albedo[0] + ".outColor"
                # get the glsl_shader input
                # reconnect the file nodes to maya2gltf shader
                glsl_dif = glsl + ".u_BaseColorTexture"
                cmds.connectAttr(dif_output, glsl_dif)

            # connect orm map if there is one
            orm_packed = cmds.listConnections(shader +
                                              ".TEX_ao_map")
            if orm_packed:
                orm_output = orm_packed[0] + ".outColor"

                mtl = glsl + ".u_MetallicTexture"
                ao = glsl + ".u_OcclusionTexture"
                rough = glsl + ".u_RoughnessTexture"

                cmds.connectAttr(orm_output, mtl)
                cmds.connectAttr(orm_output, ao)
                cmds.connectAttr(orm_output, rough)

            # connect nrm map if there is one
            nrm = cmds.listConnections(shader +
                                       ".TEX_normal_map")
            if nrm:
                nrm_output = nrm[0] + ".outColor"
                glsl_nrm = glsl + ".u_NormalTexture"
                cmds.connectAttr(nrm_output, glsl_nrm)

    @classmethod
    def arnold_shader_conversion(cls, main_shader, glsl):
        cls.log.info("aiStandardSurface detected "
                     "-> Can do texture conversion")

        for shader in main_shader:
            # get the file textures related to the PBS Shader
            albedo = cmds.listConnections(shader + ".baseColor")
            if albedo:
                dif_output = albedo[0] + ".outColor"
                # get the glsl_shader input
                # reconnect the file nodes to maya2gltf shader
                glsl_dif = glsl + ".u_BaseColorTexture"
                cmds.connectAttr(dif_output, glsl_dif)

            orm_packed = cmds.listConnections(shader +
                                              ".specularRoughness")
            if orm_packed:
                orm_output = orm_packed[0] + ".outColor"

                mtl = glsl + ".u_MetallicTexture"
                ao = glsl + ".u_OcclusionTexture"
                rough = glsl + ".u_RoughnessTexture"

                cmds.connectAttr(orm_output, mtl)
                cmds.connectAttr(orm_output, ao)
                cmds.connectAttr(orm_output, rough)

            # connect nrm map if there is one
            bump_node = cmds.listConnections(shader +
                                             ".normalCamera")
            if bump_node:
                for bump in bump_node:
                    nrm = cmds.listConnections(bump +
                                               ".bumpValue")
                    if nrm:
                        nrm_output = nrm[0] + ".outColor"
                        glsl_nrm = glsl + ".u_NormalTexture"
                        cmds.connectAttr(nrm_output, glsl_nrm)
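Note: this validator follows the standard pyblish repair pattern: `process` raises `PublishValidationError` on invalid materials, and the `RepairAction` listed in `actions` calls the `repair` classmethod, which rebuilds the shading groups with GLSL shaders. At its core the check is just `listConnections` filtered by node type; a trimmed sketch of that check (Maya session with the glslShader plugin assumed):

from maya import cmds

def glsl_assigned(mesh):
    """Return True if `mesh` is shaded through a GLSLShader material."""
    for sg in cmds.listConnections(mesh, type="shadingEngine") or []:
        if cmds.listConnections(sg + ".surfaceShader", type="GLSLShader"):
            return True
    return False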
Some files were not shown because too many files have changed in this diff.