Merge branch 'develop' into OP-3572/Renderman-support-for-Sample-and-Display

Commit 9c024776b0 by Kayla Man, 2022-11-24 16:07:22 +08:00
350 changed files with 20065 additions and 5946 deletions

.github/workflows/milestone_assign.yml (vendored, new file, 28 changed lines)

@ -0,0 +1,28 @@
name: Milestone - assign to PRs
on:
pull_request_target:
types: [closed]
jobs:
run_if_release:
if: startsWith(github.base_ref, 'release/')
runs-on: ubuntu-latest
steps:
- name: 'Assign Milestone [next-minor]'
if: github.event.pull_request.milestone == null
uses: zoispag/action-assign-milestone@v1
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
milestone: 'next-minor'
run_if_develop:
if: ${{ github.base_ref == 'develop' }}
runs-on: ubuntu-latest
steps:
- name: 'Assign Milestone [next-patch]'
if: github.event.pull_request.milestone == null
uses: zoispag/action-assign-milestone@v1
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
milestone: 'next-patch'
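The two jobs differ only in the milestone they pick: PRs merged into a release/* branch get 'next-minor', PRs merged into develop get 'next-patch', and a PR that already carries a milestone is left untouched. A rough Python equivalent of what the zoispag action does against the GitHub REST API (a sketch only; the repo slug, token handling and milestone list are illustrative):

import requests

def assign_milestone(repo, pr, base_ref, token, milestones):
    # Mirrors the workflow conditions above; not the action's actual code.
    if pr.get("milestone") is not None:
        return  # keep a milestone that was set by hand
    if base_ref.startswith("release/"):
        title = "next-minor"
    elif base_ref == "develop":
        title = "next-patch"
    else:
        return
    number = next(m["number"] for m in milestones if m["title"] == title)
    # PRs count as issues for milestone assignment in the REST API
    requests.patch(
        "https://api.github.com/repos/{}/issues/{}".format(repo, pr["number"]),
        headers={"Authorization": "token {}".format(token)},
        json={"milestone": number},
    )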

.github/workflows/milestone_create.yml (vendored, new file, 62 changed lines)

@ -0,0 +1,62 @@
name: Milestone - create default
on:
milestone:
types: [closed, edited]
jobs:
generate-next-patch:
runs-on: ubuntu-latest
steps:
- name: 'Get Milestones'
uses: "WyriHaximus/github-action-get-milestones@master"
id: milestones
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
- run: printf "::set-output name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number')
id: querymilestone
env:
MILESTONES: ${{ steps.milestones.outputs.milestones }}
MILESTONE: "next-patch"
- name: Read output
run: |
echo "${{ steps.querymilestone.outputs.number }}"
- name: 'Create `next-patch` milestone'
if: steps.querymilestone.outputs.number == ''
id: createmilestone
uses: "WyriHaximus/github-action-create-milestone@v1"
with:
title: 'next-patch'
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
generate-next-minor:
runs-on: ubuntu-latest
steps:
- name: 'Get Milestones'
uses: "WyriHaximus/github-action-get-milestones@master"
id: milestones
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
- run: printf "::set-output name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number')
id: querymilestone
env:
MILESTONES: ${{ steps.milestones.outputs.milestones }}
MILESTONE: "next-minor"
- name: Read output
run: |
echo "${{ steps.querymilestone.outputs.number }}"
- name: 'Create `next-minor` milestone'
if: steps.querymilestone.outputs.number == ''
id: createmilestone
uses: "WyriHaximus/github-action-create-milestone@v1"
with:
title: 'next-minor'
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
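Both jobs follow the same pattern: list all milestones, look up the number of the one whose title matches, and create it only when the lookup comes back empty. The jq filter in the run steps boils down to this (a sketch; the payload shape follows the GitHub milestones API):

milestones = [
    {"title": "3.15.0", "number": 11},
    {"title": "next-patch", "number": 12},
]

number = next(
    (m["number"] for m in milestones if m["title"] == "next-minor"),
    None,
)
if number is None:
    # nothing matched, so the workflow's create step would fire here
    print("would create milestone 'next-minor'")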


@ -37,27 +37,27 @@ jobs:
echo ::set-output name=next_tag::$RESULT
- name: "✏️ Generate full changelog"
if: steps.version_type.outputs.type != 'skip'
id: generate-full-changelog
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
issues: false
issuesWoLabels: false
sinceTag: "3.0.0"
maxIssues: 100
pullRequests: true
prWoLabels: false
author: false
unreleased: true
compareLink: true
stripGeneratorNotice: true
verbose: true
unreleasedLabel: ${{ steps.version.outputs.next_tag }}
excludeTagsRegex: "CI/.+"
releaseBranch: "main"
# - name: "✏️ Generate full changelog"
# if: steps.version_type.outputs.type != 'skip'
# id: generate-full-changelog
# uses: heinrichreimer/github-changelog-generator-action@v2.3
# with:
# token: ${{ secrets.ADMIN_TOKEN }}
# addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
# issues: false
# issuesWoLabels: false
# sinceTag: "3.12.0"
# maxIssues: 100
# pullRequests: true
# prWoLabels: false
# author: false
# unreleased: true
# compareLink: true
# stripGeneratorNotice: true
# verbose: true
# unreleasedLabel: ${{ steps.version.outputs.next_tag }}
# excludeTagsRegex: "CI/.+"
# releaseBranch: "main"
- name: "🖨️ Print changelog to console"
if: steps.version_type.outputs.type != 'skip'
@ -85,11 +85,11 @@ jobs:
tags: true
unprotect_reviews: true
- name: 🔨 Merge main back to develop
uses: everlytic/branch-merge@1.1.0
if: steps.version_type.outputs.type != 'skip'
with:
github_token: ${{ secrets.ADMIN_TOKEN }}
source_ref: 'main'
target_branch: 'develop'
commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'


@ -2,7 +2,7 @@ name: Stable Release
on:
release:
types:
- prereleased
jobs:
@ -13,7 +13,7 @@ jobs:
steps:
- name: 🚛 Checkout Code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Python
@ -33,27 +33,27 @@ jobs:
echo ::set-output name=last_release::$LASTRELEASE
echo ::set-output name=release_tag::$RESULT
- name: "✏️ Generate full changelog"
if: steps.version.outputs.release_tag != 'skip'
id: generate-full-changelog
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
issues: false
issuesWoLabels: false
sinceTag: "3.0.0"
maxIssues: 100
pullRequests: true
prWoLabels: false
author: false
unreleased: true
compareLink: true
stripGeneratorNotice: true
verbose: true
futureRelease: ${{ steps.version.outputs.release_tag }}
excludeTagsRegex: "CI/.+"
releaseBranch: "main"
# - name: "✏️ Generate full changelog"
# if: steps.version.outputs.release_tag != 'skip'
# id: generate-full-changelog
# uses: heinrichreimer/github-changelog-generator-action@v2.3
# with:
# token: ${{ secrets.ADMIN_TOKEN }}
# addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
# issues: false
# issuesWoLabels: false
# sinceTag: "3.12.0"
# maxIssues: 100
# pullRequests: true
# prWoLabels: false
# author: false
# unreleased: true
# compareLink: true
# stripGeneratorNotice: true
# verbose: true
# futureRelease: ${{ steps.version.outputs.release_tag }}
# excludeTagsRegex: "CI/.+"
# releaseBranch: "main"
- name: 💾 Commit and Tag
id: git_commit
@ -73,8 +73,8 @@ jobs:
token: ${{ secrets.ADMIN_TOKEN }}
branch: main
tags: true
unprotect_reviews: true
- name: "✏️ Generate last changelog"
if: steps.version.outputs.release_tag != 'skip'
id: generate-last-changelog
@ -114,11 +114,11 @@ jobs:
with:
tag: "${{ steps.version.outputs.current_version }}"
- name: 🔁 Merge main back to develop
if: steps.version.outputs.release_tag != 'skip'
uses: everlytic/branch-merge@1.1.0
with:
github_token: ${{ secrets.ADMIN_TOKEN }}
source_ref: 'main'
target_branch: 'develop'
commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}'

.gitignore (vendored, 2 changed lines)

@ -110,3 +110,5 @@ tools/run_eventserver.*
# Developer tools
tools/dev_*
.github_changelog_generator

File diff suppressed because it is too large.

HISTORY.md (2035 changed lines)

File diff suppressed because it is too large.


@ -63,7 +63,8 @@ class OpenPypeVersion(semver.VersionInfo):
"""
staging = False
path = None
_VERSION_REGEX = re.compile(r"(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?") # noqa: E501
# this should match any string complying with https://semver.org/
_VERSION_REGEX = re.compile(r"(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>[a-zA-Z\d\-.]*))?(?:\+(?P<buildmetadata>[a-zA-Z\d\-.]*))?") # noqa: E501
_installed_version = None
def __init__(self, *args, **kwargs):
@ -211,6 +212,8 @@ class OpenPypeVersion(semver.VersionInfo):
OpenPypeVersion: of detected or None.
"""
# strip .zip ext if present
string = re.sub(r"\.zip$", "", string, flags=re.IGNORECASE)
m = re.search(OpenPypeVersion._VERSION_REGEX, string)
if not m:
return None
@ -815,6 +818,13 @@ class BootstrapRepos:
except Exception as e:
self._print(str(e), LOG_ERROR, exc_info=True)
return None
if not destination_dir.exists():
destination_dir.mkdir(parents=True)
elif not destination_dir.is_dir():
self._print(
"Destination exists but is not directory.", LOG_ERROR)
return None
try:
shutil.move(zip_file.as_posix(), destination_dir.as_posix())
except shutil.Error as e:
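Together, the relaxed pattern and the new .zip stripping let versions be detected straight from installed zip names. A standalone sketch of the behaviour (regex copied from the hunk above; the file name is illustrative):

import re

VERSION_REGEX = re.compile(
    r"(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)"
    r"(?:-(?P<prerelease>[a-zA-Z\d\-.]*))?"
    r"(?:\+(?P<buildmetadata>[a-zA-Z\d\-.]*))?"
)

name = re.sub(r"\.zip$", "", "openpype-3.14.2-nightly.1.zip", flags=re.IGNORECASE)
match = re.search(VERSION_REGEX, name)
print(match.groupdict())
# {'major': '3', 'minor': '14', 'patch': '2',
#  'prerelease': 'nightly.1', 'buildmetadata': None}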


@ -11,7 +11,6 @@ from .lib import (
PypeLogger,
Logger,
Anatomy,
config,
execute,
run_subprocess,
version_up,
@ -72,7 +71,6 @@ __all__ = [
"PypeLogger",
"Logger",
"Anatomy",
"config",
"execute",
"get_default_components",
"ApplicationManager",


@ -277,6 +277,13 @@ def projectmanager():
PypeCommands().launch_project_manager()
@main.command(context_settings={"ignore_unknown_options": True})
def publish_report_viewer():
from openpype.tools.publisher.publish_report_viewer import main
sys.exit(main())
@main.command()
@click.argument("output_path")
@click.option("--project", help="Define project context")


@ -389,10 +389,11 @@ def get_subset_by_name(project_name, subset_name, asset_id, fields=None):
returned if 'None' is passed.
Returns:
None: If subset with specified filters was not found.
Dict: Subset document which can be reduced to specified 'fields'.
"""
Union[None, Dict[str, Any]]: None if subset with specified filters was
not found or dict subset document which can be reduced to
specified 'fields'.
"""
if not subset_name:
return None


@ -2,6 +2,7 @@ from .mongo import get_project_connection
from .entities import (
get_assets,
get_asset_by_id,
get_version_by_id,
get_representation_by_id,
convert_id,
)
@ -127,12 +128,20 @@ def get_linked_representation_id(
if not version_id:
return []
version_doc = get_version_by_id(
project_name, version_id, fields=["type", "version_id"]
)
if version_doc["type"] == "hero_version":
version_id = version_doc["version_id"]
if max_depth is None:
max_depth = 0
match = {
"_id": version_id,
"type": {"$in": ["version", "hero_version"]}
# Links are not stored to hero versions at this moment so filter
# is limited to just versions
"type": "version"
}
graph_lookup = {
@ -187,7 +196,7 @@ def _process_referenced_pipeline_result(result, link_type):
referenced_version_ids = set()
correctly_linked_ids = set()
for item in result:
input_links = item["data"].get("inputLinks")
input_links = item.get("data", {}).get("inputLinks")
if not input_links:
continue
@ -203,7 +212,7 @@ def _process_referenced_pipeline_result(result, link_type):
continue
for output in sorted(outputs_recursive, key=lambda o: o["depth"]):
output_links = output["data"].get("inputLinks")
output_links = output.get("data", {}).get("inputLinks")
if not output_links:
continue
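The extra lookup at the top exists because a hero version document does not carry 'data.inputLinks' itself; it only points at the concrete version it mirrors. In rough shape (field names as in the hunk; values illustrative):

hero_doc = {
    "_id": "<hero version id>",           # the id originally passed in
    "type": "hero_version",
    "version_id": "<source version id>",  # concrete version holding the links
}
if hero_doc["type"] == "hero_version":
    version_id = hero_doc["version_id"]   # graph lookup starts from here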


@ -23,6 +23,7 @@ CURRENT_PROJECT_CONFIG_SCHEMA = "openpype:config-2.0"
CURRENT_ASSET_DOC_SCHEMA = "openpype:asset-3.0"
CURRENT_SUBSET_SCHEMA = "openpype:subset-3.0"
CURRENT_VERSION_SCHEMA = "openpype:version-3.0"
CURRENT_HERO_VERSION_SCHEMA = "openpype:hero_version-1.0"
CURRENT_REPRESENTATION_SCHEMA = "openpype:representation-2.0"
CURRENT_WORKFILE_INFO_SCHEMA = "openpype:workfile-1.0"
CURRENT_THUMBNAIL_SCHEMA = "openpype:thumbnail-1.0"
@ -162,6 +163,34 @@ def new_version_doc(version, subset_id, data=None, entity_id=None):
}
def new_hero_version_doc(version_id, subset_id, data=None, entity_id=None):
"""Create skeleton data of hero version document.
Args:
version_id (ObjectId): Is considered as unique identifier of version
under subset.
subset_id (Union[str, ObjectId]): Id of parent subset.
data (Dict[str, Any]): Version document data.
entity_id (Union[str, ObjectId]): Predefined id of document. New id is
created if not passed.
Returns:
Dict[str, Any]: Skeleton of version document.
"""
if data is None:
data = {}
return {
"_id": _create_or_convert_to_mongo_id(entity_id),
"schema": CURRENT_HERO_VERSION_SCHEMA,
"type": "hero_version",
"version_id": version_id,
"parent": subset_id,
"data": data
}
def new_representation_doc(
name, version_id, context, data=None, entity_id=None
):
@ -293,6 +322,20 @@ def prepare_version_update_data(old_doc, new_doc, replace=True):
return _prepare_update_data(old_doc, new_doc, replace)
def prepare_hero_version_update_data(old_doc, new_doc, replace=True):
"""Compare two hero version documents and prepare update data.
Based on compared values will create update data for 'UpdateOperation'.
Empty output means that documents are identical.
Returns:
Dict[str, Any]: Changes between old and new document.
"""
return _prepare_update_data(old_doc, new_doc, replace)
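A quick usage sketch tying the new helpers together (ids, data and the edited copy are illustrative):

subset_id = "633c0b0cd2a"  # illustrative
version_doc = new_version_doc(4, subset_id, data={"families": ["model"]})
hero_doc = new_hero_version_doc(version_doc["_id"], subset_id)
assert hero_doc["type"] == "hero_version"

# compare a locally edited copy against the stored one; an empty result
# means the documents are identical and there is nothing to push
edited = dict(hero_doc, data={"families": ["model", "review"]})
changes = prepare_hero_version_update_data(hero_doc, edited)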
def prepare_representation_update_data(old_doc, new_doc, replace=True):
"""Compare two representation documents and prepare update data.


@ -0,0 +1,177 @@
import os
import shutil
from time import sleep
from openpype.client.entities import (
get_last_version_by_subset_id,
get_representations,
get_subsets,
)
from openpype.lib import PreLaunchHook
from openpype.lib.local_settings import get_local_site_id
from openpype.lib.profiles_filtering import filter_profiles
from openpype.pipeline.load.utils import get_representation_path
from openpype.settings.lib import get_project_settings
class CopyLastPublishedWorkfile(PreLaunchHook):
"""Copy last published workfile as first workfile.
Prelaunch hook works only if last workfile leads to not existing file.
- That is possible only if it's first version.
"""
# Before `AddLastWorkfileToLaunchArgs`
order = -1
app_groups = ["blender", "photoshop", "tvpaint", "aftereffects"]
def execute(self):
"""Check if local workfile doesn't exist, else copy it.
1- Check if setting for this feature is enabled
2- Check if workfile in work area doesn't exist
3- Check if published workfile exists and is copied locally in publish
4- Substitute copied published workfile as first workfile
Returns:
None: This is a void method.
"""
sync_server = self.modules_manager.get("sync_server")
if not sync_server or not sync_server.enabled:
self.log.debug("Sync server module is not enabled or available")
return
# Check there is no workfile available
last_workfile = self.data.get("last_workfile_path")
if os.path.exists(last_workfile):
self.log.debug(
"Last workfile exists. Skipping {} process.".format(
self.__class__.__name__
)
)
return
# Get data
project_name = self.data["project_name"]
task_name = self.data["task_name"]
task_type = self.data["task_type"]
host_name = self.application.host_name
# Check settings has enabled it
project_settings = get_project_settings(project_name)
profiles = project_settings["global"]["tools"]["Workfiles"][
"last_workfile_on_startup"
]
filter_data = {
"tasks": task_name,
"task_types": task_type,
"hosts": host_name,
}
last_workfile_settings = filter_profiles(profiles, filter_data)
use_last_published_workfile = last_workfile_settings.get(
"use_last_published_workfile"
)
if use_last_published_workfile is None:
self.log.info(
(
"Seems like old version of settings is used."
' Can\'t access custom templates in host "{}".'.format(
host_name
)
)
)
return
elif use_last_published_workfile is False:
self.log.info(
(
'Project "{}" has turned off to use last published'
' workfile as first workfile for host "{}"'.format(
project_name, host_name
)
)
)
return
self.log.info("Trying to fetch last published workfile...")
project_doc = self.data.get("project_doc")
asset_doc = self.data.get("asset_doc")
anatomy = self.data.get("anatomy")
# Check it can proceed
if not project_doc and not asset_doc:
return
# Get subset id
subset_id = next(
(
subset["_id"]
for subset in get_subsets(
project_name,
asset_ids=[asset_doc["_id"]],
fields=["_id", "data.family", "data.families"],
)
if subset["data"].get("family") == "workfile"
# Legacy compatibility
or "workfile" in subset["data"].get("families", {})
),
None,
)
if not subset_id:
self.log.debug(
'No workfile subset found for asset "{}".'.format(asset_doc["name"])
)
return
# Get workfile representation
last_version_doc = get_last_version_by_subset_id(
project_name, subset_id, fields=["_id"]
)
if not last_version_doc:
self.log.debug("Subset does not have any versions")
return
workfile_representation = next(
(
representation
for representation in get_representations(
project_name, version_ids=[last_version_doc["_id"]]
)
if representation["context"]["task"]["name"] == task_name
),
None,
)
if not workfile_representation:
self.log.debug(
'No published workfile for task "{}" and host "{}".'.format(
task_name, host_name
)
)
return
local_site_id = get_local_site_id()
sync_server.add_site(
project_name,
workfile_representation["_id"],
local_site_id,
force=True,
priority=99,
reset_timer=True,
)
while not sync_server.is_representation_on_site(
project_name, workfile_representation["_id"], local_site_id
):
sleep(5)
# Get paths
published_workfile_path = get_representation_path(
workfile_representation, root=anatomy.roots
)
local_workfile_dir = os.path.dirname(last_workfile)
# Copy file and substitute path
self.data["last_workfile_path"] = shutil.copy(
published_workfile_path, local_workfile_dir
)
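The settings gate near the top of execute() relies on profile filtering. A minimal sketch of how filter_profiles resolves the matching profile (profile values are illustrative; empty lists are assumed to act as wildcards, as in openpype.lib.profiles_filtering):

profiles = [
    {
        "tasks": [],
        "task_types": [],
        "hosts": ["blender"],
        "use_last_published_workfile": True,
    },
]
profile = filter_profiles(
    profiles,
    {"tasks": "modeling", "task_types": "Modeling", "hosts": "blender"},
)
print(profile["use_last_published_workfile"])  # True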


@ -312,6 +312,8 @@ class IPublishHost:
required = [
"get_context_data",
"update_context_data",
"get_context_title",
"get_current_context",
]
missing = []
for name in required:


@ -1,5 +1,4 @@
from openpype.modules import OpenPypeModule
from openpype.modules.interfaces import IHostAddon
from openpype.modules import OpenPypeModule, IHostAddon
class AfterEffectsAddon(OpenPypeModule, IHostAddon):


@ -1,6 +1,5 @@
import os
from openpype.modules import OpenPypeModule
from openpype.modules.interfaces import IHostAddon
from openpype.modules import OpenPypeModule, IHostAddon
BLENDER_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))


@ -3,7 +3,7 @@ from typing import List
import bpy
import pyblish.api
import openpype.api
import openpype.hosts.blender.api.action
from openpype.pipeline.publish import ValidateContentsOrder


@ -3,14 +3,15 @@ from typing import List
import bpy
import pyblish.api
import openpype.api
from openpype.pipeline.publish import ValidateContentsOrder
import openpype.hosts.blender.api.action
class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
"""Validate that the current mesh has UV's."""
order = openpype.api.ValidateContentsOrder
order = ValidateContentsOrder
hosts = ["blender"]
families = ["model"]
category = "geometry"


@ -3,14 +3,15 @@ from typing import List
import bpy
import pyblish.api
import openpype.api
from openpype.pipeline.publish import ValidateContentsOrder
import openpype.hosts.blender.api.action
class ValidateMeshNoNegativeScale(pyblish.api.Validator):
"""Ensure that meshes don't have a negative scale."""
order = openpype.api.ValidateContentsOrder
order = ValidateContentsOrder
hosts = ["blender"]
families = ["model"]
category = "geometry"


@ -3,7 +3,7 @@ from typing import List
import bpy
import pyblish.api
import openpype.api
import openpype.hosts.blender.api.action
from openpype.pipeline.publish import ValidateContentsOrder


@ -4,7 +4,7 @@ import mathutils
import bpy
import pyblish.api
import openpype.api
import openpype.hosts.blender.api.action
from openpype.pipeline.publish import ValidateContentsOrder


@ -1,6 +1,5 @@
import os
from openpype.modules import OpenPypeModule
from openpype.modules.interfaces import IHostAddon
from openpype.modules import OpenPypeModule, IHostAddon
HOST_DIR = os.path.dirname(os.path.abspath(__file__))


@ -225,7 +225,8 @@ class FlameMenuUniversal(_FlameMenuApp):
menu['actions'].append({
"name": "Load...",
"execute": lambda x: self.tools_helper.show_loader()
"execute": lambda x: callback_selection(
x, self.tools_helper.show_loader)
})
menu['actions'].append({
"name": "Manage...",


@ -4,13 +4,13 @@ import shutil
from copy import deepcopy
from xml.etree import ElementTree as ET
import qargparse
from Qt import QtCore, QtWidgets
import openpype.api as openpype
import qargparse
from openpype import style
from openpype.lib import Logger
from openpype.pipeline import LegacyCreator, LoaderPlugin
from openpype.settings import get_current_project_settings
from . import constants
from . import lib as flib
@ -306,7 +306,7 @@ class Creator(LegacyCreator):
def __init__(self, *args, **kwargs):
super(Creator, self).__init__(*args, **kwargs)
self.presets = openpype.get_current_project_settings()[
self.presets = get_current_project_settings()[
"flame"]["create"].get(self.__class__.__name__, {})
# adding basic current context flame objects
@ -690,6 +690,54 @@ class ClipLoader(LoaderPlugin):
)
]
_mapping = None
def get_colorspace(self, context):
"""Get colorspace name
Look either to version data or representation data.
Args:
context (dict): version context data
Returns:
str: colorspace name or None
"""
version = context['version']
version_data = version.get("data", {})
colorspace = version_data.get(
"colorspace", None
)
if (
not colorspace
or colorspace == "Unknown"
):
colorspace = context["representation"]["data"].get(
"colorspace", None)
return colorspace
@classmethod
def get_native_colorspace(cls, input_colorspace):
"""Return native colorspace name.
Args:
input_colorspace (str | None): colorspace name
Returns:
str: native colorspace name defined in mapping or None
"""
if not cls._mapping:
settings = get_current_project_settings()["flame"]
mapping = settings["imageio"]["profilesMapping"]["inputs"]
cls._mapping = {
input["ocioName"]: input["flameName"]
for input in mapping
}
return cls._mapping.get(input_colorspace)
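The two helpers are meant to be chained by the clip loaders further below: first resolve the OCIO colorspace name from the published data, then translate it to Flame's own name. Inside a loader's load() that looks roughly like this (the settings mapping is illustrative):

# project settings: imageio/profilesMapping/inputs might contain e.g.
#   [{"ocioName": "ACES - ACEScg", "flameName": "ACEScg"}]
colorspace = self.get_colorspace(context)            # -> "ACES - ACEScg"
colorspace = self.get_native_colorspace(colorspace)  # -> "ACEScg"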
class OpenClipSolver(flib.MediaInfoFile):
create_new_clip = False


@ -1,7 +1,7 @@
"""Host API required Work Files tool"""
import os
from openpype.api import Logger
from openpype.lib import Logger
# from .. import (
# get_project_manager,
# get_current_project


@ -3,16 +3,17 @@ import json
import tempfile
import contextlib
import socket
from pprint import pformat
from openpype.lib import (
PreLaunchHook,
get_openpype_username
get_openpype_username,
run_subprocess,
)
from openpype.lib.applications import (
ApplicationLaunchFailed
)
from openpype.hosts import flame as opflame
import openpype
from pprint import pformat
class FlamePrelaunch(PreLaunchHook):
@ -42,17 +43,9 @@ class FlamePrelaunch(PreLaunchHook):
volume_name = _env.get("FLAME_WIRETAP_VOLUME")
# get image io
project_anatomy = self.data["anatomy"]
project_settings = self.data["project_settings"]
# make sure anatomy settings are having flame key
if not project_anatomy["imageio"].get("flame"):
raise ApplicationLaunchFailed((
"Anatomy project settings are missing `flame` key. "
"Please make sure you remove project overides on "
"Anatomy Image io")
)
imageio_flame = project_anatomy["imageio"]["flame"]
imageio_flame = project_settings["flame"]["imageio"]
# get user name and host name
user_name = get_openpype_username()
@ -135,7 +128,6 @@ class FlamePrelaunch(PreLaunchHook):
except OSError as exc:
self.log.warning("Not able to open files: {}".format(exc))
def _get_flame_fps(self, fps_num):
fps_table = {
float(23.976): "23.976 fps",
@ -187,7 +179,7 @@ class FlamePrelaunch(PreLaunchHook):
"env": self.launch_context.env
}
openpype.api.run_subprocess(args, **process_kwargs)
run_subprocess(args, **process_kwargs)
# process returned json file to pass launch args
return_json_data = open(tmp_json_path).read()


@ -36,14 +36,15 @@ class LoadClip(opfapi.ClipLoader):
version = context['version']
version_data = version.get("data", {})
version_name = version.get("name", None)
colorspace = version_data.get("colorspace", None)
colorspace = self.get_colorspace(context)
clip_name = StringTemplate(self.clip_name_template).format(
context["representation"]["context"])
# TODO: settings in imageio
# convert colorspace with ocio to flame mapping
# in imageio flame section
colorspace = colorspace
colorspace = self.get_native_colorspace(colorspace)
self.log.info("Loading with colorspace: `{}`".format(colorspace))
# create workfile path
workfile_dir = os.environ["AVALON_WORKDIR"]


@ -1,3 +1,4 @@
from copy import deepcopy
import os
import flame
from pprint import pformat
@ -22,7 +23,7 @@ class LoadClipBatch(opfapi.ClipLoader):
# settings
reel_name = "OP_LoadedReel"
clip_name_template = "{asset}_{subset}<_{output}>"
clip_name_template = "{batch}_{asset}_{subset}<_{output}>"
def load(self, context, name, namespace, options):
@ -34,19 +35,22 @@ class LoadClipBatch(opfapi.ClipLoader):
version = context['version']
version_data = version.get("data", {})
version_name = version.get("name", None)
colorspace = version_data.get("colorspace", None)
colorspace = self.get_colorspace(context)
# in case output is not in context replace key to representation
if not context["representation"]["context"].get("output"):
self.clip_name_template.replace("output", "representation")
clip_name = StringTemplate(self.clip_name_template).format(
context["representation"]["context"])
formating_data = deepcopy(context["representation"]["context"])
formating_data["batch"] = self.batch.name.get_value()
clip_name = StringTemplate(self.clip_name_template).format(
formating_data)
# TODO: settings in imageio
# convert colorspace with ocio to flame mapping
# in imageio flame section
colorspace = colorspace
colorspace = self.get_native_colorspace(colorspace)
self.log.info("Loading with colorspace: `{}`".format(colorspace))
# create workfile path
workfile_dir = options.get("workdir") or os.environ["AVALON_WORKDIR"]
@ -56,6 +60,7 @@ class LoadClipBatch(opfapi.ClipLoader):
openclip_path = os.path.join(
openclip_dir, clip_name + ".clip"
)
if not os.path.exists(openclip_dir):
os.makedirs(openclip_dir)


@ -1,6 +1,5 @@
import os
from openpype.modules import OpenPypeModule
from openpype.modules.interfaces import IHostAddon
from openpype.modules import OpenPypeModule, IHostAddon
FUSION_HOST_DIR = os.path.dirname(os.path.abspath(__file__))


@ -3,8 +3,6 @@ import sys
import re
import contextlib
from Qt import QtGui
from openpype.lib import Logger
from openpype.client import (
get_asset_by_name,
@ -92,7 +90,7 @@ def set_asset_resolution():
})
def validate_comp_prefs(comp=None):
def validate_comp_prefs(comp=None, force_repair=False):
"""Validate current comp defaults with asset settings.
Validates fps, resolutionWidth, resolutionHeight, aspectRatio.
@ -135,21 +133,22 @@ def validate_comp_prefs(comp=None):
asset_value = asset_data[key]
comp_value = comp_frame_format_prefs.get(comp_key)
if asset_value != comp_value:
# todo: Actually show dialog to user instead of just logging
log.warning(
"Comp {pref} {value} does not match asset "
"'{asset_name}' {pref} {asset_value}".format(
pref=label,
value=comp_value,
asset_name=asset_doc["name"],
asset_value=asset_value)
)
invalid_msg = "{} {} should be {}".format(label,
comp_value,
asset_value)
invalid.append(invalid_msg)
if not force_repair:
# Do not log warning if we force repair anyway
log.warning(
"Comp {pref} {value} does not match asset "
"'{asset_name}' {pref} {asset_value}".format(
pref=label,
value=comp_value,
asset_name=asset_doc["name"],
asset_value=asset_value)
)
if invalid:
def _on_repair():
@ -160,6 +159,11 @@ def validate_comp_prefs(comp=None):
attributes[comp_key_full] = value
comp.SetPrefs(attributes)
if force_repair:
log.info("Applying default Comp preferences..")
_on_repair()
return
from . import menu
from openpype.widgets import popup
from openpype.style import load_stylesheet


@ -16,6 +16,7 @@ from openpype.hosts.fusion.api.lib import (
from openpype.pipeline import legacy_io
from openpype.resources import get_openpype_icon_filepath
from .pipeline import FusionEventHandler
from .pulse import FusionPulse
self = sys.modules[__name__]
@ -119,6 +120,10 @@ class OpenPypeMenu(QtWidgets.QWidget):
self._pulse = FusionPulse(parent=self)
self._pulse.start()
# Detect Fusion events as OpenPype events
self._event_handler = FusionEventHandler(parent=self)
self._event_handler.start()
def on_task_changed(self):
# Update current context label
label = legacy_io.Session["AVALON_ASSET"]


@ -2,13 +2,16 @@
Basic avalon integration
"""
import os
import sys
import logging
import pyblish.api
from Qt import QtCore
from openpype.lib import (
Logger,
register_event_callback
register_event_callback,
emit_event
)
from openpype.pipeline import (
register_loader_plugin_path,
@ -39,12 +42,28 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
class CompLogHandler(logging.Handler):
class FusionLogHandler(logging.Handler):
# Keep a reference to fusion's Print function (Remote Object)
_print = None
@property
def print(self):
if self._print is not None:
# Use cached
return self._print
_print = getattr(sys.modules["__main__"], "fusion").Print
if _print is None:
# Backwards compatibility: Print method on Fusion instance was
# added around Fusion 17.4 and wasn't available on PyRemote Object
# before
_print = get_current_comp().Print
self._print = _print
return _print
def emit(self, record):
entry = self.format(record)
comp = get_current_comp()
if comp:
comp.Print(entry)
self.print(entry)
def install():
@ -67,7 +86,7 @@ def install():
# Attach default logging handler that prints to active comp
logger = logging.getLogger()
formatter = logging.Formatter(fmt="%(message)s\n")
handler = CompLogHandler()
handler = FusionLogHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
@ -84,10 +103,10 @@ def install():
"instanceToggled", on_pyblish_instance_toggled
)
# Fusion integration currently does not attach to direct callbacks of
# the application. So we use workfile callbacks to allow similar behavior
# on save and open
register_event_callback("workfile.open.after", on_after_open)
# Register events
register_event_callback("open", on_after_open)
register_event_callback("save", on_save)
register_event_callback("new", on_new)
def uninstall():
@ -137,8 +156,18 @@ def on_pyblish_instance_toggled(instance, old_value, new_value):
tool.SetAttrs({"TOOLB_PassThrough": passthrough})
def on_after_open(_event):
comp = get_current_comp()
def on_new(event):
comp = event["Rets"]["comp"]
validate_comp_prefs(comp, force_repair=True)
def on_save(event):
comp = event["sender"]
validate_comp_prefs(comp)
def on_after_open(event):
comp = event["sender"]
validate_comp_prefs(comp)
if any_outdated_containers():
@ -182,7 +211,7 @@ def ls():
"""
comp = get_current_comp()
tools = comp.GetToolList(False, "Loader").values()
tools = comp.GetToolList(False).values()
for tool in tools:
container = parse_container(tool)
@ -254,3 +283,114 @@ def parse_container(tool):
return container
class FusionEventThread(QtCore.QThread):
"""QThread which will periodically ping Fusion app for any events.
The fusion.UIManager must be set up to be notified of events before they'll
be reported by this thread, for example:
fusion.UIManager.AddNotify("Comp_Save", None)
"""
on_event = QtCore.Signal(dict)
def run(self):
app = getattr(sys.modules["__main__"], "app", None)
if app is None:
# No Fusion app found
return
# As optimization store the GetEvent method directly because every
# getattr of UIManager.GetEvent tries to resolve the Remote Function
# through the PyRemoteObject
get_event = app.UIManager.GetEvent
delay = int(os.environ.get("OPENPYPE_FUSION_CALLBACK_INTERVAL", 1000))
while True:
if self.isInterruptionRequested():
return
# Process all events that have been queued up until now
while True:
event = get_event(False)
if not event:
break
self.on_event.emit(event)
# Wait some time before processing events again
# to not keep blocking the UI
self.msleep(delay)
class FusionEventHandler(QtCore.QObject):
"""Emits OpenPype events based on Fusion events captured in a QThread.
This will emit the following OpenPype events based on Fusion actions:
save: Comp_Save, Comp_SaveAs
open: Comp_Opened
new: Comp_New
To use this you can attach it to your Qt UI so it runs in the background.
E.g.
>>> handler = FusionEventHandler(parent=window)
>>> handler.start()
"""
ACTION_IDS = [
"Comp_Save",
"Comp_SaveAs",
"Comp_New",
"Comp_Opened"
]
def __init__(self, parent=None):
super(FusionEventHandler, self).__init__(parent=parent)
# Set up Fusion event callbacks
fusion = getattr(sys.modules["__main__"], "fusion", None)
ui = fusion.UIManager
# Add notifications for the ones we want to listen to
notifiers = []
for action_id in self.ACTION_IDS:
notifier = ui.AddNotify(action_id, None)
notifiers.append(notifier)
# TODO: Not entirely sure whether these must be kept to avoid
# garbage collection
self._notifiers = notifiers
self._event_thread = FusionEventThread(parent=self)
self._event_thread.on_event.connect(self._on_event)
def start(self):
self._event_thread.start()
def stop(self):
self._event_thread.stop()
def _on_event(self, event):
"""Handle Fusion events to emit OpenPype events"""
if not event:
return
what = event["what"]
# Comp Save
if what in {"Comp_Save", "Comp_SaveAs"}:
if not event["Rets"].get("success"):
# If the Save action is cancelled it will still emit an
# event but with "success": False so we ignore those cases
return
# Comp was saved
emit_event("save", data=event)
return
# Comp New
elif what in {"Comp_New"}:
emit_event("new", data=event)
# Comp Opened
elif what in {"Comp_Opened"}:
emit_event("open", data=event)


@ -19,9 +19,12 @@ class PulseThread(QtCore.QThread):
while True:
if self.isInterruptionRequested():
return
try:
app.Test()
except Exception:
# We don't need to call Test because PyRemoteObject of the app
# will actually fail to even resolve the Test function if it has
# gone down. So we can actually already just check by confirming
# the method is still getting resolved. (Optimization)
if app.Test is None:
self.no_response.emit()
self.msleep(interval)


@ -15,13 +15,7 @@ class FusionPreLaunchOCIO(PreLaunchHook):
project_settings = self.data["project_settings"]
# make sure anatomy settings are having flame key
imageio_fusion = project_settings.get("fusion", {}).get("imageio")
if not imageio_fusion:
raise ApplicationLaunchFailed((
"Anatomy project settings are missing `fusion` key. "
"Please make sure you remove project overrides on "
"Anatomy ImageIO")
)
imageio_fusion = project_settings["fusion"]["imageio"]
ocio = imageio_fusion.get("ocio")
enabled = ocio.get("enabled", False)


@ -0,0 +1,70 @@
from openpype.pipeline import (
load,
get_representation_path,
)
from openpype.hosts.fusion.api import (
imprint_container,
get_current_comp,
comp_lock_and_undo_chunk
)
class FusionLoadAlembicMesh(load.LoaderPlugin):
"""Load Alembic mesh into Fusion"""
families = ["pointcache", "model"]
representations = ["abc"]
label = "Load alembic mesh"
order = -10
icon = "code-fork"
color = "orange"
tool_type = "SurfaceAlembicMesh"
def load(self, context, name, namespace, data):
# Fallback to asset name when namespace is None
if namespace is None:
namespace = context['asset']['name']
# Create the Loader with the filename path set
comp = get_current_comp()
with comp_lock_and_undo_chunk(comp, "Create tool"):
path = self.fname
args = (-32768, -32768)
tool = comp.AddTool(self.tool_type, *args)
tool["Filename"] = path
imprint_container(tool,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__)
def switch(self, container, representation):
self.update(container, representation)
def update(self, container, representation):
"""Update Alembic path"""
tool = container["_tool"]
assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
comp = tool.Comp()
path = get_representation_path(representation)
with comp_lock_and_undo_chunk(comp, "Update tool"):
tool["Filename"] = path
# Update the imprinted representation
tool.SetData("avalon.representation", str(representation["_id"]))
def remove(self, container):
tool = container["_tool"]
assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
comp = tool.Comp()
with comp_lock_and_undo_chunk(comp, "Remove tool"):
tool.Delete()


@ -0,0 +1,71 @@
from openpype.pipeline import (
load,
get_representation_path,
)
from openpype.hosts.fusion.api import (
imprint_container,
get_current_comp,
comp_lock_and_undo_chunk
)
class FusionLoadFBXMesh(load.LoaderPlugin):
"""Load FBX mesh into Fusion"""
families = ["*"]
representations = ["fbx"]
label = "Load FBX mesh"
order = -10
icon = "code-fork"
color = "orange"
tool_type = "SurfaceFBXMesh"
def load(self, context, name, namespace, data):
# Fallback to asset name when namespace is None
if namespace is None:
namespace = context['asset']['name']
# Create the Loader with the filename path set
comp = get_current_comp()
with comp_lock_and_undo_chunk(comp, "Create tool"):
path = self.fname
args = (-32768, -32768)
tool = comp.AddTool(self.tool_type, *args)
tool["ImportFile"] = path
imprint_container(tool,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__)
def switch(self, container, representation):
self.update(container, representation)
def update(self, container, representation):
"""Update path"""
tool = container["_tool"]
assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
comp = tool.Comp()
path = get_representation_path(representation)
with comp_lock_and_undo_chunk(comp, "Update tool"):
tool["ImportFile"] = path
# Update the imprinted representation
tool.SetData("avalon.representation", str(representation["_id"]))
def remove(self, container):
tool = container["_tool"]
assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
comp = tool.Comp()
with comp_lock_and_undo_chunk(comp, "Remove tool"):
tool.Delete()


@ -1,6 +1,5 @@
import os
from openpype.modules import OpenPypeModule
from openpype.modules.interfaces import IHostAddon
from openpype.modules import OpenPypeModule, IHostAddon
HARMONY_HOST_DIR = os.path.dirname(os.path.abspath(__file__))


@ -1,7 +1,6 @@
import os
import platform
from openpype.modules import OpenPypeModule
from openpype.modules.interfaces import IHostAddon
from openpype.modules import OpenPypeModule, IHostAddon
HIERO_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))


@ -30,9 +30,15 @@ from .lib import (
get_timeline_selection,
get_current_track,
get_track_item_tags,
get_track_openpype_tag,
set_track_openpype_tag,
get_track_openpype_data,
get_track_item_pype_tag,
set_track_item_pype_tag,
get_track_item_pype_data,
get_trackitem_openpype_tag,
set_trackitem_openpype_tag,
get_trackitem_openpype_data,
set_publish_attribute,
get_publish_attribute,
imprint,
@ -85,9 +91,12 @@ __all__ = [
"get_timeline_selection",
"get_current_track",
"get_track_item_tags",
"get_track_item_pype_tag",
"set_track_item_pype_tag",
"get_track_item_pype_data",
"get_track_openpype_tag",
"set_track_openpype_tag",
"get_track_openpype_data",
"get_trackitem_openpype_tag",
"set_trackitem_openpype_tag",
"get_trackitem_openpype_data",
"set_publish_attribute",
"get_publish_attribute",
"imprint",
@ -99,6 +108,10 @@ __all__ = [
"apply_colorspace_project",
"apply_colorspace_clips",
"get_sequence_pattern_and_padding",
# deprecated
"get_track_item_pype_tag",
"set_track_item_pype_tag",
"get_track_item_pype_data",
# plugins
"CreatorWidget",


@ -7,28 +7,68 @@ import os
import re
import sys
import platform
import functools
import warnings
import json
import ast
import secrets
import shutil
import hiero
from Qt import QtWidgets
from Qt import QtWidgets, QtCore, QtXml
from openpype.client import get_project
from openpype.settings import get_anatomy_settings
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io, Anatomy
from openpype.pipeline.load import filter_containers
from openpype.lib import Logger
from . import tags
try:
from PySide.QtCore import QFile, QTextStream
from PySide.QtXml import QDomDocument
except ImportError:
from PySide2.QtCore import QFile, QTextStream
from PySide2.QtXml import QDomDocument
# from opentimelineio import opentime
# from pprint import pformat
class DeprecatedWarning(DeprecationWarning):
pass
def deprecated(new_destination):
"""Mark functions as deprecated.
It will result in a warning being emitted when the function is used.
"""
func = None
if callable(new_destination):
func = new_destination
new_destination = None
def _decorator(decorated_func):
if new_destination is None:
warning_message = (
" Please check content of deprecated function to figure out"
" possible replacement."
)
else:
warning_message = " Please replace your usage with '{}'.".format(
new_destination
)
@functools.wraps(decorated_func)
def wrapper(*args, **kwargs):
warnings.simplefilter("always", DeprecatedWarning)
warnings.warn(
(
"Call to deprecated function '{}'"
"\nFunction was moved or removed.{}"
).format(decorated_func.__name__, warning_message),
category=DeprecatedWarning,
stacklevel=4
)
return decorated_func(*args, **kwargs)
return wrapper
if func is None:
return _decorator
return _decorator(func)
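The decorator supports both forms, bare and with a replacement path (the function names here are illustrative):

@deprecated
def old_helper():
    # warning tells the caller to check this body for a replacement
    pass

@deprecated("openpype.hosts.hiero.api.lib.new_helper")
def old_helper_with_hint():
    # warning names the replacement explicitly
    pass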
log = Logger.get_logger(__name__)
@ -301,7 +341,124 @@ def get_track_item_tags(track_item):
return returning_tag_data
def _get_tag_unique_hash():
# sourcery skip: avoid-builtin-shadow
return secrets.token_hex(nbytes=4)
def set_track_openpype_tag(track, data=None):
"""
Set openpype track tag to input track object.
Attributes:
track (hiero.core.VideoTrack): hiero object
Returns:
hiero.core.Tag
"""
data = data or {}
# basic Tag's attribute
tag_data = {
"editable": "0",
"note": "OpenPype data container",
"icon": "openpype_icon.png",
"metadata": dict(data.items())
}
# get available pype tag if any
_tag = get_track_openpype_tag(track)
if _tag:
# if an openpype tag already exists, update it with the input data
tag = tags.update_tag(_tag, tag_data)
else:
# otherwise create a new tag
tag = tags.create_tag(
"{}_{}".format(
self.pype_tag_name,
_get_tag_unique_hash()
),
tag_data
)
# add it to the input track item
track.addTag(tag)
return tag
def get_track_openpype_tag(track):
"""
Get pype track item tag created by creator or loader plugin.
Attributes:
trackItem (hiero.core.TrackItem): hiero object
Returns:
hiero.core.Tag: hierarchy, orig clip attributes
"""
# get all tags from track item
_tags = track.tags()
if not _tags:
return None
for tag in _tags:
# return only correct tag defined by global name
if self.pype_tag_name in tag.name():
return tag
def get_track_openpype_data(track, container_name=None):
"""
Get track's openpype tag data.
Attributes:
trackItem (hiero.core.VideoTrack): hiero object
Returns:
dict: data found on pype tag
"""
return_data = {}
# get pype data tag from track item
tag = get_track_openpype_tag(track)
if not tag:
return None
# get tag metadata attribute
tag_data = deepcopy(dict(tag.metadata()))
for obj_name, obj_data in tag_data.items():
obj_name = obj_name.replace("tag.", "")
if obj_name in ["applieswhole", "note", "label"]:
continue
return_data[obj_name] = json.loads(obj_data)
return (
return_data[container_name]
if container_name
else return_data
)
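Round-trip sketch for the track-level helpers (data illustrative; dict values are JSON-encoded by tags.update_tag, as the tags.py hunk further below shows):

set_track_openpype_tag(track, {"LoadedEffects_01": {"loader": "LoadEffects"}})
all_data = get_track_openpype_data(track)
one = get_track_openpype_data(track, "LoadedEffects_01")
# all_data -> {"LoadedEffects_01": {"loader": "LoadEffects"}}
# one      -> {"loader": "LoadEffects"}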
@deprecated("openpype.hosts.hiero.api.lib.get_trackitem_openpype_tag")
def get_track_item_pype_tag(track_item):
# backward compatibility alias
return get_trackitem_openpype_tag(track_item)
@deprecated("openpype.hosts.hiero.api.lib.set_trackitem_openpype_tag")
def set_track_item_pype_tag(track_item, data=None):
# backward compatibility alias
return set_trackitem_openpype_tag(track_item, data)
@deprecated("openpype.hosts.hiero.api.lib.get_trackitem_openpype_data")
def get_track_item_pype_data(track_item):
# backward compatibility alias
return get_trackitem_openpype_data(track_item)
def get_trackitem_openpype_tag(track_item):
"""
Get pype track item tag created by creator or loader plugin.
@ -317,16 +474,16 @@ def get_track_item_pype_tag(track_item):
return None
for tag in _tags:
# return only correct tag defined by global name
if tag.name() == self.pype_tag_name:
if self.pype_tag_name in tag.name():
return tag
def set_track_item_pype_tag(track_item, data=None):
def set_trackitem_openpype_tag(track_item, data=None):
"""
Set pype track item tag to input track_item.
Set openpype track tag to input track object.
Attributes:
trackItem (hiero.core.TrackItem): hiero object
track (hiero.core.VideoTrack): hiero object
Returns:
hiero.core.Tag
@ -341,21 +498,26 @@ def set_track_item_pype_tag(track_item, data=None):
"metadata": dict(data.items())
}
# get available pype tag if any
_tag = get_track_item_pype_tag(track_item)
_tag = get_trackitem_openpype_tag(track_item)
if _tag:
# if an openpype tag already exists, update it with the input data
tag = tags.update_tag(_tag, tag_data)
else:
# otherwise create a new tag
tag = tags.create_tag(self.pype_tag_name, tag_data)
tag = tags.create_tag(
"{}_{}".format(
self.pype_tag_name,
_get_tag_unique_hash()
),
tag_data
)
# add it to the input track item
track_item.addTag(tag)
return tag
def get_track_item_pype_data(track_item):
def get_trackitem_openpype_data(track_item):
"""
Get track item's pype tag data.
@ -367,7 +529,7 @@ def get_track_item_pype_data(track_item):
"""
data = {}
# get pype data tag from track item
tag = get_track_item_pype_tag(track_item)
tag = get_trackitem_openpype_tag(track_item)
if not tag:
return None
@ -420,7 +582,7 @@ def imprint(track_item, data=None):
"""
data = data or {}
tag = set_track_item_pype_tag(track_item, data)
tag = set_trackitem_openpype_tag(track_item, data)
# add publish attribute
set_publish_attribute(tag, True)
@ -832,22 +994,22 @@ def set_selected_track_items(track_items_list, sequence=None):
def _read_doc_from_path(path):
# reading QDomDocument from HROX path
hrox_file = QFile(path)
if not hrox_file.open(QFile.ReadOnly):
# reading QtXml.QDomDocument from HROX path
hrox_file = QtCore.QFile(path)
if not hrox_file.open(QtCore.QFile.ReadOnly):
raise RuntimeError("Failed to open file for reading")
doc = QDomDocument()
doc = QtXml.QDomDocument()
doc.setContent(hrox_file)
hrox_file.close()
return doc
def _write_doc_to_path(doc, path):
# write QDomDocument to path as HROX
hrox_file = QFile(path)
if not hrox_file.open(QFile.WriteOnly):
# write QtXml.QDomDocument to path as HROX
hrox_file = QtCore.QFile(path)
if not hrox_file.open(QtCore.QFile.WriteOnly):
raise RuntimeError("Failed to open file for writing")
stream = QTextStream(hrox_file)
stream = QtCore.QTextStream(hrox_file)
doc.save(stream, 1)
hrox_file.close()
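A round-trip sketch with the two helpers above (paths illustrative):

doc = _read_doc_from_path("/projects/show/editorial/edit_v001.hrox")
# ...modify the QtXml.QDomDocument in place...
_write_doc_to_path(doc, "/projects/show/editorial/edit_v001_fixed.hrox")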
@ -878,8 +1040,7 @@ def apply_colorspace_project():
project.close()
# get presets for hiero
imageio = get_anatomy_settings(
project_name)["imageio"].get("hiero", None)
imageio = get_project_settings(project_name)["hiero"]["imageio"]
presets = imageio.get("workfile")
# save the workfile as subversion "comment:_colorspaceChange"
@ -932,8 +1093,7 @@ def apply_colorspace_clips():
clips = project.clips()
# get presets for hiero
imageio = get_anatomy_settings(
project_name)["imageio"].get("hiero", None)
imageio = get_project_settings(project_name)["hiero"]["imageio"]
from pprint import pprint
presets = imageio.get("regexInputs", {}).get("inputs", {})
@ -1032,7 +1192,7 @@ def sync_clip_name_to_data_asset(track_items_list):
# get name and data
ti_name = track_item.name()
data = get_track_item_pype_data(track_item)
data = get_trackitem_openpype_data(track_item)
# ignore if no data on the clip or not publish instance
if not data:
@ -1044,10 +1204,10 @@ def sync_clip_name_to_data_asset(track_items_list):
if data["asset"] != ti_name:
data["asset"] = ti_name
# remove the original tag
tag = get_track_item_pype_tag(track_item)
tag = get_trackitem_openpype_tag(track_item)
track_item.removeTag(tag)
# create new tag with updated data
set_track_item_pype_tag(track_item, data)
set_trackitem_openpype_tag(track_item, data)
print("asset was changed in clip: {}".format(ti_name))
@ -1085,10 +1245,10 @@ def check_inventory_versions(track_items=None):
project_name = legacy_io.active_project()
filter_result = filter_containers(containers, project_name)
for container in filter_result.latest:
set_track_color(container["_track_item"], clip_color)
set_track_color(container["_item"], clip_color)
for container in filter_result.outdated:
set_track_color(container["_track_item"], clip_color_last)
set_track_color(container["_item"], clip_color_last)
def selection_changed_timeline(event):


@ -1,6 +1,7 @@
"""
Basic avalon integration
"""
from copy import deepcopy
import os
import contextlib
from collections import OrderedDict
@ -17,6 +18,7 @@ from openpype.pipeline import (
)
from openpype.tools.utils import host_tools
from . import lib, menu, events
import hiero
log = Logger.get_logger(__name__)
@ -106,7 +108,7 @@ def containerise(track_item,
data_imprint.update({k: v})
log.debug("_ data_imprint: {}".format(data_imprint))
lib.set_track_item_pype_tag(track_item, data_imprint)
lib.set_trackitem_openpype_tag(track_item, data_imprint)
return track_item
@ -123,79 +125,131 @@ def ls():
"""
# get all track items from current timeline
all_track_items = lib.get_track_items()
all_items = lib.get_track_items()
for track_item in all_track_items:
container = parse_container(track_item)
if container:
yield container
# append all video tracks
for track in lib.get_current_sequence():
if type(track) != hiero.core.VideoTrack:
continue
all_items.append(track)
for item in all_items:
container_data = parse_container(item)
if isinstance(container_data, list):
for _c in container_data:
yield _c
elif container_data:
yield container_data
def parse_container(track_item, validate=True):
def parse_container(item, validate=True):
"""Return container data from track_item's pype tag.
Args:
track_item (hiero.core.TrackItem): A containerised track item.
item (hiero.core.TrackItem or hiero.core.VideoTrack):
A containerised track item.
validate (bool)[optional]: validating with avalon scheme
Returns:
dict: The container schema data for input containerized track item.
"""
def data_to_container(item, data):
if (
not data
or data.get("id") != "pyblish.avalon.container"
):
return
if validate and data and data.get("schema"):
schema.validate(data)
if not isinstance(data, dict):
return
# If not all required data return the empty container
required = ['schema', 'id', 'name',
'namespace', 'loader', 'representation']
if any(key not in data for key in required):
return
container = {key: data[key] for key in required}
container["objectName"] = item.name()
# Store reference to the node object
container["_item"] = item
return container
# convert tag metadata to normal keys names
data = lib.get_track_item_pype_data(track_item)
if (
not data
or data.get("id") != "pyblish.avalon.container"
):
return
if type(item) == hiero.core.VideoTrack:
return_list = []
_data = lib.get_track_openpype_data(item)
if validate and data and data.get("schema"):
schema.validate(data)
if not _data:
return
# convert the data to list and validate them
for _, obj_data in _data.items():
container = data_to_container(item, obj_data)
return_list.append(container)
return return_list
else:
_data = lib.get_trackitem_openpype_data(item)
return data_to_container(item, _data)
if not isinstance(data, dict):
return
# If not all required data return the empty container
required = ['schema', 'id', 'name',
'namespace', 'loader', 'representation']
if not all(key in data for key in required):
return
container = {key: data[key] for key in required}
container["objectName"] = track_item.name()
# Store reference to the node object
container["_track_item"] = track_item
def _update_container_data(container, data):
for key in container:
try:
container[key] = data[key]
except KeyError:
pass
return container
def update_container(track_item, data=None):
"""Update container data to input track_item's pype tag.
def update_container(item, data=None):
"""Update container data to input track_item or track's
openpype tag.
Args:
track_item (hiero.core.TrackItem): A containerised track item.
item (hiero.core.TrackItem or hiero.core.VideoTrack):
A containerised track item.
data (dict)[optional]: dictionary with data to be updated
Returns:
bool: True if container was updated correctly
"""
data = data or dict()
container = lib.get_track_item_pype_data(track_item)
data = data or {}
data = deepcopy(data)
for _key, _value in container.items():
try:
container[_key] = data[_key]
except KeyError:
pass
if type(item) == hiero.core.VideoTrack:
# form object data for test
object_name = data["objectName"]
log.info("Updating container: `{}`".format(track_item.name()))
return bool(lib.set_track_item_pype_tag(track_item, container))
# get all available containers
containers = lib.get_track_openpype_data(item)
container = lib.get_track_openpype_data(item, object_name)
containers = deepcopy(containers)
container = deepcopy(container)
# update data in container
updated_container = _update_container_data(container, data)
# merge updated container back to containers
containers.update({object_name: updated_container})
return bool(lib.set_track_openpype_tag(item, containers))
else:
container = lib.get_trackitem_openpype_data(item)
updated_container = _update_container_data(container, data)
log.info("Updating container: `{}`".format(item.name()))
return bool(lib.set_trackitem_openpype_tag(item, updated_container))
def launch_workfiles_app(*args):
@ -251,7 +305,6 @@ def reload_config():
import importlib
for module in (
"openpype.api",
"openpype.hosts.hiero.lib",
"openpype.hosts.hiero.menu",
"openpype.hosts.hiero.tags"
@ -273,11 +326,11 @@ def on_pyblish_instance_toggled(instance, old_value, new_value):
instance, old_value, new_value))
from openpype.hosts.hiero.api import (
get_track_item_pype_tag,
get_trackitem_openpype_tag,
set_publish_attribute
)
# Whether instances should be passthrough based on new value
track_item = instance.data["item"]
tag = get_track_item_pype_tag(track_item)
tag = get_trackitem_openpype_tag(track_item)
set_publish_attribute(tag, new_value)


@ -8,7 +8,7 @@ import hiero
from Qt import QtWidgets, QtCore
import qargparse
import openpype.api as openpype
from openpype.settings import get_current_project_settings
from openpype.lib import Logger
from openpype.pipeline import LoaderPlugin, LegacyCreator
from openpype.pipeline.context_tools import get_current_project_asset
@ -170,7 +170,10 @@ class CreatorWidget(QtWidgets.QDialog):
for func, val in kwargs.items():
if getattr(item, func):
func_attr = getattr(item, func)
func_attr(val)
if isinstance(val, tuple):
func_attr(*val)
else:
func_attr(val)
# add to layout
layout.addRow(label, item)
@ -273,8 +276,8 @@ class CreatorWidget(QtWidgets.QDialog):
elif v["type"] == "QSpinBox":
data[k]["value"] = self.create_row(
content_layout, "QSpinBox", v["label"],
setValue=v["value"], setMinimum=0,
setMaximum=100000, setToolTip=tool_tip)
setRange=(1, 9999999), setValue=v["value"],
setToolTip=tool_tip)
return data
@ -606,7 +609,7 @@ class Creator(LegacyCreator):
def __init__(self, *args, **kwargs):
super(Creator, self).__init__(*args, **kwargs)
import openpype.hosts.hiero.api as phiero
self.presets = openpype.get_current_project_settings()[
self.presets = get_current_project_settings()[
"hiero"]["create"].get(self.__class__.__name__, {})
# adding basic current context resolve objects


@ -1,3 +1,4 @@
import json
import re
import os
import hiero
@ -85,17 +86,16 @@ def update_tag(tag, data):
# get metadata key from data
data_mtd = data.get("metadata", {})
# due to a Hiero bug we have to make sure that keys which do not exist
# in data have their value cleared with `None`
for _mk in mtd.dict().keys():
if _mk.replace("tag.", "") not in data_mtd.keys():
mtd.setValue(_mk, str(None))
# set all data metadata to tag metadata
for k, v in data_mtd.items():
for _k, _v in data_mtd.items():
value = str(_v)
if type(_v) == dict:
value = json.dumps(_v)
# set the value
mtd.setValue(
"tag.{}".format(str(k)),
str(v)
"tag.{}".format(str(_k)),
value
)
# set note description of tag

View file

@ -0,0 +1,308 @@
import json
from collections import OrderedDict
import six
from openpype.client import (
get_version_by_id
)
from openpype.pipeline import (
AVALON_CONTAINER_ID,
load,
legacy_io,
get_representation_path
)
from openpype.hosts.hiero import api as phiero
from openpype.lib import Logger
class LoadEffects(load.LoaderPlugin):
"""Loading colorspace soft effect exported from nukestudio"""
representations = ["effectJson"]
families = ["effect"]
label = "Load Effects"
order = 0
icon = "cc"
color = "white"
log = Logger.get_logger(__name__)
def load(self, context, name, namespace, data):
"""
Loading function applying the soft effects to the active track
Arguments:
context (dict): context of version
name (str): name of the version
namespace (str): asset name
data (dict): compulsory attribute > not used
Returns:
nuke node: containerised nuke node object
"""
active_sequence = phiero.get_current_sequence()
active_track = phiero.get_current_track(
active_sequence, "Loaded_{}".format(name))
# get main variables
namespace = namespace or context["asset"]["name"]
object_name = "{}_{}".format(name, namespace)
clip_in = context["asset"]["data"]["clipIn"]
clip_out = context["asset"]["data"]["clipOut"]
data_imprint = {
"objectName": object_name,
"children_names": []
}
# getting file path
file = self.fname.replace("\\", "/")
if self._shared_loading(
file,
active_track,
clip_in,
clip_out,
data_imprint
):
self.containerise(
active_track,
name=name,
namespace=namespace,
object_name=object_name,
context=context,
loader=self.__class__.__name__,
data=data_imprint)
def _shared_loading(
self,
file,
active_track,
clip_in,
clip_out,
data_imprint,
update=False
):
# getting data from json file with unicode conversion
with open(file, "r") as f:
json_f = {self.byteify(key): self.byteify(value)
for key, value in json.load(f).items()}
# get correct order of nodes by positions on track and subtrack
nodes_order = self.reorder_nodes(json_f)
used_subtracks = {
stitem.name(): stitem
for stitem in phiero.flatten(active_track.subTrackItems())
}
loaded = False
for index_order, (ef_name, ef_val) in enumerate(nodes_order.items()):
new_name = "{}_loaded".format(ef_name)
if new_name not in used_subtracks:
effect_track_item = active_track.createEffect(
effectType=ef_val["class"],
timelineIn=clip_in,
timelineOut=clip_out,
subTrackIndex=index_order
)
effect_track_item.setName(new_name)
else:
effect_track_item = used_subtracks[new_name]
node = effect_track_item.node()
for knob_name, knob_value in ef_val["node"].items():
if (
not knob_value
or knob_name == "name"
):
continue
try:
# assume list means animation
# except up to 4 values, which could be RGBA or a vector
if isinstance(knob_value, list) and len(knob_value) > 4:
node[knob_name].setAnimated()
for i, value in enumerate(knob_value):
if isinstance(value, list):
# list can have vector animation
for ci, cv in enumerate(value):
node[knob_name].setValueAt(
cv,
(clip_in + i),
ci
)
else:
# list is single values
node[knob_name].setValueAt(
value,
(clip_in + i)
)
else:
node[knob_name].setValue(knob_value)
except NameError:
self.log.warning("Knob: {} cannot be set".format(
knob_name))
# register all loaded children
data_imprint["children_names"].append(new_name)
# make sure containerisation will happen
loaded = True
return loaded
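The knob loop above distinguishes scalars, short lists and animation curves. A sketch of hypothetical JSON payloads illustrating each branch (all values invented for illustration):

knob_values = {
    "size": 2.5,                        # scalar -> setValue(2.5)
    "tint": [1.0, 0.5, 0.25, 1.0],      # <= 4 values -> RGBA/vector, setValue
    "mix": [0.0, 0.3, 0.6, 0.9, 1.0],   # > 4 values -> setValueAt per frame
    "center": [[0, 0], [1, 2], [2, 4], [3, 6], [4, 8]],
    # nested lists -> per-channel keys: setValueAt(cv, clip_in + i, channel)
}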
def update(self, container, representation):
""" Updating previously loaded effects
"""
active_track = container["_item"]
file = get_representation_path(representation).replace("\\", "/")
# get main variables
name = container['name']
namespace = container['namespace']
# get timeline in out data
project_name = legacy_io.active_project()
version_doc = get_version_by_id(project_name, representation["parent"])
version_data = version_doc["data"]
clip_in = version_data["clipIn"]
clip_out = version_data["clipOut"]
object_name = "{}_{}".format(name, namespace)
# Disable previously created nodes
used_subtracks = {
stitem.name(): stitem
for stitem in phiero.flatten(active_track.subTrackItems())
}
container = phiero.get_track_openpype_data(
active_track, object_name
)
loaded_subtrack_items = container["children_names"]
for loaded_stitem in loaded_subtrack_items:
if loaded_stitem not in used_subtracks:
continue
item_to_remove = used_subtracks.pop(loaded_stitem)
# TODO: find a way to erase nodes
self.log.debug(
"This node needs to be removed: {}".format(item_to_remove))
data_imprint = {
"objectName": object_name,
"name": name,
"representation": str(representation["_id"]),
"children_names": []
}
if self._shared_loading(
file,
active_track,
clip_in,
clip_out,
data_imprint,
update=True
):
return phiero.update_container(active_track, data_imprint)
def reorder_nodes(self, data):
new_order = OrderedDict()
trackNums = [v["trackIndex"] for k, v in data.items()
if isinstance(v, dict)]
subTrackNums = [v["subTrackIndex"] for k, v in data.items()
if isinstance(v, dict)]
for trackIndex in range(
min(trackNums), max(trackNums) + 1):
for subTrackIndex in range(
min(subTrackNums), max(subTrackNums) + 1):
item = self.get_item(data, trackIndex, subTrackIndex)
# `get_item` returns an empty dict when nothing matches
if item:
new_order.update(item)
return new_order
def get_item(self, data, trackIndex, subTrackIndex):
return {key: val for key, val in data.items()
if isinstance(val, dict)
if subTrackIndex == val["subTrackIndex"]
if trackIndex == val["trackIndex"]}
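For example, given a hypothetical effect JSON, `reorder_nodes` yields the entries sorted by track first, then sub-track, regardless of their order in the file:

data = {
    "Grade2": {"class": "Grade", "trackIndex": 1, "subTrackIndex": 0},
    "Grade1": {"class": "Grade", "trackIndex": 0, "subTrackIndex": 1},
    "Blur1": {"class": "Blur", "trackIndex": 0, "subTrackIndex": 0},
}
# reorder_nodes(data) would yield keys: ["Blur1", "Grade1", "Grade2"]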
def byteify(self, input):
"""
Convert unicode strings to plain strings.
Recurses through dictionaries and lists.
Arguments:
input (dict/list/str): data to convert
Returns:
dict/list/str: input with unicode converted to str
"""
if isinstance(input, dict):
return {self.byteify(key): self.byteify(value)
for key, value in input.items()}
elif isinstance(input, list):
return [self.byteify(element) for element in input]
elif isinstance(input, six.text_type):
return str(input)
else:
return input
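A small round-trip example, assuming Python 2 semantics where `json.load` returns unicode objects:

payload = {u"node": {u"class": u"Grade", u"knobs": [u"white", u"gain"]}}
clean = self.byteify(payload)
# -> {"node": {"class": "Grade", "knobs": ["white", "gain"]}}, all plain str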
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
pass
def containerise(
self,
track,
name,
namespace,
object_name,
context,
loader=None,
data=None
):
"""Bundle Hiero's object into an assembly and imprint it with metadata
Containerisation enables tracking of version, author and origin
for loaded assets.
Arguments:
track (hiero.core.VideoTrack): object to imprint as container
name (str): Name of resulting assembly
namespace (str): Namespace under which to host container
object_name (str): name of container
context (dict): Asset information
loader (str, optional): Name of node used to produce this
container.
Returns:
track_item (hiero.core.TrackItem): containerised object
"""
data_imprint = {
object_name: {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": str(name),
"namespace": str(namespace),
"loader": str(loader),
"representation": str(context["representation"]["_id"]),
}
}
if data:
for k, v in data.items():
data_imprint[object_name].update({k: v})
self.log.debug("_ data_imprint: {}".format(data_imprint))
phiero.set_track_openpype_tag(track, data_imprint)
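The imprinted tag data ends up keyed by object name; a sketch of the stored structure with hypothetical values:

{
    "effectMain_sh010": {
        "schema": "openpype:container-2.0",
        "id": AVALON_CONTAINER_ID,
        "name": "effectMain",
        "namespace": "sh010",
        "loader": "LoadEffects",
        "representation": "<representation id>",
        "children_names": ["Grade1_loaded"],
    }
}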

View file

@ -16,6 +16,9 @@ class CollectClipEffects(pyblish.api.InstancePlugin):
review_track_index = instance.context.data.get("reviewTrackIndex")
item = instance.data["item"]
if "audio" in instance.data["family"]:
return
# frame range
self.handle_start = instance.data["handleStart"]
self.handle_end = instance.data["handleEnd"]

View file

@ -1,5 +1,6 @@
from pyblish import api
import openpype.api as pype
from openpype.lib import version_up
class IntegrateVersionUpWorkfile(api.ContextPlugin):
@ -15,7 +16,7 @@ class IntegrateVersionUpWorkfile(api.ContextPlugin):
def process(self, context):
project = context.data["activeProject"]
path = context.data.get("currentFile")
new_path = pype.version_up(path)
new_path = version_up(path)
if project:
project.saveAs(new_path)

View file

@ -48,7 +48,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
self.log.debug("clip_name: {}".format(clip_name))
# get openpype tag data
tag_data = phiero.get_track_item_pype_data(track_item)
tag_data = phiero.get_trackitem_openpype_data(track_item)
self.log.debug("__ tag_data: {}".format(pformat(tag_data)))
if not tag_data:
@ -326,8 +326,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
return hiero_export.create_otio_time_range(
frame_start, frame_duration, fps)
@staticmethod
def collect_sub_track_items(tracks):
def collect_sub_track_items(self, tracks):
"""
Returns dictionary with track index as key and list of subtracks
"""
@ -336,8 +335,10 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
for track in tracks:
items = track.items()
effect_items = track.subTrackItems()
# skip tracks without effect items - we need tracks with effects only
if items:
if not effect_items:
continue
# skip all disabled tracks
@ -345,10 +346,11 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
continue
track_index = track.trackIndex()
_sub_track_items = phiero.flatten(track.subTrackItems())
_sub_track_items = phiero.flatten(effect_items)
_sub_track_items = list(_sub_track_items)
# continue only if any subtrack items are collected
if not list(_sub_track_items):
if not _sub_track_items:
continue
enabled_sti = []

View file

@ -1,6 +1,5 @@
import os
from openpype.modules import OpenPypeModule
from openpype.modules.interfaces import IHostAddon
from openpype.modules import OpenPypeModule, IHostAddon
HOUDINI_HOST_DIR = os.path.dirname(os.path.abspath(__file__))

View file

@ -1,11 +1,10 @@
import os
import re
import clique
from openpype.pipeline import (
load,
get_representation_path,
)
from openpype.hosts.houdini.api import pipeline
@ -20,7 +19,6 @@ class AssLoader(load.LoaderPlugin):
color = "orange"
def load(self, context, name=None, namespace=None, data=None):
import hou
# Get the root node
@ -32,7 +30,11 @@ class AssLoader(load.LoaderPlugin):
# Create a new geo node
procedural = obj.createNode("arnold::procedural", node_name=node_name)
procedural.setParms({"ar_filename": self.get_path(self.fname)})
procedural.setParms(
{
"ar_filename": self.format_path(context["representation"])
})
nodes = [procedural]
self[:] = nodes
@ -46,62 +48,43 @@ class AssLoader(load.LoaderPlugin):
suffix="",
)
def get_path(self, path):
# Find all frames in the folder
ext = ".ass.gz" if path.endswith(".ass.gz") else ".ass"
folder = os.path.dirname(path)
frames = [f for f in os.listdir(folder) if f.endswith(ext)]
# Get the collection of frames to detect frame padding
patterns = [clique.PATTERNS["frames"]]
collections, remainder = clique.assemble(frames,
minimum_items=1,
patterns=patterns)
self.log.debug("Detected collections: {}".format(collections))
self.log.debug("Detected remainder: {}".format(remainder))
if not collections and remainder:
if len(remainder) != 1:
raise ValueError("Frames not correctly detected "
"in: {}".format(remainder))
# A single frame without frame range detected
filepath = remainder[0]
return os.path.normpath(filepath).replace("\\", "/")
# Frames detected with a valid "frame" number pattern
# Then we don't want to have any remainder files found
assert len(collections) == 1 and not remainder
collection = collections[0]
num_frames = len(collection.indexes)
if num_frames == 1:
# Return the input path without dynamic $F variable
result = path
else:
# More than a single frame detected - use $F{padding}
fname = "{}$F{}{}".format(collection.head,
collection.padding,
collection.tail)
result = os.path.join(folder, fname)
# Format file name, Houdini only wants forward slashes
return os.path.normpath(result).replace("\\", "/")
def update(self, container, representation):
# Update the file path
file_path = get_representation_path(representation)
file_path = file_path.replace("\\", "/")
procedural = container["node"]
procedural.setParms({"ar_filename": self.get_path(file_path)})
procedural.setParms({"ar_filename": self.format_path(representation)})
# Update attribute
procedural.setParms({"representation": str(representation["_id"])})
def remove(self, container):
node = container["node"]
node.destroy()
@staticmethod
def format_path(representation):
"""Format file path correctly for single ass.* or ass.* sequence.
Args:
representation (dict): representation to be loaded.
Returns:
str: Formatted path to be used by the input node.
"""
path = get_representation_path(representation)
if not os.path.exists(path):
raise RuntimeError("Path does not exist: {}".format(path))
is_sequence = bool(representation["context"].get("frame"))
# The path is either a single file or sequence in a folder.
if is_sequence:
dir_path, file_name = os.path.split(path)
path = os.path.join(
dir_path,
re.sub(r"(.*)\.(\d+)\.(ass.*)", "\\1.$F4.\\3", file_name)
)
return os.path.normpath(path).replace("\\", "/")
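The substitution only touches the frame number; a quick sketch of the regex on a hypothetical file name:

import re

file_name = "cache.1001.ass.gz"  # hypothetical sequence member
print(re.sub(r"(.*)\.(\d+)\.(ass.*)", "\\1.$F4.\\3", file_name))
# -> cache.$F4.ass.gz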
def switch(self, container, representation):
self.update(container, representation)

View file

@ -73,7 +73,7 @@ class ImageLoader(load.LoaderPlugin):
# Imprint it manually
data = {
"schema": "avalon-core:container-2.0",
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": node_name,
"namespace": namespace,

View file

@ -43,7 +43,7 @@ class USDSublayerLoader(load.LoaderPlugin):
# Imprint it manually
data = {
"schema": "avalon-core:container-2.0",
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": node_name,
"namespace": namespace,

View file

@ -43,7 +43,7 @@ class USDReferenceLoader(load.LoaderPlugin):
# Imprint it manually
data = {
"schema": "avalon-core:container-2.0",
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": node_name,
"namespace": namespace,

View file

@ -1,8 +1,9 @@
# -*- coding: utf-8 -*-
import openpype.api
import pyblish.api
import hou
from openpype.pipeline.publish import RepairAction
class ValidateWorkfilePaths(pyblish.api.InstancePlugin):
"""Validate workfile paths so they are absolute."""
@ -11,7 +12,7 @@ class ValidateWorkfilePaths(pyblish.api.InstancePlugin):
families = ["workfile"]
hosts = ["houdini"]
label = "Validate Workfile Paths"
actions = [openpype.api.RepairAction]
actions = [RepairAction]
optional = True
node_types = ["file", "alembic"]
@ -35,6 +36,9 @@ class ValidateWorkfilePaths(pyblish.api.InstancePlugin):
def get_invalid(cls):
invalid = []
for param, _ in hou.fileReferences():
if param is None:
continue
# skip nodes we are not interested in
if param.node().type().name() not in cls.node_types:
continue

View file

@ -1,6 +1,5 @@
import os
from openpype.modules import OpenPypeModule
from openpype.modules.interfaces import IHostAddon
from openpype.modules import OpenPypeModule, IHostAddon
MAYA_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
@ -28,13 +27,16 @@ class MayaAddon(OpenPypeModule, IHostAddon):
env["PYTHONPATH"] = os.pathsep.join(new_python_paths)
# Set default values if are not already set via settings
defaults = {
"OPENPYPE_LOG_NO_COLORS": "Yes"
# Set default environments
envs = {
"OPENPYPE_LOG_NO_COLORS": "Yes",
# For python module 'qtpy'
"QT_API": "PySide2",
# For python module 'Qt'
"QT_PREFERRED_BINDING": "PySide2"
}
for key, value in defaults.items():
if not env.get(key):
env[key] = value
for key, value in envs.items():
env[key] = value
def get_launch_hook_paths(self, app):
if app.host_name != self.host_name:

View file

@ -8,7 +8,7 @@ from functools import partial
import maya.cmds as cmds
import maya.mel as mel
from openpype.api import resources
from openpype import resources
from openpype.tools.utils import host_tools
from .lib import get_main_window

View file

@ -23,7 +23,7 @@ from openpype.client import (
get_last_versions,
get_representation_by_name
)
from openpype.api import get_anatomy_settings
from openpype.settings import get_project_settings
from openpype.pipeline import (
legacy_io,
discover_loader_plugins,
@ -1532,7 +1532,7 @@ def get_container_members(container):
if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"):
continue
reference_members = cmds.referenceQuery(ref, nodes=True)
reference_members = cmds.referenceQuery(ref, nodes=True, dagPath=True)
reference_members = cmds.ls(reference_members,
long=True,
objectsOnly=True)
@ -2459,182 +2459,120 @@ def bake_to_world_space(nodes,
def load_capture_preset(data=None):
"""Convert OpenPype Extract Playblast settings to `capture` arguments
Input data is the settings from:
`project_settings/maya/publish/ExtractPlayblast/capture_preset`
Args:
data (dict): Capture preset settings from OpenPype settings
Returns:
dict: `capture.capture` compatible keyword arguments
"""
import capture
preset = data
options = dict()
viewport_options = dict()
viewport2_options = dict()
camera_options = dict()
# CODEC
id = 'Codec'
for key in preset[id]:
options[str(key)] = preset[id][key]
# Straight key-value match from settings to capture arguments
options.update(data["Codec"])
options.update(data["Generic"])
options.update(data["Resolution"])
# GENERIC
id = 'Generic'
for key in preset[id]:
options[str(key)] = preset[id][key]
# RESOLUTION
id = 'Resolution'
options['height'] = preset[id]['height']
options['width'] = preset[id]['width']
camera_options.update(data['Camera Options'])
viewport_options.update(data["Renderer"])
# DISPLAY OPTIONS
id = 'Display Options'
disp_options = {}
for key in preset[id]:
for key, value in data['Display Options'].items():
if key.startswith('background'):
disp_options[key] = preset['Display Options'][key]
if len(disp_options[key]) == 4:
disp_options[key][0] = (float(disp_options[key][0])/255)
disp_options[key][1] = (float(disp_options[key][1])/255)
disp_options[key][2] = (float(disp_options[key][2])/255)
disp_options[key].pop()
# Convert background, backgroundTop, backgroundBottom colors
if len(value) == 4:
# Ignore alpha + convert RGB to float
value = [
float(value[0]) / 255,
float(value[1]) / 255,
float(value[2]) / 255
]
disp_options[key] = value
else:
disp_options['displayGradient'] = True
options['display_options'] = disp_options
# VIEWPORT OPTIONS
temp_options = {}
id = 'Renderer'
for key in preset[id]:
temp_options[str(key)] = preset[id][key]
# Viewport Options contains a mixture of Viewport2 Options and Viewport
# Options to pass along to capture, so we need to differentiate the two
VIEWPORT2_OPTIONS = {
"textureMaxResolution",
"renderDepthOfField",
"ssaoEnable",
"ssaoSamples",
"ssaoAmount",
"ssaoRadius",
"ssaoFilterRadius",
"hwFogStart",
"hwFogEnd",
"hwFogAlpha",
"hwFogFalloff",
"hwFogColorR",
"hwFogColorG",
"hwFogColorB",
"hwFogDensity",
"motionBlurEnable",
"motionBlurSampleCount",
"motionBlurShutterOpenFraction",
"lineAAEnable"
}
for key, value in data['Viewport Options'].items():
temp_options2 = {}
id = 'Viewport Options'
for key in preset[id]:
# There are some keys we want to ignore
if key in {"override_viewport_options", "high_quality"}:
continue
# First handle special cases where we do value conversion to
# separate option values
if key == 'textureMaxResolution':
if preset[id][key] > 0:
temp_options2['textureMaxResolution'] = preset[id][key]
temp_options2['enableTextureMaxRes'] = True
temp_options2['textureMaxResMode'] = 1
viewport2_options['textureMaxResolution'] = value
if value > 0:
viewport2_options['enableTextureMaxRes'] = True
viewport2_options['textureMaxResMode'] = 1
else:
temp_options2['textureMaxResolution'] = preset[id][key]
temp_options2['enableTextureMaxRes'] = False
temp_options2['textureMaxResMode'] = 0
viewport2_options['enableTextureMaxRes'] = False
viewport2_options['textureMaxResMode'] = 0
if key == 'multiSample':
if preset[id][key] > 0:
temp_options2['multiSampleEnable'] = True
temp_options2['multiSampleCount'] = preset[id][key]
else:
temp_options2['multiSampleEnable'] = False
temp_options2['multiSampleCount'] = preset[id][key]
elif key == 'multiSample':
viewport2_options['multiSampleEnable'] = value > 0
viewport2_options['multiSampleCount'] = value
if key == 'renderDepthOfField':
temp_options2['renderDepthOfField'] = preset[id][key]
elif key == 'alphaCut':
viewport2_options['transparencyAlgorithm'] = 5
viewport2_options['transparencyQuality'] = 1
if key == 'ssaoEnable':
if preset[id][key] is True:
temp_options2['ssaoEnable'] = True
else:
temp_options2['ssaoEnable'] = False
elif key == 'hwFogFalloff':
# Settings enum value string to integer
viewport2_options['hwFogFalloff'] = int(value)
if key == 'ssaoSamples':
temp_options2['ssaoSamples'] = preset[id][key]
if key == 'ssaoAmount':
temp_options2['ssaoAmount'] = preset[id][key]
if key == 'ssaoRadius':
temp_options2['ssaoRadius'] = preset[id][key]
if key == 'hwFogDensity':
temp_options2['hwFogDensity'] = preset[id][key]
if key == 'ssaoFilterRadius':
temp_options2['ssaoFilterRadius'] = preset[id][key]
if key == 'alphaCut':
temp_options2['transparencyAlgorithm'] = 5
temp_options2['transparencyQuality'] = 1
if key == 'headsUpDisplay':
temp_options['headsUpDisplay'] = True
if key == 'fogging':
temp_options['fogging'] = preset[id][key] or False
if key == 'hwFogStart':
temp_options2['hwFogStart'] = preset[id][key]
if key == 'hwFogEnd':
temp_options2['hwFogEnd'] = preset[id][key]
if key == 'hwFogAlpha':
temp_options2['hwFogAlpha'] = preset[id][key]
if key == 'hwFogFalloff':
temp_options2['hwFogFalloff'] = int(preset[id][key])
if key == 'hwFogColorR':
temp_options2['hwFogColorR'] = preset[id][key]
if key == 'hwFogColorG':
temp_options2['hwFogColorG'] = preset[id][key]
if key == 'hwFogColorB':
temp_options2['hwFogColorB'] = preset[id][key]
if key == 'motionBlurEnable':
if preset[id][key] is True:
temp_options2['motionBlurEnable'] = True
else:
temp_options2['motionBlurEnable'] = False
if key == 'motionBlurSampleCount':
temp_options2['motionBlurSampleCount'] = preset[id][key]
if key == 'motionBlurShutterOpenFraction':
temp_options2['motionBlurShutterOpenFraction'] = preset[id][key]
if key == 'lineAAEnable':
if preset[id][key] is True:
temp_options2['lineAAEnable'] = True
else:
temp_options2['lineAAEnable'] = False
# Then handle Viewport 2.0 Options
elif key in VIEWPORT2_OPTIONS:
viewport2_options[key] = value
# Then assume remainder is Viewport Options
else:
temp_options[str(key)] = preset[id][key]
viewport_options[key] = value
for key in ['override_viewport_options',
'high_quality',
'alphaCut',
'gpuCacheDisplayFilter',
'multiSample',
'ssaoEnable',
'ssaoSamples',
'ssaoAmount',
'ssaoFilterRadius',
'ssaoRadius',
'hwFogStart',
'hwFogEnd',
'hwFogAlpha',
'hwFogFalloff',
'hwFogColorR',
'hwFogColorG',
'hwFogColorB',
'hwFogDensity',
'textureMaxResolution',
'motionBlurEnable',
'motionBlurSampleCount',
'motionBlurShutterOpenFraction',
'lineAAEnable',
'renderDepthOfField'
]:
temp_options.pop(key, None)
options['viewport_options'] = temp_options
options['viewport2_options'] = temp_options2
options['viewport_options'] = viewport_options
options['viewport2_options'] = viewport2_options
options['camera_options'] = camera_options
# use active sound track
scene = capture.parse_active_scene()
options['sound'] = scene['sound']
# options['display_options'] = temp_options
return options
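The background colors come in as 0-255 RGBA from settings while `capture` expects 0-1 RGB; a minimal sketch of that conversion with an invented value:

value = [128, 64, 0, 255]  # hypothetical settings color (RGBA, 0-255)
rgb = [float(channel) / 255 for channel in value[:3]]  # alpha dropped
# -> [0.50196..., 0.25098..., 0.0]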
@ -3159,7 +3097,7 @@ def set_colorspace():
"""Set Colorspace from project configuration
"""
project_name = os.getenv("AVALON_PROJECT")
imageio = get_anatomy_settings(project_name)["imageio"]["maya"]
imageio = get_project_settings(project_name)["maya"]["imageio"]
# Maya 2022+ introduces new OCIO v2 color management settings that
# can override the old color managenement preferences. OpenPype has

View file

@ -536,6 +536,11 @@ class RenderProductsArnold(ARenderProducts):
products = []
aov_name = self._get_attr(aov, "name")
multipart = False
multilayer = bool(self._get_attr("defaultArnoldDriver.multipart"))
merge_AOVs = bool(self._get_attr("defaultArnoldDriver.mergeAOVs"))
if multilayer or merge_AOVs:
multipart = True
ai_drivers = cmds.listConnections("{}.outputs".format(aov),
source=True,
destination=False,
@ -589,6 +594,7 @@ class RenderProductsArnold(ARenderProducts):
ext=ext,
aov=aov_name,
driver=ai_driver,
multipart=multipart,
camera=camera)
products.append(product)
@ -1016,7 +1022,11 @@ class RenderProductsRedshift(ARenderProducts):
# due to some AOVs still being written into separate files,
# like Cryptomatte.
# AOVs are merged in multi-channel file
multipart = bool(self._get_attr("redshiftOptions.exrForceMultilayer"))
multipart = False
force_layer = bool(self._get_attr("redshiftOptions.exrForceMultilayer")) # noqa
exMultipart = bool(self._get_attr("redshiftOptions.exrMultipart"))
if exMultipart or force_layer:
multipart = True
# Get Redshift Extension from image format
image_format = self._get_attr("redshiftOptions.imageFormat") # integer
@ -1044,7 +1054,6 @@ class RenderProductsRedshift(ARenderProducts):
# Any AOVs that still get processed, like Cryptomatte
# by themselves are not multipart files.
aov_multipart = not multipart
# Redshift skips rendering of masterlayer without AOV suffix
# when a Beauty AOV is rendered. It overrides the main layer.
@ -1075,7 +1084,7 @@ class RenderProductsRedshift(ARenderProducts):
productName=aov_light_group_name,
aov=aov_name,
ext=ext,
multipart=aov_multipart,
multipart=multipart,
camera=camera)
products.append(product)
@ -1089,7 +1098,7 @@ class RenderProductsRedshift(ARenderProducts):
product = RenderProduct(productName=aov_name,
aov=aov_name,
ext=ext,
multipart=aov_multipart,
multipart=multipart,
camera=camera)
products.append(product)
@ -1100,7 +1109,7 @@ class RenderProductsRedshift(ARenderProducts):
if light_groups_enabled:
return products
beauty_name = "Beauty_other" if has_beauty_aov else ""
beauty_name = "BeautyAux" if has_beauty_aov else ""
for camera in cameras:
products.insert(0,
RenderProduct(productName=beauty_name,

View file

@ -6,7 +6,7 @@ import six
import sys
from openpype.lib import Logger
from openpype.api import (
from openpype.settings import (
get_project_settings,
get_current_project_settings
)

View file

View file

@ -12,6 +12,7 @@ class CreateAnimation(plugin.Creator):
family = "animation"
icon = "male"
write_color_sets = False
write_face_sets = False
def __init__(self, *args, **kwargs):
super(CreateAnimation, self).__init__(*args, **kwargs)
@ -24,7 +25,7 @@ class CreateAnimation(plugin.Creator):
# Write vertex colors with the geometry.
self.data["writeColorSets"] = self.write_color_sets
self.data["writeFaceSets"] = False
self.data["writeFaceSets"] = self.write_face_sets
# Include only renderable visible shapes.
# Skips locators and empty transforms

View file

@ -9,13 +9,14 @@ class CreateModel(plugin.Creator):
family = "model"
icon = "cube"
defaults = ["Main", "Proxy", "_MD", "_HD", "_LD"]
write_color_sets = False
write_face_sets = False
def __init__(self, *args, **kwargs):
super(CreateModel, self).__init__(*args, **kwargs)
# Vertex colors with the geometry
self.data["writeColorSets"] = False
self.data["writeFaceSets"] = False
self.data["writeColorSets"] = self.write_color_sets
self.data["writeFaceSets"] = self.write_face_sets
# Include attributes by attribute name or prefix
self.data["attr"] = ""

View file

@ -12,6 +12,7 @@ class CreatePointCache(plugin.Creator):
family = "pointcache"
icon = "gears"
write_color_sets = False
write_face_sets = False
def __init__(self, *args, **kwargs):
super(CreatePointCache, self).__init__(*args, **kwargs)
@ -21,7 +22,8 @@ class CreatePointCache(plugin.Creator):
# Vertex colors with the geometry.
self.data["writeColorSets"] = self.write_color_sets
self.data["writeFaceSets"] = False # Vertex colors with the geometry.
# Face sets with the geometry.
self.data["writeFaceSets"] = self.write_face_sets
self.data["renderableOnly"] = False # Only renderable visible shapes
self.data["visibleOnly"] = False # only nodes that are visible
self.data["includeParentHierarchy"] = False # Include parent groups

View file

@ -9,26 +9,18 @@ import requests
from maya import cmds
from maya.app.renderSetup.model import renderSetup
from openpype.api import (
from openpype.settings import (
get_system_settings,
get_project_settings,
)
from openpype.lib import requests_get
from openpype.modules import ModulesManager
from openpype.pipeline import legacy_io
from openpype.hosts.maya.api import (
lib,
lib_rendersettings,
plugin
)
from openpype.lib import requests_get
from openpype.api import (
get_system_settings,
get_project_settings)
from openpype.modules import ModulesManager
from openpype.pipeline import legacy_io
from openpype.pipeline import (
CreatorError,
legacy_io,
)
from openpype.pipeline.context_tools import get_current_project_asset
class CreateRender(plugin.Creator):

View file

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""Creator for Unreal Static Meshes."""
from openpype.hosts.maya.api import plugin, lib
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
from maya import cmds # noqa

View file

@ -12,7 +12,7 @@ from openpype.hosts.maya.api import (
lib,
plugin
)
from openpype.api import (
from openpype.settings import (
get_system_settings,
get_project_settings
)

View file

@ -90,7 +90,7 @@ class ImportMayaLoader(load.LoaderPlugin):
so you could also use it as a new base.
"""
representations = ["ma", "mb"]
representations = ["ma", "mb", "obj"]
families = ["*"]
label = "Import"

View file

@ -0,0 +1,132 @@
import os
from openpype.pipeline import (
legacy_io,
load,
get_representation_path
)
from openpype.settings import get_project_settings
class AlembicStandinLoader(load.LoaderPlugin):
"""Load Alembic as Arnold Standin"""
families = ["animation", "model", "pointcache"]
representations = ["abc"]
label = "Import Alembic as Arnold Standin"
order = -5
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, options):
import maya.cmds as cmds
import mtoa.ui.arnoldmenu
from openpype.hosts.maya.api.pipeline import containerise
from openpype.hosts.maya.api.lib import unique_namespace
version = context["version"]
version_data = version.get("data", {})
family = version["data"]["families"]
self.log.info("version_data: {}\n".format(version_data))
self.log.info("family: {}\n".format(family))
frameStart = version_data.get("frameStart", None)
asset = context["asset"]["name"]
namespace = namespace or unique_namespace(
asset + "_",
prefix="_" if asset[0].isdigit() else "",
suffix="_",
)
# Root group
label = "{}:{}".format(namespace, name)
root = cmds.group(name=label, empty=True)
settings = get_project_settings(os.environ['AVALON_PROJECT'])
colors = settings["maya"]["load"]["colors"]
fps = legacy_io.Session["AVALON_FPS"]
c = colors.get(family[0])
if c is not None:
r = (float(c[0]) / 255)
g = (float(c[1]) / 255)
b = (float(c[2]) / 255)
cmds.setAttr(root + ".useOutlinerColor", 1)
cmds.setAttr(root + ".outlinerColor",
r, g, b)
transform_name = label + "_ABC"
standinShape = cmds.ls(mtoa.ui.arnoldmenu.createStandIn())[0]
standin = cmds.listRelatives(standinShape, parent=True,
typ="transform")
standin = cmds.rename(standin, transform_name)
standinShape = cmds.listRelatives(standin, children=True)[0]
cmds.parent(standin, root)
# Set the standin filepath
cmds.setAttr(standinShape + ".dso", self.fname, type="string")
cmds.setAttr(standinShape + ".abcFPS", float(fps))
if frameStart is None:
cmds.setAttr(standinShape + ".useFrameExtension", 0)
elif "model" in family:
cmds.setAttr(standinShape + ".useFrameExtension", 0)
else:
cmds.setAttr(standinShape + ".useFrameExtension", 1)
nodes = [root, standin]
self[:] = nodes
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__)
def update(self, container, representation):
import pymel.core as pm
path = get_representation_path(representation)
fps = legacy_io.Session["AVALON_FPS"]
# Update the standin
standins = list()
members = pm.sets(container['objectName'], query=True)
self.log.info("container:{}".format(container))
for member in members:
shape = member.getShape()
if (shape and shape.type() == "aiStandIn"):
standins.append(shape)
for standin in standins:
standin.dso.set(path)
standin.abcFPS.set(float(fps))
if "modelMain" in container['objectName']:
standin.useFrameExtension.set(0)
else:
standin.useFrameExtension.set(1)
container = pm.PyNode(container["objectName"])
container.representation.set(str(representation["_id"]))
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
import maya.cmds as cmds
members = cmds.sets(container['objectName'], query=True)
cmds.lockNode(members, lock=False)
cmds.delete([container['objectName']] + members)
# Clean up the namespace
try:
cmds.namespace(removeNamespace=container['namespace'],
deleteNamespaceContent=True)
except RuntimeError:
pass

View file

@ -1,7 +1,7 @@
import os
import clique
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path

View file

@ -4,7 +4,7 @@ from openpype.pipeline import (
load,
get_representation_path
)
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
class GpuCacheLoader(load.LoaderPlugin):

View file

@ -5,7 +5,7 @@ import clique
import maya.cmds as cmds
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path

View file

@ -1,7 +1,7 @@
import os
from maya import cmds
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
from openpype.pipeline.create import (
legacy_create,

View file

@ -1,6 +1,6 @@
import os
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path

View file

@ -1,6 +1,6 @@
import os
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path

View file

@ -1,6 +1,6 @@
import os
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path

View file

@ -10,7 +10,7 @@ import os
import maya.cmds as cmds
from openpype.client import get_representation_by_name
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import (
legacy_io,
load,

View file

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
import os
import maya.cmds as cmds # noqa
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path

View file

@ -6,7 +6,7 @@ from collections import defaultdict
import clique
from maya import cmds
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path
@ -73,8 +73,8 @@ class YetiCacheLoader(load.LoaderPlugin):
c = colors.get(family)
if c is not None:
cmds.setAttr(group_name + ".useOutlinerColor", 1)
cmds.setAttr(group_name + ".outlinerColor",
cmds.setAttr(group_node + ".useOutlinerColor", 1)
cmds.setAttr(group_node + ".outlinerColor",
(float(c[0])/255),
(float(c[1])/255),
(float(c[2])/255)
@ -250,7 +250,7 @@ class YetiCacheLoader(load.LoaderPlugin):
"""
name = node_name.replace(":", "_")
pattern = r"^({name})(\.[0-4]+)?(\.fur)$".format(name=re.escape(name))
pattern = r"^({name})(\.[0-9]+)?(\.fur)$".format(name=re.escape(name))
files = [fname for fname in os.listdir(root) if re.match(pattern,
fname)]

View file

@ -1,7 +1,7 @@
import os
from collections import defaultdict
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api import lib

View file

@ -102,23 +102,26 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
}
for layer in collected_render_layers:
try:
if layer.startswith("LAYER_"):
# this is support for legacy mode where render layers
# started with `LAYER_` prefix.
expected_layer_name = re.search(
r"^LAYER_(.*)", layer).group(1)
else:
# new way is to prefix render layer name with instance
# namespace.
expected_layer_name = re.search(
r"^.+:(.*)", layer).group(1)
except IndexError:
if layer.startswith("LAYER_"):
# this is support for legacy mode where render layers
# started with `LAYER_` prefix.
layer_name_pattern = r"^LAYER_(.*)"
else:
# new way is to prefix render layer name with instance
# namespace.
layer_name_pattern = r"^.+:(.*)"
# todo: We should have a more explicit way to link the renderlayer
match = re.match(layer_name_pattern, layer)
if not match:
msg = "Invalid layer name in set [ {} ]".format(layer)
self.log.warning(msg)
continue
self.log.info("processing %s" % layer)
expected_layer_name = match.group(1)
self.log.info("Processing '{}' as layer [ {} ]"
"".format(layer, expected_layer_name))
# check if layer is part of renderSetup
if expected_layer_name not in maya_render_layers:
msg = "Render layer [ {} ] is not in " "Render Setup".format(

View file

@ -34,14 +34,15 @@ class ExtractLayout(publish.Extractor):
for asset in cmds.sets(str(instance), query=True):
# Find the container
grp_name = asset.split(':')[0]
containers = cmds.ls(f"{grp_name}*_CON")
containers = cmds.ls("{}*_CON".format(grp_name))
assert len(containers) == 1, \
f"More than one container found for {asset}"
"More than one container found for {}".format(asset)
container = containers[0]
representation_id = cmds.getAttr(f"{container}.representation")
representation_id = cmds.getAttr(
"{}.representation".format(container))
representation = get_representation_by_id(
project_name,
@ -56,7 +57,8 @@ class ExtractLayout(publish.Extractor):
json_element = {
"family": family,
"instance_name": cmds.getAttr(f"{container}.name"),
"instance_name": cmds.getAttr(
"{}.namespace".format(container)),
"representation": str(representation_id),
"version": str(version_id)
}

View file

@ -0,0 +1,78 @@
# -*- coding: utf-8 -*-
import os
from maya import cmds
# import maya.mel as mel
import pyblish.api
from openpype.pipeline import publish
from openpype.hosts.maya.api import lib
class ExtractObj(publish.Extractor):
"""Extract OBJ from Maya.
This extracts reproducible OBJ exports ignoring any of the settings
set on the local machine in the OBJ export options window.
"""
order = pyblish.api.ExtractorOrder
hosts = ["maya"]
label = "Extract OBJ"
families = ["model"]
def process(self, instance):
# Define output path
staging_dir = self.staging_dir(instance)
filename = "{0}.obj".format(instance.name)
path = os.path.join(staging_dir, filename)
# The export requires forward slashes because we need to
# format it into a string in a mel expression
self.log.info("Extracting OBJ to: {0}".format(path))
members = instance.data("setMembers")
members = cmds.ls(members,
dag=True,
shapes=True,
type=("mesh", "nurbsCurve"),
noIntermediate=True,
long=True)
self.log.info("Members: {0}".format(members))
self.log.info("Instance: {0}".format(instance[:]))
if not cmds.pluginInfo('objExport', query=True, loaded=True):
cmds.loadPlugin('objExport')
# Export
with lib.no_display_layers(instance):
with lib.displaySmoothness(members,
divisionsU=0,
divisionsV=0,
pointsWire=4,
pointsShaded=1,
polygonObject=1):
with lib.shader(members,
shadingEngine="initialShadingGroup"):
with lib.maintained_selection():
cmds.select(members, noExpand=True)
cmds.file(path,
exportSelected=True,
type='OBJexport',
preserveReferences=True,
force=True)
if "representation" not in instance.data:
instance.data["representation"] = []
representation = {
'name': 'obj',
'ext': 'obj',
'files': filename,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)
self.log.info("Extract OBJ successful to: {0}".format(path))

View file

@ -77,8 +77,10 @@ class ExtractPlayblast(publish.Extractor):
preset['height'] = asset_height
preset['start_frame'] = start
preset['end_frame'] = end
camera_option = preset.get("camera_option", {})
camera_option["depthOfField"] = cmds.getAttr(
# Enforce persisting camera depth of field
camera_options = preset.setdefault("camera_options", {})
camera_options["depthOfField"] = cmds.getAttr(
"{0}.depthOfField".format(camera))
stagingdir = self.staging_dir(instance)
@ -131,13 +133,15 @@ class ExtractPlayblast(publish.Extractor):
preset.update(panel_preset)
cmds.setFocus(panel)
path = capture.capture(**preset)
path = capture.capture(log=self.log, **preset)
self.log.debug("playblast path {}".format(path))
collected_files = os.listdir(stagingdir)
patterns = [clique.PATTERNS["frames"]]
collections, remainder = clique.assemble(collected_files,
minimum_items=1)
minimum_items=1,
patterns=patterns)
self.log.debug("filename {}".format(filename))
frame_collection = None

View file

@ -1,5 +1,6 @@
import os
import glob
import tempfile
import capture
@ -81,9 +82,17 @@ class ExtractThumbnail(publish.Extractor):
elif asset_width and asset_height:
preset['width'] = asset_width
preset['height'] = asset_height
stagingDir = self.staging_dir(instance)
# Create temp directory for thumbnail
# - this avoids overwriting the source file
dst_staging = tempfile.mkdtemp(prefix="pyblish_tmp_")
self.log.debug(
"Create temp directory {} for thumbnail".format(dst_staging)
)
# Store new staging to cleanup paths
instance.context.data["cleanupFullPaths"].append(dst_staging)
filename = "{0}".format(instance.name)
path = os.path.join(stagingDir, filename)
path = os.path.join(dst_staging, filename)
self.log.info("Outputting images to %s" % path)
@ -137,7 +146,7 @@ class ExtractThumbnail(publish.Extractor):
'name': 'thumbnail',
'ext': 'jpg',
'files': thumbnail,
"stagingDir": stagingDir,
"stagingDir": dst_staging,
"thumbnail": True
}
instance.data["representations"].append(representation)

View file

@ -11,7 +11,7 @@ import pyblish.api
from openpype.lib import requests_post
from openpype.hosts.maya.api import lib
from openpype.pipeline import legacy_io
from openpype.api import get_system_settings
from openpype.settings import get_system_settings
# mapping between Maya renderer names and Muster template ids
@ -118,7 +118,7 @@ def preview_fname(folder, scene, layer, padding, ext):
"""
# Following hardcoded "<Scene>/<Layer>/<Layer>.<number>.<ext>"
output = "maya/{scene}/{layer}/{layer}.{number}.{ext}".format(
output = "{scene}/{layer}/{layer}.{number}.{ext}".format(
scene=scene,
layer=layer,
number="#" * padding,

View file

@ -22,10 +22,10 @@ def get_redshift_image_format_labels():
class ValidateRenderSettings(pyblish.api.InstancePlugin):
"""Validates the global render settings
* File Name Prefix must start with: `maya/<Scene>`
* File Name Prefix must start with: `<Scene>`
all other token are customizable but sane values for Arnold are:
`maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>`
`<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>`
<Camera> token is supported also, useful for multiple renderable
cameras per render layer.
@ -64,12 +64,12 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
}
ImagePrefixTokens = {
'mentalray': 'maya/<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>', # noqa: E501
'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>', # noqa: E501
'redshift': 'maya/<Scene>/<RenderLayer>/<RenderLayer>',
'vray': 'maya/<Scene>/<Layer>/<Layer>',
'mentalray': '<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>', # noqa: E501
'arnold': '<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>', # noqa: E501
'redshift': '<Scene>/<RenderLayer>/<RenderLayer>',
'vray': '<Scene>/<Layer>/<Layer>',
'renderman': '<layer>{aov_separator}<aov>.<f4>.<ext>',
'mayahardware2': 'maya/<Scene>/<RenderLayer>/<RenderLayer>',
'mayahardware2': '<Scene>/<RenderLayer>/<RenderLayer>',
}
_aov_chars = {
@ -80,7 +80,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
redshift_AOV_prefix = "<BeautyPath>/<BeautyFile>{aov_separator}<RenderPass>" # noqa: E501
renderman_dir_prefix = "maya/<scene>/<layer>"
renderman_dir_prefix = "<scene>/<layer>"
R_AOV_TOKEN = re.compile(
r'%a|<aov>|<renderpass>', re.IGNORECASE)
@ -90,8 +90,8 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
R_SCENE_TOKEN = re.compile(r'%s|<scene>', re.IGNORECASE)
DEFAULT_PADDING = 4
VRAY_PREFIX = "maya/<Scene>/<Layer>/<Layer>"
DEFAULT_PREFIX = "maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
VRAY_PREFIX = "<Scene>/<Layer>/<Layer>"
DEFAULT_PREFIX = "<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
def process(self, instance):
@ -123,7 +123,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
prefix = prefix.replace(
"{aov_separator}", instance.data.get("aovSeparator", "_"))
required_prefix = "maya/<scene>"
default_prefix = cls.ImagePrefixTokens[renderer]
if not anim_override:
@ -131,15 +130,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
cls.log.error("Animation needs to be enabled. Use the same "
"frame for start and end to render single frame")
if renderer != "renderman" and not prefix.lower().startswith(
required_prefix):
invalid = True
cls.log.error(
("Wrong image prefix [ {} ] "
" - doesn't start with: '{}'").format(
prefix, required_prefix)
)
if not re.search(cls.R_LAYER_TOKEN, prefix):
invalid = True
cls.log.error("Wrong image prefix [ {} ] - "
@ -268,14 +258,20 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
# go through definitions and test if such node.attribute exists.
# if so, compare its value from the one required.
for attr, value in OrderedDict(validation_settings).items():
# first get node of that type
cls.log.debug("{}: {}".format(attr, value))
node_type = attr.split(".")[0]
attribute_name = ".".join(attr.split(".")[1:])
if "." not in attr:
cls.log.warning("Skipping invalid attribute defined in "
"validation settings: '{}'".format(attr))
continue
node_type, attribute_name = attr.split(".", 1)
# first get node of that type
nodes = cmds.ls(type=node_type)
if not isinstance(nodes, list):
cls.log.warning("No nodes of '{}' found.".format(node_type))
if not nodes:
cls.log.warning(
"No nodes of type '{}' found.".format(node_type))
continue
for node in nodes:

View file

@ -1,7 +1,6 @@
from maya import cmds
import pyblish.api
import openpype.api
import openpype.hosts.maya.api.action
from openpype.pipeline.publish import ValidateContentsOrder
@ -24,7 +23,7 @@ class ValidateUniqueNames(pyblish.api.Validator):
"""Returns the invalid transforms in the instance.
Returns:
list: Non unique name transforms
list: Non-unique name transforms.
"""

View file

@ -1,5 +1,5 @@
import os
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import install_host
from openpype.hosts.maya.api import MayaHost
from maya import cmds

View file

@ -1,7 +1,6 @@
import os
import platform
from openpype.modules import OpenPypeModule
from openpype.modules.interfaces import IHostAddon
from openpype.modules import OpenPypeModule, IHostAddon
NUKE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))

View file

@ -563,7 +563,15 @@ def get_node_path(path, padding=4):
def get_nuke_imageio_settings():
return get_anatomy_settings(Context.project_name)["imageio"]["nuke"]
project_imageio = get_project_settings(
Context.project_name)["nuke"]["imageio"]
# backward compatibility for projects started before 3.10
# which still have `__legacy__` knob types
if not project_imageio["enabled"]:
return get_anatomy_settings(Context.project_name)["imageio"]["nuke"]
return get_project_settings(Context.project_name)["nuke"]["imageio"]
def get_created_node_imageio_setting_legacy(nodeclass, creator, subset):
@ -2922,3 +2930,47 @@ def get_nodes_by_names(names):
nuke.toNode(name)
for name in names
]
def get_viewer_config_from_string(input_string):
"""Convert string to display and viewer string
Args:
input_string (str): string with viewer
Raises:
IndexError: if more than one slash in input string
IndexError: if missing closing bracket
Returns:
tuple[str]: display, viewer
"""
display = None
viewer = input_string
# check for `/` or `()` in the name
if "/" in viewer:
split = viewer.split("/")
# raise if more than one slash
if len(split) > 2:
raise IndexError((
"Viewer Input string is not correct. "
"more then two `/` slashes! {}"
).format(input_string))
viewer = split[1]
display = split[0]
elif "(" in viewer:
pattern = r"([\w\d\s]+).*[(](.*)[)]"
result = re.findall(pattern, viewer)
try:
result = result.pop()
display = str(result[1]).rstrip()
viewer = str(result[0]).rstrip()
except IndexError:
raise IndexError((
"Viewer Input string is not correct. "
"Missing bracket! {}"
).format(input_string))
return (display, viewer)
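Both accepted spellings, per the parsing above (display and viewer names hypothetical):

get_viewer_config_from_string("ACES/sRGB")    # -> ("ACES", "sRGB")
get_viewer_config_from_string("sRGB (ACES)")  # -> ("ACES", "sRGB")
get_viewer_config_from_string("sRGB")         # -> (None, "sRGB")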

View file

@ -7,9 +7,7 @@ import nuke
import pyblish.api
import openpype
from openpype.api import (
get_current_project_settings
)
from openpype.settings import get_current_project_settings
from openpype.lib import register_event_callback, Logger
from openpype.pipeline import (
register_loader_plugin_path,
@ -68,7 +66,6 @@ def reload_config():
"""
for module in (
"openpype.api",
"openpype.hosts.nuke.api.actions",
"openpype.hosts.nuke.api.menu",
"openpype.hosts.nuke.api.plugin",
@ -367,6 +364,9 @@ def containerise(node,
set_avalon_knob_data(node, data)
# set tab to first native
node.setTab(0)
return node

View file

@ -6,7 +6,7 @@ from abc import abstractmethod
import nuke
from openpype.api import get_current_project_settings
from openpype.settings import get_current_project_settings
from openpype.pipeline import (
LegacyCreator,
LoaderPlugin,
@ -19,7 +19,8 @@ from .lib import (
add_publish_knob,
get_nuke_imageio_settings,
set_node_knobs_from_settings,
get_view_process_node
get_view_process_node,
get_viewer_config_from_string
)
@ -190,7 +191,20 @@ class ExporterReview(object):
if "#" in self.fhead:
self.fhead = self.fhead.replace("#", "")[:-1]
def get_representation_data(self, tags=None, range=False):
def get_representation_data(
self, tags=None, range=False,
custom_tags=None
):
""" Add representation data to self.data
Args:
tags (list[str], optional): list of defined tags.
Defaults to None.
range (bool, optional): flag for adding ranges.
Defaults to False.
custom_tags (list[str], optional): user-defined custom tags.
Defaults to None.
"""
add_tags = tags or []
repre = {
"name": self.name,
@ -200,6 +214,9 @@ class ExporterReview(object):
"tags": [self.name.replace("_", "-")] + add_tags
}
if custom_tags:
repre["custom_tags"] = custom_tags
if range:
repre.update({
"frameStart": self.first_frame,
@ -312,7 +329,8 @@ class ExporterReviewLut(ExporterReview):
dag_node.setInput(0, self.previous_node)
self._temp_nodes.append(dag_node)
self.previous_node = dag_node
self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes))
self.log.debug(
"OCIODisplay... `{}`".format(self._temp_nodes))
# GenerateLUT
gen_lut_node = nuke.createNode("GenerateLUT")
@ -415,6 +433,7 @@ class ExporterReviewMov(ExporterReview):
return path
def generate_mov(self, farm=False, **kwargs):
add_tags = []
self.publish_on_farm = farm
read_raw = kwargs["read_raw"]
reformat_node_add = kwargs["reformat_node_add"]
@ -433,10 +452,10 @@ class ExporterReviewMov(ExporterReview):
self.log.debug(">> baking_view_profile `{}`".format(
baking_view_profile))
add_tags = kwargs.get("add_tags", [])
add_custom_tags = kwargs.get("add_custom_tags", [])
self.log.info(
"__ add_tags: `{0}`".format(add_tags))
"__ add_custom_tags: `{0}`".format(add_custom_tags))
subset = self.instance.data["subset"]
self._temp_nodes[subset] = []
@ -491,7 +510,15 @@ class ExporterReviewMov(ExporterReview):
if not self.viewer_lut_raw:
# OCIODisplay
dag_node = nuke.createNode("OCIODisplay")
dag_node["view"].setValue(str(baking_view_profile))
display, viewer = get_viewer_config_from_string(
str(baking_view_profile)
)
if display:
dag_node["display"].setValue(display)
# assign viewer
dag_node["view"].setValue(viewer)
# connect
dag_node.setInput(0, self.previous_node)
@ -542,6 +569,7 @@ class ExporterReviewMov(ExporterReview):
# ---------- generate representation data
self.get_representation_data(
tags=["review", "delete"] + add_tags,
custom_tags=add_custom_tags,
range=True
)

View file

@ -1,7 +1,7 @@
import os
import nuke
from openpype.api import resources
from openpype import resources
from .lib import maintained_selection

View file

@ -65,6 +65,9 @@ class AlembicCameraLoader(load.LoaderPlugin):
object_name, file),
inpanel=False
)
# hide property panel
camera_node.hideControlPanel()
camera_node.forceValidate()
camera_node["frame_rate"].setValue(float(fps))

View file

@ -1,6 +1,8 @@
import nuke
import qargparse
from pprint import pformat
from copy import deepcopy
from openpype.lib import Logger
from openpype.client import (
get_version_by_id,
get_last_version_by_subset_id,
@ -27,6 +29,7 @@ class LoadClip(plugin.NukeLoader):
Either it is image sequence or video file.
"""
log = Logger.get_logger(__name__)
families = [
"source",
@ -85,13 +88,18 @@ class LoadClip(plugin.NukeLoader):
)
def load(self, context, name, namespace, options):
repre = context["representation"]
representation = context["representation"]
# reset container id so it is always unique for each instance
self.reset_container_id()
is_sequence = len(repre["files"]) > 1
is_sequence = len(representation["files"]) > 1
file = self.fname.replace("\\", "/")
if is_sequence:
representation = self._representation_with_hash_in_frame(
representation
)
filepath = get_representation_path(representation).replace("\\", "/")
self.log.debug("_ filepath: {}".format(filepath))
start_at_workfile = options.get(
"start_at_workfile", self.options_defaults["start_at_workfile"])
@ -101,11 +109,10 @@ class LoadClip(plugin.NukeLoader):
version = context['version']
version_data = version.get("data", {})
repre_id = repre["_id"]
repre_id = representation["_id"]
repre_cont = repre["context"]
self.log.info("version_data: {}\n".format(version_data))
self.log.debug("_ version_data: {}\n".format(
pformat(version_data)))
self.log.debug(
"Representation id `{}` ".format(repre_id))
@ -121,36 +128,33 @@ class LoadClip(plugin.NukeLoader):
duration = last - first
first = 1
last = first + duration
elif "#" not in file:
frame = repre_cont.get("frame")
assert frame, "Representation is not sequence"
padding = len(frame)
file = file.replace(frame, "#" * padding)
# Fallback to asset name when namespace is None
if namespace is None:
namespace = context['asset']['name']
if not file:
if not filepath:
self.log.warning(
"Representation id `{}` is failing to load".format(repre_id))
return
read_name = self._get_node_name(repre)
read_name = self._get_node_name(representation)
# Create the Loader with the filename path set
read_node = nuke.createNode(
"Read",
"name {}".format(read_name))
# hide property panel
read_node.hideControlPanel()
# to avoid multiple undo steps for rest of process
# we will switch off undo-ing
with viewer_update_and_undo_stop():
read_node["file"].setValue(file)
read_node["file"].setValue(filepath)
used_colorspace = self._set_colorspace(
read_node, version_data, repre["data"])
read_node, version_data, representation["data"])
self._set_range_to_node(read_node, first, last, start_at_workfile)
@ -172,7 +176,7 @@ class LoadClip(plugin.NukeLoader):
data_imprint[k] = version
elif k == 'colorspace':
colorspace = repre["data"].get(k)
colorspace = representation["data"].get(k)
colorspace = colorspace or version_data.get(k)
data_imprint["db_colorspace"] = colorspace
if used_colorspace:
@ -206,6 +210,20 @@ class LoadClip(plugin.NukeLoader):
def switch(self, container, representation):
self.update(container, representation)
def _representation_with_hash_in_frame(self, representation):
"""Convert frame key value to padded hash
Args:
representation (dict): representation data
Returns:
dict: altered representation data
"""
representation = deepcopy(representation)
frame = representation["context"]["frame"]
representation["context"]["frame"] = "#" * len(str(frame))
return representation
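For instance, with a hypothetical representation context the frame value is replaced by a hash of the same padding before resolving the path:

representation = {"context": {"frame": "1001"}}
representation = self._representation_with_hash_in_frame(representation)
# representation["context"]["frame"] -> "####"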
def update(self, container, representation):
"""Update the Loader's path
@ -218,7 +236,13 @@ class LoadClip(plugin.NukeLoader):
is_sequence = len(representation["files"]) > 1
read_node = nuke.toNode(container['objectName'])
file = get_representation_path(representation).replace("\\", "/")
if is_sequence:
representation = self._representation_with_hash_in_frame(
representation
)
filepath = get_representation_path(representation).replace("\\", "/")
self.log.debug("_ filepath: {}".format(filepath))
start_at_workfile = "start at" in read_node['frame_mode'].value()
@ -233,8 +257,6 @@ class LoadClip(plugin.NukeLoader):
version_data = version_doc.get("data", {})
repre_id = representation["_id"]
repre_cont = representation["context"]
# colorspace profile
colorspace = representation["data"].get("colorspace")
colorspace = colorspace or version_data.get("colorspace")
@@ -251,14 +273,8 @@ class LoadClip(plugin.NukeLoader):
duration = last - first
first = 1
last = first + duration
elif "#" not in file:
frame = repre_cont.get("frame")
assert frame, "Representation is not sequence"
padding = len(frame)
file = file.replace(frame, "#" * padding)
if not file:
if not filepath:
self.log.warning(
"Representation id `{}` is failing to load".format(repre_id))
return
@@ -266,14 +282,14 @@ class LoadClip(plugin.NukeLoader):
read_name = self._get_node_name(representation)
read_node["name"].setValue(read_name)
read_node["file"].setValue(file)
read_node["file"].setValue(filepath)
# to avoid multiple undo steps for the rest of the process
# we will switch off undoing
with viewer_update_and_undo_stop():
used_colorspace = self._set_colorspace(
read_node, version_data, representation["data"],
path=file)
path=filepath)
self._set_range_to_node(read_node, first, last, start_at_workfile)
@@ -345,8 +361,10 @@ class LoadClip(plugin.NukeLoader):
time_warp_nodes = version_data.get('timewarps', [])
last_node = None
source_id = self.get_container_id(parent_node)
self.log.info("__ source_id: {}".format(source_id))
self.log.info("__ members: {}".format(self.get_members(parent_node)))
self.log.debug("__ source_id: {}".format(source_id))
self.log.debug("__ members: {}".format(
self.get_members(parent_node)))
dependent_nodes = self.clear_members(parent_node)
with maintained_selection():
@@ -425,7 +443,7 @@ class LoadClip(plugin.NukeLoader):
colorspace = repre_data.get("colorspace")
colorspace = colorspace or version_data.get("colorspace")
# colorspace from `project_anatomy/imageio/nuke/regexInputs`
# colorspace from `project_settings/nuke/imageio/regexInputs`
iio_colorspace = get_imageio_input_colorspace(path)
# Set colorspace defined in version data

View file

@@ -89,6 +89,9 @@ class LoadEffects(load.LoaderPlugin):
"Group",
"name {}_1".format(object_name))
# hide property panel
GN.hideControlPanel()
# adding content to the group node
with GN:
pre_node = nuke.createNode("Input")

View file

@@ -90,6 +90,9 @@ class LoadEffectsInputProcess(load.LoaderPlugin):
"Group",
"name {}_1".format(object_name))
# hide property panel
GN.hideControlPanel()
# adding content to the group node
with GN:
pre_node = nuke.createNode("Input")

View file

@@ -62,7 +62,9 @@ class LoadImage(load.LoaderPlugin):
def load(self, context, name, namespace, options):
self.log.info("__ options: `{}`".format(options))
frame_number = options.get("frame_number", 1)
frame_number = options.get(
"frame_number", int(nuke.root()["first_frame"].getValue())
)
version = context['version']
version_data = version.get("data", {})
@@ -112,6 +114,10 @@ class LoadImage(load.LoaderPlugin):
r = nuke.createNode(
"Read",
"name {}".format(read_name))
# hide property panel
r.hideControlPanel()
r["file"].setValue(file)
# Set colorspace defined in version data

View file

@@ -63,6 +63,10 @@ class AlembicModelLoader(load.LoaderPlugin):
object_name, file),
inpanel=False
)
# hide property panel
model_node.hideControlPanel()
model_node.forceValidate()
# Ensure all items are imported and selected.

View file

@@ -71,6 +71,9 @@ class LinkAsGroup(load.LoaderPlugin):
"Precomp",
"file {}".format(file))
# hide property panel
P.hideControlPanel()
# Set colorspace defined in version data
colorspace = context["version"]["data"].get("colorspace", None)
self.log.info("colorspace: {}\n".format(colorspace))

Some files were not shown because too many files have changed in this diff.