Merge branch 'develop' into feature/data_exchange-3dmax_rendering_deadline

Kayla Man 2023-02-17 19:47:02 +08:00
commit cf377f1a52
174 changed files with 4223 additions and 2635 deletions


@@ -1,19 +0,0 @@
name: Automate Projects
on:
issues:
types: [opened, labeled]
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
jobs:
assign_one_project:
runs-on: ubuntu-latest
name: Assign to One Project
steps:
- name: Assign NEW bugs to triage
uses: srggrs/assign-one-project-github-action@1.2.0
if: contains(github.event.issue.labels.*.name, 'bug')
with:
project: 'https://github.com/pypeclub/pype/projects/2'
column_name: 'Needs triage'


@@ -13,7 +13,7 @@ jobs:
if: github.event.pull_request.milestone == null
uses: zoispag/action-assign-milestone@v1
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
repo-token: "${{ secrets.YNPUT_BOT_TOKEN }}"
milestone: 'next-minor'
run_if_develop:
@@ -24,5 +24,5 @@ jobs:
if: github.event.pull_request.milestone == null
uses: zoispag/action-assign-milestone@v1
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
milestone: 'next-patch'
repo-token: "${{ secrets.YNPUT_BOT_TOKEN }}"
milestone: 'next-patch'


@@ -12,7 +12,7 @@ jobs:
uses: "WyriHaximus/github-action-get-milestones@master"
id: milestones
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"
- run: printf "name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number')
id: querymilestone
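The jq one-liner above selects the milestone whose title matches the MILESTONE environment variable and prints its number. A rough Python equivalent, assuming MILESTONES holds the JSON list produced by the previous step (names mirror the workflow's env variables):

import json
import os

# MILESTONES is assumed to be a JSON array of {"title": ..., "number": ...}
milestones = json.loads(os.environ["MILESTONES"])
number = next(
    m["number"] for m in milestones
    if m["title"] == os.environ["MILESTONE"]
)
print("number={}".format(number))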
@@ -31,7 +31,7 @@ jobs:
with:
title: 'next-patch'
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"
generate-next-minor:
runs-on: ubuntu-latest
@@ -40,7 +40,7 @@ jobs:
uses: "WyriHaximus/github-action-get-milestones@master"
id: milestones
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"
- run: printf "name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number')
id: querymilestone
@@ -59,4 +59,4 @@ jobs:
with:
title: 'next-minor'
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"


@@ -14,10 +14,10 @@ jobs:
- name: 🚛 Checkout Code
uses: actions/checkout@v2
- name: 🔨 Merge develop to main
- name: 🔨 Merge develop to main
uses: everlytic/branch-merge@1.1.0
with:
github_token: ${{ secrets.ADMIN_TOKEN }}
github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
source_ref: 'develop'
target_branch: 'main'
commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'
@@ -26,4 +26,4 @@ jobs:
uses: benc-uk/workflow-dispatch@v1
with:
workflow: Nightly Prerelease
token: ${{ secrets.ADMIN_TOKEN }}
token: ${{ secrets.YNPUT_BOT_TOKEN }}


@@ -25,43 +25,15 @@ jobs:
- name: 🔎 Determine next version type
id: version_type
run: |
TYPE=$(python ./tools/ci_tools.py --bump --github_token ${{ secrets.GITHUB_TOKEN }})
echo ::set-output name=type::$TYPE
TYPE=$(python ./tools/ci_tools.py --bump --github_token ${{ secrets.YNPUT_BOT_TOKEN }})
echo "type=${TYPE}" >> $GITHUB_OUTPUT
- name: 💉 Inject new version into files
id: version
if: steps.version_type.outputs.type != 'skip'
run: |
RESULT=$(python ./tools/ci_tools.py --nightly --github_token ${{ secrets.GITHUB_TOKEN }})
echo ::set-output name=next_tag::$RESULT
# - name: "✏️ Generate full changelog"
# if: steps.version_type.outputs.type != 'skip'
# id: generate-full-changelog
# uses: heinrichreimer/github-changelog-generator-action@v2.3
# with:
# token: ${{ secrets.ADMIN_TOKEN }}
# addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
# issues: false
# issuesWoLabels: false
# sinceTag: "3.12.0"
# maxIssues: 100
# pullRequests: true
# prWoLabels: false
# author: false
# unreleased: true
# compareLink: true
# stripGeneratorNotice: true
# verbose: true
# unreleasedLabel: ${{ steps.version.outputs.next_tag }}
# excludeTagsRegex: "CI/.+"
# releaseBranch: "main"
- name: "🖨️ Print changelog to console"
if: steps.version_type.outputs.type != 'skip'
run: cat CHANGELOG.md
NEW_VERSION_TAG=$(python ./tools/ci_tools.py --nightly --github_token ${{ secrets.YNPUT_BOT_TOKEN }})
echo "next_tag=${NEW_VERSION_TAG}" >> $GITHUB_OUTPUT
- name: 💾 Commit and Tag
id: git_commit
@@ -80,7 +52,7 @@ jobs:
- name: Push to protected main branch
uses: CasperWA/push-protected@v2.10.0
with:
token: ${{ secrets.ADMIN_TOKEN }}
token: ${{ secrets.YNPUT_BOT_TOKEN }}
branch: main
tags: true
unprotect_reviews: true
@@ -89,7 +61,7 @@ jobs:
uses: everlytic/branch-merge@1.1.0
if: steps.version_type.outputs.type != 'skip'
with:
github_token: ${{ secrets.ADMIN_TOKEN }}
github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
source_ref: 'main'
target_branch: 'develop'
commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'


@@ -26,34 +26,12 @@ jobs:
- name: 💉 Inject new version into files
id: version
run: |
echo ::set-output name=current_version::${GITHUB_REF#refs/*/}
RESULT=$(python ./tools/ci_tools.py --finalize ${GITHUB_REF#refs/*/})
LASTRELEASE=$(python ./tools/ci_tools.py --lastversion release)
NEW_VERSION=$(python ./tools/ci_tools.py --finalize ${GITHUB_REF#refs/*/})
LAST_VERSION=$(python ./tools/ci_tools.py --lastversion release)
echo ::set-output name=last_release::$LASTRELEASE
echo ::set-output name=release_tag::$RESULT
# - name: "✏️ Generate full changelog"
# if: steps.version.outputs.release_tag != 'skip'
# id: generate-full-changelog
# uses: heinrichreimer/github-changelog-generator-action@v2.3
# with:
# token: ${{ secrets.ADMIN_TOKEN }}
# addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
# issues: false
# issuesWoLabels: false
# sinceTag: "3.12.0"
# maxIssues: 100
# pullRequests: true
# prWoLabels: false
# author: false
# unreleased: true
# compareLink: true
# stripGeneratorNotice: true
# verbose: true
# futureRelease: ${{ steps.version.outputs.release_tag }}
# excludeTagsRegex: "CI/.+"
# releaseBranch: "main"
echo "current_version=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT
echo "last_release=${LAST_VERSION}" >> $GITHUB_OUTPUT
echo "release_tag=${NEW_VERSION}" >> $GITHUB_OUTPUT
- name: 💾 Commit and Tag
id: git_commit
@@ -70,43 +48,17 @@ jobs:
if: steps.version.outputs.release_tag != 'skip'
uses: CasperWA/push-protected@v2.10.0
with:
token: ${{ secrets.ADMIN_TOKEN }}
token: ${{ secrets.YNPUT_BOT_TOKEN }}
branch: main
tags: true
unprotect_reviews: true
- name: "✏️ Generate last changelog"
if: steps.version.outputs.release_tag != 'skip'
id: generate-last-changelog
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
issues: false
issuesWoLabels: false
sinceTag: ${{ steps.version.outputs.last_release }}
maxIssues: 100
pullRequests: true
prWoLabels: false
author: false
unreleased: true
compareLink: true
stripGeneratorNotice: true
verbose: true
futureRelease: ${{ steps.version.outputs.release_tag }}
excludeTagsRegex: "CI/.+"
releaseBranch: "main"
stripHeaders: true
base: 'none'
- name: 🚀 Github Release
if: steps.version.outputs.release_tag != 'skip'
uses: ncipollo/release-action@v1
with:
body: ${{ steps.generate-last-changelog.outputs.changelog }}
tag: ${{ steps.version.outputs.release_tag }}
token: ${{ secrets.ADMIN_TOKEN }}
token: ${{ secrets.YNPUT_BOT_TOKEN }}
- name: ☠ Delete Pre-release
if: steps.version.outputs.release_tag != 'skip'
@@ -118,7 +70,7 @@ jobs:
if: steps.version.outputs.release_tag != 'skip'
uses: everlytic/branch-merge@1.1.0
with:
github_token: ${{ secrets.ADMIN_TOKEN }}
github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
source_ref: 'main'
target_branch: 'develop'
commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}'


@@ -28,7 +28,7 @@ jobs:
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: 🧵 Install Requirements
shell: pwsh
run: |
@@ -64,27 +64,3 @@ jobs:
run: |
export SKIP_THIRD_PARTY_VALIDATION="1"
./tools/build.sh
# MacOS-latest:
# runs-on: macos-latest
# strategy:
# matrix:
# python-version: [3.9]
# steps:
# - name: 🚛 Checkout Code
# uses: actions/checkout@v2
# - name: Set up Python
# uses: actions/setup-python@v2
# with:
# python-version: ${{ matrix.python-version }}
# - name: 🧵 Install Requirements
# run: |
# ./tools/create_env.sh
# - name: 🔨 Build
# run: |
# ./tools/build.sh


@@ -9,4 +9,4 @@ repos:
- id: check-yaml
- id: check-added-large-files
- id: no-commit-to-branch
args: [ '--pattern', '^(?!((release|enhancement|feature|bugfix|documentation|tests|local|chore)\/[a-zA-Z0-9\-]+)$).*' ]
args: [ '--pattern', '^(?!((release|enhancement|feature|bugfix|documentation|tests|local|chore)\/[a-zA-Z0-9\-_]+)$).*' ]
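The only change is allowing underscores in branch names. A quick check of the effect in Python (pattern copied from the hook; a match means the commit is blocked):

import re

pattern = (r"^(?!((release|enhancement|feature|bugfix|documentation"
           r"|tests|local|chore)\/[a-zA-Z0-9\-_]+)$).*")
branch = "feature/data_exchange-3dmax_rendering_deadline"
print(bool(re.match(pattern, branch)))  # False: allowed by the new pattern
old_pattern = pattern.replace(r"\-_", r"\-")
print(bool(re.match(old_pattern, branch)))  # True: blocked before this change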


@@ -1,112 +0,0 @@
from .settings import (
get_system_settings,
get_project_settings,
get_current_project_settings,
get_anatomy_settings,
SystemSettings,
ProjectSettings
)
from .lib import (
PypeLogger,
Logger,
Anatomy,
execute,
run_subprocess,
version_up,
get_asset,
get_workdir_data,
get_version_from_path,
get_last_version_from_path,
get_app_environments_for_context,
source_hash,
get_latest_version,
get_local_site_id,
change_openpype_mongo_url,
create_project_folders,
get_project_basic_paths
)
from .lib.mongo import (
get_default_components
)
from .lib.applications import (
ApplicationManager
)
from .lib.avalon_context import (
BuildWorkfile
)
from . import resources
from .plugin import (
Extractor,
ValidatePipelineOrder,
ValidateContentsOrder,
ValidateSceneOrder,
ValidateMeshOrder,
)
# temporary fix, might
from .action import (
get_errored_instances_from_context,
RepairAction,
RepairContextAction
)
__all__ = [
"get_system_settings",
"get_project_settings",
"get_current_project_settings",
"get_anatomy_settings",
"get_project_basic_paths",
"SystemSettings",
"ProjectSettings",
"PypeLogger",
"Logger",
"Anatomy",
"execute",
"get_default_components",
"ApplicationManager",
"BuildWorkfile",
# Resources
"resources",
# plugin classes
"Extractor",
# ordering
"ValidatePipelineOrder",
"ValidateContentsOrder",
"ValidateSceneOrder",
"ValidateMeshOrder",
# action
"get_errored_instances_from_context",
"RepairAction",
"RepairContextAction",
# get contextual data
"version_up",
"get_asset",
"get_workdir_data",
"get_version_from_path",
"get_last_version_from_path",
"get_app_environments_for_context",
"source_hash",
"run_subprocess",
"get_latest_version",
"get_local_site_id",
"change_openpype_mongo_url",
"get_project_basic_paths",
"create_project_folders"
]


@@ -164,7 +164,6 @@ def get_linked_representation_id(
# Recursive graph lookup for inputs
{"$graphLookup": graph_lookup}
]
conn = get_project_connection(project_name)
result = conn.aggregate(query_pipeline)
referenced_version_ids = _process_referenced_pipeline_result(
@@ -213,7 +212,7 @@ def _process_referenced_pipeline_result(result, link_type):
for output in sorted(outputs_recursive, key=lambda o: o["depth"]):
output_links = output.get("data", {}).get("inputLinks")
if not output_links:
if not output_links and output["type"] != "hero_version":
continue
# Leaf
@@ -232,6 +231,9 @@ def _process_referenced_pipeline_result(result, link_type):
def _filter_input_links(input_links, link_type, correctly_linked_ids):
if not input_links: # to handle hero versions
return
for input_link in input_links:
if link_type and input_link["type"] != link_type:
continue


@@ -1,4 +1,5 @@
import os
from openpype.lib import PreLaunchHook
@@ -40,5 +41,13 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
self.log.info("Current context does not have any workfile yet.")
return
# Determine whether to open workfile post initialization.
if self.host_name == "maya":
key = "open_workfile_post_initialization"
if self.data["project_settings"]["maya"][key]:
self.log.debug("Opening workfile post initialization.")
self.data["env"]["OPENPYPE_" + key.upper()] = "1"
return
# Add path to workfile to arguments
self.launch_context.launch_args.append(last_workfile)
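A small sketch of the flag derived above; only the variable name follows from the hook, the consuming side is an assumption:

import os

key = "open_workfile_post_initialization"
env_key = "OPENPYPE_" + key.upper()
print(env_key)  # OPENPYPE_OPEN_WORKFILE_POST_INITIALIZATION

# hypothetical consumer in Maya startup code:
if os.environ.get(env_key) == "1":
    print("open the last workfile after Maya finishes initializing")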


@@ -8,6 +8,7 @@ exists is used.
import os
from abc import ABCMeta, abstractmethod
import platform
import six
@@ -187,11 +188,19 @@ class HostDirmap(object):
self.log.debug("local overrides {}".format(active_overrides))
self.log.debug("remote overrides {}".format(remote_overrides))
current_platform = platform.system().lower()
for root_name, active_site_dir in active_overrides.items():
remote_site_dir = (
remote_overrides.get(root_name)
or sync_settings["sites"][remote_site]["root"][root_name]
)
if isinstance(remote_site_dir, dict):
remote_site_dir = remote_site_dir.get(current_platform)
if not remote_site_dir:
continue
if os.path.isdir(active_site_dir):
if "destination-path" not in mapping:
mapping["destination-path"] = []
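The new branch handles remote roots that are defined per platform. A minimal sketch of the two settings shapes the loop above accepts (values are illustrative, not the exact OpenPype schema):

import platform

current_platform = platform.system().lower()
for remote_site_dir in (
    "/mnt/projects",                                       # plain string root
    {"windows": "P:/projects", "linux": "/mnt/projects"},  # per-platform root
):
    if isinstance(remote_site_dir, dict):
        remote_site_dir = remote_site_dir.get(current_platform)
    if not remote_site_dir:
        continue
    print(remote_site_dir)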


@@ -1,3 +1,4 @@
import os
import logging
import contextlib
from abc import ABCMeta, abstractproperty
@@ -100,6 +101,30 @@ class HostBase(object):
pass
def get_current_project_name(self):
"""
Returns:
Union[str, None]: Current project name.
"""
return os.environ.get("AVALON_PROJECT")
def get_current_asset_name(self):
"""
Returns:
Union[str, None]: Current asset name.
"""
return os.environ.get("AVALON_ASSET")
def get_current_task_name(self):
"""
Returns:
Union[str, None]: Current task name.
"""
return os.environ.get("AVALON_TASK")
def get_current_context(self):
"""Get current context information.
@@ -111,19 +136,14 @@ class HostBase(object):
Default implementation returns values from 'legacy_io.Session'.
Returns:
dict: Context with 3 keys 'project_name', 'asset_name' and
'task_name'. All of them can be 'None'.
Dict[str, Union[str, None]]: Context with 3 keys 'project_name',
'asset_name' and 'task_name'. All of them can be 'None'.
"""
from openpype.pipeline import legacy_io
if legacy_io.is_installed():
legacy_io.install()
return {
"project_name": legacy_io.Session["AVALON_PROJECT"],
"asset_name": legacy_io.Session["AVALON_ASSET"],
"task_name": legacy_io.Session["AVALON_TASK"]
"project_name": self.get_current_project_name(),
"asset_name": self.get_current_asset_name(),
"task_name": self.get_current_task_name()
}
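The default implementations now read the context from environment variables instead of legacy_io.Session. A standalone sketch of the resulting behavior (values are illustrative):

import os

os.environ["AVALON_PROJECT"] = "demo_project"
os.environ["AVALON_ASSET"] = "sh010"
os.environ["AVALON_TASK"] = "animation"

# mirrors the three new default getters combined by get_current_context()
context = {
    "project_name": os.environ.get("AVALON_PROJECT"),
    "asset_name": os.environ.get("AVALON_ASSET"),
    "task_name": os.environ.get("AVALON_TASK"),
}
print(context)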
def get_context_title(self):


@@ -6,14 +6,19 @@ from openpype.hosts.aftereffects import api
from openpype.pipeline import (
Creator,
CreatedInstance,
CreatorError,
legacy_io,
CreatorError
)
from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances
from openpype.lib import prepare_template_data
from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS
class RenderCreator(Creator):
"""Creates 'render' instance for publishing.
Result of a 'render' instance is a video or sequence of images for a
particular composition, based on the configuration in its RenderQueue.
"""
identifier = "render"
label = "Render"
family = "render"
@@ -28,45 +33,6 @@ class RenderCreator(Creator):
["RenderCreator"]
["defaults"])
def get_icon(self):
return resources.get_openpype_splash_filepath()
def collect_instances(self):
for instance_data in cache_and_get_instances(self):
# legacy instances have family=='render' or 'renderLocal', use them
creator_id = (instance_data.get("creator_identifier") or
instance_data.get("family", '').replace("Local", ''))
if creator_id == self.identifier:
instance_data = self._handle_legacy(instance_data)
instance = CreatedInstance.from_existing(
instance_data, self
)
self._add_instance_to_context(instance)
def update_instances(self, update_list):
for created_inst, _changes in update_list:
api.get_stub().imprint(created_inst.get("instance_id"),
created_inst.data_to_store())
subset_change = _changes.get("subset")
if subset_change:
api.get_stub().rename_item(created_inst.data["members"][0],
subset_change[1])
def remove_instances(self, instances):
for instance in instances:
self._remove_instance_from_context(instance)
self.host.remove_instance(instance)
subset = instance.data["subset"]
comp_id = instance.data["members"][0]
comp = api.get_stub().get_item(comp_id)
if comp:
new_comp_name = comp.name.replace(subset, '')
if not new_comp_name:
new_comp_name = "dummyCompName"
api.get_stub().rename_item(comp_id,
new_comp_name)
def create(self, subset_name_from_ui, data, pre_create_data):
stub = api.get_stub() # only after After Effects is up
if pre_create_data.get("use_selection"):
@@ -82,10 +48,19 @@ class RenderCreator(Creator):
"if 'useSelection' or create at least "
"one composition."
)
use_composition_name = (pre_create_data.get("use_composition_name") or
len(comps) > 1)
for comp in comps:
if pre_create_data.get("use_composition_name"):
composition_name = comp.name
if use_composition_name:
if "{composition}" not in subset_name_from_ui.lower():
subset_name_from_ui += "{Composition}"
composition_name = re.sub(
"[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
"",
comp.name
)
dynamic_fill = prepare_template_data({"composition":
composition_name})
subset_name = subset_name_from_ui.format(**dynamic_fill)
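A sketch of the formatting above; SUBSET_NAME_ALLOWED_SYMBOLS is emulated here (its exact value lives in openpype.pipeline.create), and prepare_template_data is approximated by also providing a capitalized variant of the key:

import re

SUBSET_NAME_ALLOWED_SYMBOLS = "a-zA-Z0-9_."  # assumption for illustration
composition_name = re.sub(
    "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), "", "Main Comp-01"
)
dynamic_fill = {
    "composition": composition_name,
    "Composition": composition_name.capitalize(),
}
print("render{Composition}".format(**dynamic_fill))  # renderMaincomp01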
@@ -129,8 +104,72 @@ class RenderCreator(Creator):
]
return output
def get_icon(self):
return resources.get_openpype_splash_filepath()
def collect_instances(self):
for instance_data in cache_and_get_instances(self):
# legacy instances have family=='render' or 'renderLocal', use them
creator_id = (instance_data.get("creator_identifier") or
instance_data.get("family", '').replace("Local", ''))
if creator_id == self.identifier:
instance_data = self._handle_legacy(instance_data)
instance = CreatedInstance.from_existing(
instance_data, self
)
self._add_instance_to_context(instance)
def update_instances(self, update_list):
for created_inst, _changes in update_list:
api.get_stub().imprint(created_inst.get("instance_id"),
created_inst.data_to_store())
subset_change = _changes.get("subset")
if subset_change:
api.get_stub().rename_item(created_inst.data["members"][0],
subset_change.new_value)
def remove_instances(self, instances):
for instance in instances:
self._remove_instance_from_context(instance)
self.host.remove_instance(instance)
subset = instance.data["subset"]
comp_id = instance.data["members"][0]
comp = api.get_stub().get_item(comp_id)
if comp:
new_comp_name = comp.name.replace(subset, '')
if not new_comp_name:
new_comp_name = "dummyCompName"
api.get_stub().rename_item(comp_id,
new_comp_name)
def get_detail_description(self):
return """Creator for Render instances"""
return """Creator for Render instances
The main publishable item in AfterEffects will be of the `render` family.
The result of this item (instance) is an image sequence or video that can
be a final delivery product or loaded and used in other DCCs.
Select a single composition and create an instance of the 'render' family,
or turn off 'Use selection' to create instances for all compositions.
'Use composition name in subset' allows explicitly adding the composition
name to the created subset name.
The position of the composition name can be set in
`project_settings/global/tools/creator/subset_name_profiles` with some
form of '{composition}' placeholder.
The composition name is used implicitly when multiple compositions are
handled at the same time.
If the {composition} placeholder is not used in 'subset_name_profiles',
the composition name will be capitalized and appended to the end of the
subset name if necessary.
If the composition name is used, it will be cleaned of characters that
would cause issues in published file names.
"""
def get_dynamic_data(self, variant, task_name, asset_doc,
project_name, host_name, instance):
@@ -155,7 +194,7 @@ class RenderCreator(Creator):
instance_data.pop("uuid")
if not instance_data.get("task"):
instance_data["task"] = legacy_io.Session.get("AVALON_TASK")
instance_data["task"] = self.create_context.get_current_task_name()
if not instance_data.get("creator_attributes"):
is_old_farm = instance_data["family"] != "renderLocal"


@@ -2,8 +2,7 @@ import openpype.hosts.aftereffects.api as api
from openpype.client import get_asset_by_name
from openpype.pipeline import (
AutoCreator,
CreatedInstance,
legacy_io,
CreatedInstance
)
from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances
@@ -38,10 +37,11 @@ class AEWorkfileCreator(AutoCreator):
existing_instance = instance
break
project_name = legacy_io.Session["AVALON_PROJECT"]
asset_name = legacy_io.Session["AVALON_ASSET"]
task_name = legacy_io.Session["AVALON_TASK"]
host_name = legacy_io.Session["AVALON_APP"]
context = self.create_context
project_name = context.get_current_project_name()
asset_name = context.get_current_asset_name()
task_name = context.get_current_task_name()
host_name = context.host_name
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)


@@ -44,7 +44,7 @@ class AppendBlendLoader(plugin.AssetLoader):
"""
representations = ["blend"]
families = ["*"]
families = ["workfile"]
label = "Append Workfile"
order = 9
@@ -68,7 +68,7 @@ class ImportBlendLoader(plugin.AssetLoader):
"""
representations = ["blend"]
families = ["*"]
families = ["workfile"]
label = "Import Workfile"
order = 9


@@ -19,7 +19,6 @@ class ValidateCameraZeroKeyframe(pyblish.api.InstancePlugin):
order = ValidateContentsOrder
hosts = ["blender"]
families = ["camera"]
version = (0, 1, 0)
label = "Zero Keyframe"
actions = [openpype.hosts.blender.api.action.SelectInvalidAction]


@@ -14,7 +14,6 @@ class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
order = ValidateContentsOrder
hosts = ["blender"]
families = ["model"]
category = "geometry"
label = "Mesh Has UV's"
actions = [openpype.hosts.blender.api.action.SelectInvalidAction]
optional = True


@@ -14,7 +14,6 @@ class ValidateMeshNoNegativeScale(pyblish.api.Validator):
order = ValidateContentsOrder
hosts = ["blender"]
families = ["model"]
category = "geometry"
label = "Mesh No Negative Scale"
actions = [openpype.hosts.blender.api.action.SelectInvalidAction]


@@ -19,7 +19,6 @@ class ValidateNoColonsInName(pyblish.api.InstancePlugin):
order = ValidateContentsOrder
hosts = ["blender"]
families = ["model", "rig"]
version = (0, 1, 0)
label = "No Colons in names"
actions = [openpype.hosts.blender.api.action.SelectInvalidAction]


@@ -21,7 +21,6 @@ class ValidateTransformZero(pyblish.api.InstancePlugin):
order = ValidateContentsOrder
hosts = ["blender"]
families = ["model"]
version = (0, 1, 0)
label = "Transform Zero"
actions = [openpype.hosts.blender.api.action.SelectInvalidAction]


@@ -143,6 +143,9 @@ class ExtractSubsetResources(publish.Extractor):
# create staging dir path
staging_dir = self.staging_dir(instance)
# append staging dir for later cleanup
instance.context.data["cleanupFullPaths"].append(staging_dir)
# add default preset type for thumbnail and reviewable video
# update them with settings and override in case the same
# are found in there
@@ -548,30 +551,3 @@ class ExtractSubsetResources(publish.Extractor):
"Path `{}` is containing more that one clip".format(path)
)
return clips[0]
def staging_dir(self, instance):
"""Provide a temporary directory in which to store extracted files
Upon calling this method the staging directory is stored inside
the instance.data['stagingDir']
"""
staging_dir = instance.data.get('stagingDir', None)
openpype_temp_dir = os.getenv("OPENPYPE_TEMP_DIR")
if not staging_dir:
if openpype_temp_dir and os.path.exists(openpype_temp_dir):
staging_dir = os.path.normpath(
tempfile.mkdtemp(
prefix="pyblish_tmp_",
dir=openpype_temp_dir
)
)
else:
staging_dir = os.path.normpath(
tempfile.mkdtemp(prefix="pyblish_tmp_")
)
instance.data['stagingDir'] = staging_dir
instance.context.data["cleanupFullPaths"].append(staging_dir)
return staging_dir


@@ -108,9 +108,9 @@ class ExtractRender(pyblish.api.InstancePlugin):
output = process.communicate()[0]
if process.returncode != 0:
raise ValueError(output.decode("utf-8"))
raise ValueError(output.decode("utf-8", errors="backslashreplace"))
self.log.debug(output.decode("utf-8"))
self.log.debug(output.decode("utf-8", errors="backslashreplace"))
# Generate representations.
extension = collection.tail[1:]


@@ -113,7 +113,7 @@ class HoudiniCreatorBase(object):
Dict[str, Any]: Shared data dictionary.
"""
if shared_data.get("houdini_cached_subsets") is not None:
if shared_data.get("houdini_cached_subsets") is None:
cache = dict()
cache_legacy = dict()
@@ -225,12 +225,12 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase):
self._add_instance_to_context(created_instance)
def update_instances(self, update_list):
for created_inst, _changes in update_list:
for created_inst, changes in update_list:
instance_node = hou.node(created_inst.get("instance_node"))
new_values = {
key: new_value
for key, (_old_value, new_value) in _changes.items()
key: changes[key].new_value
for key in changes.changed_keys
}
imprint(
instance_node,
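Both this Houdini creator and the 3dsmax creator below switch from tuple-unpacking _changes.items() to a change-tracking object exposing changed_keys and a per-key new_value. A minimal mock of the interface consumed above (class names are inferred from the usage, not the real implementation):

class Change:
    def __init__(self, old_value, new_value):
        self.old_value = old_value
        self.new_value = new_value

class Changes(dict):
    @property
    def changed_keys(self):
        return [k for k, c in self.items() if c.old_value != c.new_value]

changes = Changes(subset=Change("renderMain", "renderHero"))
new_values = {key: changes[key].new_value for key in changes.changed_keys}
print(new_values)  # {'subset': 'renderHero'}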


@@ -1,4 +1,5 @@
import os
import re
import logging
import platform
@@ -66,7 +67,7 @@ def generate_shelves():
)
continue
mandatory_attributes = {'name', 'script'}
mandatory_attributes = {'label', 'script'}
for tool_definition in shelf_definition.get('tools_list'):
# We verify that the label and script attributes of the tool
# are set
@@ -152,31 +153,32 @@ def get_or_create_tool(tool_definition, shelf):
Returns:
hou.Tool: The tool updated or the new one
"""
existing_tools = shelf.tools()
tool_label = tool_definition.get('label')
tool_label = tool_definition.get("label")
if not tool_label:
log.warning("Skipped shelf without label")
return
script_path = tool_definition["script"]
if not script_path or not os.path.exists(script_path):
log.warning("This path doesn't exist - {}".format(script_path))
return
existing_tools = shelf.tools()
existing_tool = next(
(tool for tool in existing_tools if tool.label() == tool_label),
None
)
with open(script_path) as stream:
script = stream.read()
tool_definition["script"] = script
if existing_tool:
tool_definition.pop('name', None)
tool_definition.pop('label', None)
tool_definition.pop("label", None)
existing_tool.setData(**tool_definition)
return existing_tool
tool_name = tool_label.replace(' ', '_').lower()
if not os.path.exists(tool_definition['script']):
log.warning(
"This path doesn't exist - {}".format(tool_definition['script'])
)
return
with open(tool_definition['script']) as f:
script = f.read()
tool_definition.update({'script': script})
new_tool = hou.shelves.newTool(name=tool_name, **tool_definition)
return new_tool
tool_name = re.sub(r"[^\w\d]+", "_", tool_label).lower()
return hou.shelves.newTool(name=tool_name, **tool_definition)


@@ -12,6 +12,11 @@ class MaxAddon(OpenPypeModule, IHostAddon):
def initialize(self, module_settings):
self.enabled = True
def add_implementation_envs(self, env, _app):
# Remove auto screen scale factor for Qt
# - let 3dsmax decide it's value
env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None)
def get_workfile_extensions(self):
return [".max"]


@@ -78,12 +78,12 @@ class MaxCreator(Creator, MaxCreatorBase):
self._add_instance_to_context(created_instance)
def update_instances(self, update_list):
for created_inst, _changes in update_list:
for created_inst, changes in update_list:
instance_node = created_inst.get("instance_node")
new_values = {
key: new_value
for key, (_old_value, new_value) in _changes.items()
key: changes[key].new_value
for key in changes.changed_keys
}
imprint(
instance_node,


@@ -1,4 +1,13 @@
# -*- coding: utf-8 -*-
import os
import sys
# this might happen in some 3dsmax versions where PYTHONPATH isn't added
# to sys.path automatically
for path in os.environ["PYTHONPATH"].split(os.pathsep):
if path and path not in sys.path:
sys.path.append(path)
from openpype.hosts.max.api import MaxHost
from openpype.pipeline import install_host


@@ -4,6 +4,7 @@ from maya import cmds
from openpype.client import get_asset_by_name, get_project
from openpype.pipeline import legacy_io
from . import lib
class ToolWindows:
@@ -59,25 +60,11 @@ def edit_shader_definitions():
def reset_frame_range():
"""Set frame range to current asset"""
# Set FPS first
fps = {15: 'game',
24: 'film',
25: 'pal',
30: 'ntsc',
48: 'show',
50: 'palf',
60: 'ntscf',
23.98: '23.976fps',
23.976: '23.976fps',
29.97: '29.97fps',
47.952: '47.952fps',
47.95: '47.952fps',
59.94: '59.94fps',
44100: '44100fps',
48000: '48000fps'
}.get(float(legacy_io.Session.get("AVALON_FPS", 25)), "pal")
cmds.currentUnit(time=fps)
fps = lib.convert_to_maya_fps(
float(legacy_io.Session.get("AVALON_FPS", 25))
)
lib.set_scene_fps(fps)
# Set frame start/end
project_name = legacy_io.active_project()


@@ -5,6 +5,7 @@ import sys
import platform
import uuid
import math
import re
import json
import logging
@@ -254,11 +255,6 @@ def read(node):
return data
def _get_mel_global(name):
"""Return the value of a mel global variable"""
return mel.eval("$%s = $%s;" % (name, name))
def matrix_equals(a, b, tolerance=1e-10):
"""
Compares two matrices with an imperfection tolerance
@@ -624,15 +620,15 @@ class delete_after(object):
cmds.delete(self._nodes)
def get_current_renderlayer():
return cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True)
def get_renderer(layer):
with renderlayer(layer):
return cmds.getAttr("defaultRenderGlobals.currentRenderer")
def get_current_renderlayer():
return cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True)
@contextlib.contextmanager
def no_undo(flush=False):
"""Disable the undo queue during the context
@@ -1373,27 +1369,6 @@ def set_id(node, unique_id, overwrite=False):
cmds.setAttr(attr, unique_id, type="string")
# endregion ID
def get_reference_node(path):
"""
Get the reference node when the path is found being used in a reference
Args:
path (str): the file path to check
Returns:
node (str): name of the reference node in question
"""
try:
node = cmds.file(path, query=True, referenceNode=True)
except RuntimeError:
log.debug('File is not referenced : "{}"'.format(path))
return
reference_path = cmds.referenceQuery(path, filename=True)
if os.path.normpath(path) == os.path.normpath(reference_path):
return node
def set_attribute(attribute, value, node):
"""Adjust attributes based on the value from the attribute data
@@ -1995,8 +1970,6 @@ def get_id_from_sibling(node, history_only=True):
return first_id
# Project settings
def set_scene_fps(fps, update=True):
"""Set FPS from project configuration
@@ -2009,28 +1982,21 @@ def set_scene_fps(fps, update=True):
"""
fps_mapping = {'15': 'game',
'24': 'film',
'25': 'pal',
'30': 'ntsc',
'48': 'show',
'50': 'palf',
'60': 'ntscf',
'23.98': '23.976fps',
'23.976': '23.976fps',
'29.97': '29.97fps',
'47.952': '47.952fps',
'47.95': '47.952fps',
'59.94': '59.94fps',
'44100': '44100fps',
'48000': '48000fps'}
# pull from mapping
# this should convert float string to float and int to int
# so 25.0 is converted to 25, but 23.98 will still be a float.
dec, ipart = math.modf(fps)
if dec == 0.0:
fps = int(ipart)
fps_mapping = {
'15': 'game',
'24': 'film',
'25': 'pal',
'30': 'ntsc',
'48': 'show',
'50': 'palf',
'60': 'ntscf',
'23.976023976023978': '23.976fps',
'29.97002997002997': '29.97fps',
'47.952047952047955': '47.952fps',
'59.94005994005994': '59.94fps',
'44100': '44100fps',
'48000': '48000fps'
}
unit = fps_mapping.get(str(fps), None)
if unit is None:
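The math.modf normalization above turns whole-number floats into ints before the string lookup, so both 25 and 25.0 hit the '25' key. In isolation:

import math

for fps in (25.0, 23.976023976023978):
    dec, ipart = math.modf(fps)
    if dec == 0.0:
        fps = int(ipart)
    print(str(fps))  # "25", then "23.976023976023978"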
@@ -2150,7 +2116,9 @@ def set_context_settings():
asset_data = asset_doc.get("data", {})
# Set project fps
fps = asset_data.get("fps", project_data.get("fps", 25))
fps = convert_to_maya_fps(
asset_data.get("fps", project_data.get("fps", 25))
)
legacy_io.Session["AVALON_FPS"] = str(fps)
set_scene_fps(fps)
@@ -2172,15 +2140,12 @@ def validate_fps():
"""
fps = get_current_project_asset(fields=["data.fps"])["data"]["fps"]
# TODO(antirotor): This is hack as for framerates having multiple
# decimal places. FTrack is ceiling decimal values on
# fps to two decimal places but Maya 2019+ is reporting those fps
# with much higher resolution. As we currently cannot fix Ftrack
# rounding, we have to round those numbers coming from Maya.
current_fps = float_round(mel.eval('currentTimeUnitToFPS()'), 2)
expected_fps = convert_to_maya_fps(
get_current_project_asset(fields=["data.fps"])["data"]["fps"]
)
current_fps = mel.eval('currentTimeUnitToFPS()')
fps_match = current_fps == fps
fps_match = current_fps == expected_fps
if not fps_match and not IS_HEADLESS:
from openpype.widgets import popup
@@ -2189,14 +2154,19 @@ def validate_fps():
dialog = popup.PopupUpdateKeys(parent=parent)
dialog.setModal(True)
dialog.setWindowTitle("Maya scene does not match project FPS")
dialog.setMessage("Scene %i FPS does not match project %i FPS" %
(current_fps, fps))
dialog.setMessage(
"Scene {} FPS does not match project {} FPS".format(
current_fps, expected_fps
)
)
dialog.setButtonText("Fix")
# Set new text for button (add optional argument for the popup?)
toggle = dialog.widgets["toggle"]
update = toggle.isChecked()
dialog.on_clicked_state.connect(lambda: set_scene_fps(fps, update))
dialog.on_clicked_state.connect(
lambda: set_scene_fps(expected_fps, update)
)
dialog.show()
@@ -3379,3 +3349,116 @@ def iter_visible_nodes_in_range(nodes, start, end):
def get_attribute_input(attr):
connections = cmds.listConnections(attr, plugs=True, destination=False)
return connections[0] if connections else None
def convert_to_maya_fps(fps):
"""Convert any fps to supported Maya framerates."""
float_framerates = [
23.976023976023978,
# WTF is 29.97 df vs fps?
29.97002997002997,
47.952047952047955,
59.94005994005994
]
# 44100 fps evaluates as 41000.0. Why? Omitting for now.
int_framerates = [
2,
3,
4,
5,
6,
8,
10,
12,
15,
16,
20,
24,
25,
30,
40,
48,
50,
60,
75,
80,
90,
100,
120,
125,
150,
200,
240,
250,
300,
375,
400,
500,
600,
750,
1200,
1500,
2000,
3000,
6000,
48000
]
# If input fps is a whole number, we validate and return it as-is.
if float(fps).is_integer():
# Validate fps is part of Maya's fps selection.
if fps not in int_framerates:
raise ValueError(
"Framerate \"{}\" is not supported in Maya".format(fps)
)
return fps
else:
# Differences to supported float frame rates.
differences = []
for i in float_framerates:
differences.append(abs(i - fps))
# Validate difference does not stray too far from supported framerates.
min_difference = min(differences)
min_index = differences.index(min_difference)
supported_framerate = float_framerates[min_index]
if min_difference > 0.1:
raise ValueError(
"Framerate \"{}\" strays too far from any supported framerate"
" in Maya. Closest supported framerate is \"{}\"".format(
fps, supported_framerate
)
)
return supported_framerate
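Usage sketch for the function above; the expected values follow from the framerate lists and the 0.1 tolerance:

print(convert_to_maya_fps(25))      # 25: supported integer rate, returned as-is
print(convert_to_maya_fps(23.976))  # 23.976023976023978: snapped to Maya's rate
print(convert_to_maya_fps(29.97))   # 29.97002997002997
convert_to_maya_fps(23.5)           # raises ValueError: strays too far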
def write_xgen_file(data, filepath):
"""Overwrites data in .xgen files.
A fairly naive approach, mainly intended to overwrite "xgDataPath" and "xgProjectPath".
Args:
data (dict): Dictionary of key, value. Key matches with xgen file.
For example:
{"xgDataPath": "some/path"}
filepath (string): Absolute path of .xgen file.
"""
# Generate a regex lookup from line to key; basically
# match any of the keys in the form `\t{key}\t\t`
keys = "|".join(re.escape(key) for key in data.keys())
re_keys = re.compile("^\t({})\t\t".format(keys))
lines = []
with open(filepath, "r") as f:
for line in f:
match = re_keys.match(line)
if match:
key = match.group(1)
value = data[key]
line = "\t{}\t\t{}\n".format(key, value)
lines.append(line)
with open(filepath, "w") as f:
f.writelines(lines)
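Usage sketch for the helper above (paths are hypothetical):

write_xgen_file(
    {
        "xgDataPath": "/projects/demo/xgen/collections/char_hair",
        "xgProjectPath": "/projects/demo/",
    },
    "/projects/demo/scenes/char_hair.xgen",
)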


@@ -50,7 +50,6 @@ def install():
parent="MayaWindow"
)
renderer = cmds.getAttr('defaultRenderGlobals.currentRenderer').lower()
# Create context menu
context_label = "{}, {}".format(
legacy_io.Session["AVALON_ASSET"],


@@ -514,6 +514,9 @@ def check_lock_on_current_file():
# add the lock file when opening the file
filepath = current_file()
# Skip if current file is 'untitled'
if not filepath:
return
if is_workfile_locked(filepath):
# add lockfile dialog
@@ -680,10 +683,12 @@ def before_workfile_save(event):
def after_workfile_save(event):
workfile_name = event["filename"]
if handle_workfile_locks():
if workfile_name:
if not is_workfile_locked(workfile_name):
create_workfile_lock(workfile_name)
if (
handle_workfile_locks()
and workfile_name
and not is_workfile_locked(workfile_name)
):
create_workfile_lock(workfile_name)
class MayaDirmap(HostDirmap):


@@ -300,6 +300,39 @@ class ReferenceLoader(Loader):
str(representation["_id"]),
type="string")
# When an animation or pointcache gets connected to an Xgen container,
# the compound attribute "xgenContainers" gets created. When animation
# containers get updated, we also need to update the cacheFileName on
# the Xgen collection.
compound_name = "xgenContainers"
if cmds.objExists("{}.{}".format(node, compound_name)):
import xgenm
container_amount = cmds.getAttr(
"{}.{}".format(node, compound_name), size=True
)
# loop through all compound children
for i in range(container_amount):
attr = "{}.{}[{}].container".format(node, compound_name, i)
objectset = cmds.listConnections(attr)[0]
reference_node = cmds.sets(objectset, query=True)[0]
palettes = cmds.ls(
cmds.referenceQuery(reference_node, nodes=True),
type="xgmPalette"
)
for palette in palettes:
for description in xgenm.descriptions(palette):
xgenm.setAttr(
"cacheFileName",
path.replace("\\", "/"),
palette,
description,
"SplinePrimitive"
)
# Refresh UI and viewport.
de = xgenm.xgGlobal.DescriptionEditor
de.refresh("Full")
def remove(self, container):
"""Remove an existing `container` from Maya scene


@@ -2,7 +2,7 @@ import json
from maya import cmds
from openpype.pipeline import registered_host
from openpype.pipeline import registered_host, get_current_asset_name
from openpype.pipeline.workfile.workfile_template_builder import (
TemplateAlreadyImported,
AbstractTemplateBuilder,
@@ -41,10 +41,27 @@ class MayaTemplateBuilder(AbstractTemplateBuilder):
))
cmds.sets(name=PLACEHOLDER_SET, empty=True)
cmds.file(path, i=True, returnNewNodes=True)
new_nodes = cmds.file(path, i=True, returnNewNodes=True)
cmds.setAttr(PLACEHOLDER_SET + ".hiddenInOutliner", True)
imported_sets = cmds.ls(new_nodes, set=True)
if not imported_sets:
return True
# update imported sets information
asset_name = get_current_asset_name()
for node in imported_sets:
if not cmds.attributeQuery("id", node=node, exists=True):
continue
if cmds.getAttr("{}.id".format(node)) != "pyblish.avalon.instance":
continue
if not cmds.attributeQuery("asset", node=node, exists=True):
continue
cmds.setAttr(
"{}.asset".format(node), asset_name, type="string")
return True


@@ -6,7 +6,7 @@ from openpype.hosts.maya.api import (
from maya import cmds
class CreateAss(plugin.Creator):
class CreateArnoldSceneSource(plugin.Creator):
"""Arnold Scene Source"""
name = "ass"
@@ -29,7 +29,7 @@ class CreateAss(plugin.Creator):
maskOperator = False
def __init__(self, *args, **kwargs):
super(CreateAss, self).__init__(*args, **kwargs)
super(CreateArnoldSceneSource, self).__init__(*args, **kwargs)
# Add animation data
self.data.update(lib.collect_animation_data())
@@ -52,7 +52,7 @@ class CreateAss(plugin.Creator):
self.data["maskOperator"] = self.maskOperator
def process(self):
instance = super(CreateAss, self).process()
instance = super(CreateArnoldSceneSource, self).process()
nodes = []
@@ -61,6 +61,6 @@ class CreateAss(plugin.Creator):
cmds.sets(nodes, rm=instance)
assContent = cmds.sets(name="content_SET")
assProxy = cmds.sets(name="proxy_SET", empty=True)
assContent = cmds.sets(name=instance + "_content_SET")
assProxy = cmds.sets(name=instance + "_proxy_SET", empty=True)
cmds.sets([assContent, assProxy], forceElement=instance)


@@ -1,3 +1,5 @@
from maya import cmds
from openpype.hosts.maya.api import (
lib,
plugin
@@ -37,3 +39,9 @@ class CreatePointCache(plugin.Creator):
# Default to not send to farm.
self.data["farm"] = False
self.data["priority"] = 50
def process(self):
instance = super(CreatePointCache, self).process()
assProxy = cmds.sets(name=instance + "_proxy_SET", empty=True)
cmds.sets(assProxy, forceElement=instance)


@@ -54,6 +54,7 @@ class CreateRender(plugin.Creator):
tileRendering (bool): Instance is set to tile rendering mode. We
won't submit actual render, but we'll make publish job to wait
for Tile Assembly job done and then publish.
strict_error_checking (bool): Enable/disable error checking on Deadline
See Also:
https://pype.club/docs/artist_hosts_maya#creating-basic-render-setup
@@ -271,6 +272,9 @@ class CreateRender(plugin.Creator):
secondary_pool = pool_setting["secondary_pool"]
self.data["secondaryPool"] = self._set_default_pool(pool_names,
secondary_pool)
strict_error_checking = maya_submit_dl.get("strict_error_checking",
True)
self.data["strict_error_checking"] = strict_error_checking
if muster_enabled:
self.log.info(">>> Loading Muster credentials ...")


@@ -2,9 +2,9 @@ from openpype.hosts.maya.api import plugin
class CreateXgen(plugin.Creator):
"""Xgen interactive export"""
"""Xgen"""
name = "xgen"
label = "Xgen Interactive"
label = "Xgen"
family = "xgen"
icon = "pagelines"


@@ -0,0 +1,153 @@
from maya import cmds
from openpype.pipeline import InventoryAction, get_representation_context
from openpype.hosts.maya.api.lib import get_id
class ConnectGeometry(InventoryAction):
"""Connect geometries within containers.
The source container connects to the target containers by searching for
matching geometry IDs (cbId).
Source containers are of family "animation" or "pointcache".
The connection is made with a live world-space blendshape.
"""
label = "Connect Geometry"
icon = "link"
color = "white"
def process(self, containers):
# Validate selection is more than 1.
message = (
"Only 1 container selected. 2+ containers needed for this action."
)
if len(containers) == 1:
self.display_warning(message)
return
# Categorize containers by family.
containers_by_family = {}
for container in containers:
family = get_representation_context(
container["representation"]
)["subset"]["data"]["family"]
try:
containers_by_family[family].append(container)
except KeyError:
containers_by_family[family] = [container]
# Validate to only 1 source container.
source_containers = containers_by_family.get("animation", [])
source_containers += containers_by_family.get("pointcache", [])
source_container_namespaces = [
x["namespace"] for x in source_containers
]
message = (
"{} animation containers selected:\n\n{}\n\nOnly select 1 of type "
"\"animation\" or \"pointcache\".".format(
len(source_containers), source_container_namespaces
)
)
if len(source_containers) != 1:
self.display_warning(message)
return
source_object = source_containers[0]["objectName"]
# Collect matching geometry transforms based cbId attribute.
target_containers = []
for family, containers in containers_by_family.items():
if family in ["animation", "pointcache"]:
continue
target_containers.extend(containers)
source_data = self.get_container_data(source_object)
matches = []
node_types = set()
for target_container in target_containers:
target_data = self.get_container_data(
target_container["objectName"]
)
node_types.update(target_data["node_types"])
for id, transform in target_data["ids"].items():
source_match = source_data["ids"].get(id)
if source_match:
matches.append([source_match, transform])
# Message user about what is about to happen.
if not matches:
self.display_warning("No matching geometries found.")
return
message = "Connecting geometries:\n\n"
for match in matches:
message += "{} > {}\n".format(match[0], match[1])
choice = self.display_warning(message, show_cancel=True)
if choice is False:
return
# Setup live worldspace blendshape connection.
for source, target in matches:
blendshape = cmds.blendShape(source, target)[0]
cmds.setAttr(blendshape + ".origin", 0)
cmds.setAttr(blendshape + "." + target.split(":")[-1], 1)
# Update Xgen if in any of the containers.
if "xgmPalette" in node_types:
cmds.xgmPreview()
def get_container_data(self, container):
"""Collects data about the container nodes.
Args:
container (dict): Container instance.
Returns:
data (dict):
"node_types": All node types in container nodes.
"ids": If the node is a mesh, we collect its parent transform
id.
"""
data = {"node_types": set(), "ids": {}}
ref_node = cmds.sets(container, query=True, nodesOnly=True)[0]
for node in cmds.referenceQuery(ref_node, nodes=True):
node_type = cmds.nodeType(node)
data["node_types"].add(node_type)
# Only interested in mesh transforms for connecting geometry with
# blendshape.
if node_type != "mesh":
continue
transform = cmds.listRelatives(node, parent=True)[0]
data["ids"][get_id(transform)] = transform
return data
def display_warning(self, message, show_cancel=False):
"""Show feedback to user.
Returns:
bool
"""
from Qt import QtWidgets
accept = QtWidgets.QMessageBox.Ok
if show_cancel:
buttons = accept | QtWidgets.QMessageBox.Cancel
else:
buttons = accept
state = QtWidgets.QMessageBox.warning(
None,
"",
message,
buttons=buttons,
defaultButton=accept
)
return state == accept


@@ -0,0 +1,168 @@
from maya import cmds
import xgenm
from openpype.pipeline import (
InventoryAction, get_representation_context, get_representation_path
)
class ConnectXgen(InventoryAction):
"""Connect Xgen with an animation or pointcache.
"""
label = "Connect Xgen"
icon = "link"
color = "white"
def process(self, containers):
# Validate selection is more than 1.
message = (
"Only 1 container selected. 2+ containers needed for this action."
)
if len(containers) == 1:
self.display_warning(message)
return
# Categorize containers by family.
containers_by_family = {}
for container in containers:
family = get_representation_context(
container["representation"]
)["subset"]["data"]["family"]
try:
containers_by_family[family].append(container)
except KeyError:
containers_by_family[family] = [container]
# Validate to only 1 source container.
source_containers = containers_by_family.get("animation", [])
source_containers += containers_by_family.get("pointcache", [])
source_container_namespaces = [
x["namespace"] for x in source_containers
]
message = (
"{} animation containers selected:\n\n{}\n\nOnly select 1 of type "
"\"animation\" or \"pointcache\".".format(
len(source_containers), source_container_namespaces
)
)
if len(source_containers) != 1:
self.display_warning(message)
return
source_container = source_containers[0]
source_object = source_container["objectName"]
# Validate source representation is an alembic.
source_path = get_representation_path(
get_representation_context(
source_container["representation"]
)["representation"]
).replace("\\", "/")
message = "Animation container \"{}\" is not an alembic:\n{}".format(
source_container["namespace"], source_path
)
if not source_path.endswith(".abc"):
self.display_warning(message)
return
# Target containers.
target_containers = []
for family, containers in containers_by_family.items():
if family in ["animation", "pointcache"]:
continue
target_containers.extend(containers)
# Inform user of connections from source representation to target
# descriptions.
descriptions_data = []
connections_msg = ""
for target_container in target_containers:
reference_node = cmds.sets(
target_container["objectName"], query=True
)[0]
palettes = cmds.ls(
cmds.referenceQuery(reference_node, nodes=True),
type="xgmPalette"
)
for palette in palettes:
for description in xgenm.descriptions(palette):
descriptions_data.append([palette, description])
connections_msg += "\n{}/{}".format(palette, description)
message = "Connecting \"{}\" to:\n".format(
source_container["namespace"]
)
message += connections_msg
choice = self.display_warning(message, show_cancel=True)
if choice is False:
return
# Recreate "xgenContainers" attribute to reset.
compound_name = "xgenContainers"
attr = "{}.{}".format(source_object, compound_name)
if cmds.objExists(attr):
cmds.deleteAttr(attr)
cmds.addAttr(
source_object,
longName=compound_name,
attributeType="compound",
numberOfChildren=1,
multi=True
)
# Connect target containers.
for target_container in target_containers:
cmds.addAttr(
source_object,
longName="container",
attributeType="message",
parent=compound_name
)
index = target_containers.index(target_container)
cmds.connectAttr(
target_container["objectName"] + ".message",
source_object + ".{}[{}].container".format(
compound_name, index
)
)
# Setup cache on Xgen
object = "SplinePrimitive"
for palette, description in descriptions_data:
xgenm.setAttr("useCache", "true", palette, description, object)
xgenm.setAttr("liveMode", "false", palette, description, object)
xgenm.setAttr(
"cacheFileName", source_path, palette, description, object
)
# Refresh UI and viewport.
de = xgenm.xgGlobal.DescriptionEditor
de.refresh("Full")
def display_warning(self, message, show_cancel=False):
"""Show feedback to user.
Returns:
bool
"""
from Qt import QtWidgets
accept = QtWidgets.QMessageBox.Ok
if show_cancel:
buttons = accept | QtWidgets.QMessageBox.Cancel
else:
buttons = accept
state = QtWidgets.QMessageBox.warning(
None,
"",
message,
buttons=buttons,
defaultButton=accept
)
return state == accept


@@ -93,7 +93,20 @@ class ImportMayaLoader(load.LoaderPlugin):
"""
representations = ["ma", "mb", "obj"]
families = ["*"]
families = [
"model",
"pointcache",
"proxyAbc",
"animation",
"mayaAscii",
"mayaScene",
"setdress",
"layout",
"camera",
"rig",
"camerarig",
"staticMesh"
]
label = "Import"
order = 10


@@ -1,132 +0,0 @@
import os
from openpype.pipeline import (
legacy_io,
load,
get_representation_path
)
from openpype.settings import get_project_settings
class AlembicStandinLoader(load.LoaderPlugin):
"""Load Alembic as Arnold Standin"""
families = ["animation", "model", "proxyAbc", "pointcache"]
representations = ["abc"]
label = "Import Alembic as Arnold Standin"
order = -5
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, options):
import maya.cmds as cmds
import mtoa.ui.arnoldmenu
from openpype.hosts.maya.api.pipeline import containerise
from openpype.hosts.maya.api.lib import unique_namespace
version = context["version"]
version_data = version.get("data", {})
family = version["data"]["families"]
self.log.info("version_data: {}\n".format(version_data))
self.log.info("family: {}\n".format(family))
frameStart = version_data.get("frameStart", None)
asset = context["asset"]["name"]
namespace = namespace or unique_namespace(
asset + "_",
prefix="_" if asset[0].isdigit() else "",
suffix="_",
)
# Root group
label = "{}:{}".format(namespace, name)
root = cmds.group(name=label, empty=True)
settings = get_project_settings(os.environ['AVALON_PROJECT'])
colors = settings["maya"]["load"]["colors"]
fps = legacy_io.Session["AVALON_FPS"]
c = colors.get(family[0])
if c is not None:
r = (float(c[0]) / 255)
g = (float(c[1]) / 255)
b = (float(c[2]) / 255)
cmds.setAttr(root + ".useOutlinerColor", 1)
cmds.setAttr(root + ".outlinerColor",
r, g, b)
transform_name = label + "_ABC"
standinShape = cmds.ls(mtoa.ui.arnoldmenu.createStandIn())[0]
standin = cmds.listRelatives(standinShape, parent=True,
typ="transform")
standin = cmds.rename(standin, transform_name)
standinShape = cmds.listRelatives(standin, children=True)[0]
cmds.parent(standin, root)
# Set the standin filepath
cmds.setAttr(standinShape + ".dso", self.fname, type="string")
cmds.setAttr(standinShape + ".abcFPS", float(fps))
if frameStart is None:
cmds.setAttr(standinShape + ".useFrameExtension", 0)
elif "model" in family:
cmds.setAttr(standinShape + ".useFrameExtension", 0)
else:
cmds.setAttr(standinShape + ".useFrameExtension", 1)
nodes = [root, standin]
self[:] = nodes
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__)
def update(self, container, representation):
import pymel.core as pm
path = get_representation_path(representation)
fps = legacy_io.Session["AVALON_FPS"]
# Update the standin
standins = list()
members = pm.sets(container['objectName'], query=True)
self.log.info("container:{}".format(container))
for member in members:
shape = member.getShape()
if (shape and shape.type() == "aiStandIn"):
standins.append(shape)
for standin in standins:
standin.dso.set(path)
standin.abcFPS.set(float(fps))
if "modelMain" in container['objectName']:
standin.useFrameExtension.set(0)
else:
standin.useFrameExtension.set(1)
container = pm.PyNode(container["objectName"])
container.representation.set(str(representation["_id"]))
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
import maya.cmds as cmds
members = cmds.sets(container['objectName'], query=True)
cmds.lockNode(members, lock=False)
cmds.delete([container['objectName']] + members)
# Clean up the namespace
try:
cmds.namespace(removeNamespace=container['namespace'],
deleteNamespaceContent=True)
except RuntimeError:
pass


@@ -0,0 +1,218 @@
import os
import clique
import maya.cmds as cmds
import mtoa.ui.arnoldmenu
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path
)
from openpype.hosts.maya.api.lib import (
unique_namespace, get_attribute_input, maintained_selection
)
from openpype.hosts.maya.api.pipeline import containerise
def is_sequence(files):
sequence = False
collections, remainder = clique.assemble(files)
if collections:
sequence = True
return sequence
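clique.assemble groups numbered files into collections, so the helper reports whether the given file names form a frame sequence. For example:

print(is_sequence(["standin.0001.ass", "standin.0002.ass"]))  # True
print(is_sequence(["standin.ass"]))  # False: no numbered collection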
class ArnoldStandinLoader(load.LoaderPlugin):
"""Load as Arnold standin"""
families = ["ass", "animation", "model", "proxyAbc", "pointcache"]
representations = ["ass", "abc"]
label = "Load as Arnold standin"
order = -5
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, options):
version = context['version']
version_data = version.get("data", {})
self.log.info("version_data: {}\n".format(version_data))
asset = context['asset']['name']
namespace = namespace or unique_namespace(
asset + "_",
prefix="_" if asset[0].isdigit() else "",
suffix="_",
)
# Root group
label = "{}:{}".format(namespace, name)
root = cmds.group(name=label, empty=True)
# Set color.
settings = get_project_settings(context["project"]["name"])
color = settings['maya']['load']['colors'].get('ass')
if color is not None:
cmds.setAttr(root + ".useOutlinerColor", True)
cmds.setAttr(
root + ".outlinerColor", color[0], color[1], color[2]
)
with maintained_selection():
# Create transform with shape
transform_name = label + "_standin"
standin_shape = mtoa.ui.arnoldmenu.createStandIn()
standin = cmds.listRelatives(standin_shape, parent=True)[0]
standin = cmds.rename(standin, transform_name)
standin_shape = cmds.listRelatives(standin, shapes=True)[0]
cmds.parent(standin, root)
# Set the standin filepath
path, operator = self._setup_proxy(
standin_shape, self.fname, namespace
)
cmds.setAttr(standin_shape + ".dso", path, type="string")
sequence = is_sequence(os.listdir(os.path.dirname(self.fname)))
cmds.setAttr(standin_shape + ".useFrameExtension", sequence)
nodes = [root, standin]
if operator is not None:
nodes.append(operator)
self[:] = nodes
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__)
def get_next_free_multi_index(self, attr_name):
"""Find the next unconnected multi index at the input attribute."""
for index in range(10000000):
connection_info = cmds.connectionInfo(
"{}[{}]".format(attr_name, index),
sourceFromDestination=True
)
if len(connection_info or []) == 0:
return index
def _get_proxy_path(self, path):
basename_split = os.path.basename(path).split(".")
proxy_basename = (
basename_split[0] + "_proxy." + ".".join(basename_split[1:])
)
proxy_path = "/".join([os.path.dirname(path), proxy_basename])
return proxy_basename, proxy_path
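The proxy naming convention the method implements, shown in isolation (the path is hypothetical):

import os

def get_proxy_path(path):
    # mirrors ArnoldStandinLoader._get_proxy_path above
    basename_split = os.path.basename(path).split(".")
    proxy_basename = (
        basename_split[0] + "_proxy." + ".".join(basename_split[1:])
    )
    return proxy_basename, "/".join([os.path.dirname(path), proxy_basename])

print(get_proxy_path("/renders/standin.0001.ass"))
# ('standin_proxy.0001.ass', '/renders/standin_proxy.0001.ass')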
def _setup_proxy(self, shape, path, namespace):
proxy_basename, proxy_path = self._get_proxy_path(path)
options_node = "defaultArnoldRenderOptions"
merge_operator = get_attribute_input(options_node + ".operator")
if merge_operator is None:
merge_operator = cmds.createNode("aiMerge")
cmds.connectAttr(
merge_operator + ".message", options_node + ".operator"
)
merge_operator = merge_operator.split(".")[0]
string_replace_operator = cmds.createNode(
"aiStringReplace", name=namespace + ":string_replace_operator"
)
node_type = "alembic" if path.endswith(".abc") else "procedural"
cmds.setAttr(
string_replace_operator + ".selection",
"*.(@node=='{}')".format(node_type),
type="string"
)
cmds.setAttr(
string_replace_operator + ".match",
proxy_basename,
type="string"
)
cmds.setAttr(
string_replace_operator + ".replace",
os.path.basename(path),
type="string"
)
cmds.connectAttr(
string_replace_operator + ".out",
"{}.inputs[{}]".format(
merge_operator,
self.get_next_free_multi_index(merge_operator + ".inputs")
)
)
        # We set up the string operator no matter whether there is a proxy or
        # not. This makes it easier to update since the string operator will
        # always be created. Return the original path to use for the standin.
if not os.path.exists(proxy_path):
return path, string_replace_operator
return proxy_path, string_replace_operator
def update(self, container, representation):
# Update the standin
members = cmds.sets(container['objectName'], query=True)
for member in members:
if cmds.nodeType(member) == "aiStringReplace":
string_replace_operator = member
shapes = cmds.listRelatives(member, shapes=True)
if not shapes:
continue
if cmds.nodeType(shapes[0]) == "aiStandIn":
standin = shapes[0]
path = get_representation_path(representation)
proxy_basename, proxy_path = self._get_proxy_path(path)
        # Whether there is a proxy or not, we still update the string
        # operator. If no proxy exists, the string operator won't replace
        # anything.
cmds.setAttr(
string_replace_operator + ".match",
"resources/" + proxy_basename,
type="string"
)
cmds.setAttr(
string_replace_operator + ".replace",
os.path.basename(path),
type="string"
)
dso_path = path
if os.path.exists(proxy_path):
dso_path = proxy_path
cmds.setAttr(standin + ".dso", dso_path, type="string")
sequence = is_sequence(os.listdir(os.path.dirname(path)))
cmds.setAttr(standin + ".useFrameExtension", sequence)
cmds.setAttr(
container["objectName"] + ".representation",
str(representation["_id"]),
type="string"
)
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
members = cmds.sets(container['objectName'], query=True)
cmds.lockNode(members, lock=False)
cmds.delete([container['objectName']] + members)
# Clean up the namespace
try:
cmds.namespace(removeNamespace=container['namespace'],
deleteNamespaceContent=True)
except RuntimeError:
pass
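
For reference, the sequence detection in is_sequence() above relies on clique grouping frame-numbered file names into collections; a minimal sketch, runnable outside Maya and assuming only that the clique package is installed (the file names are made up):

import clique

# Three frames of the same name collapse into one collection;
# the odd file ends up in the remainder.
files = ["render.1001.ass", "render.1002.ass", "render.1003.ass", "notes.txt"]
collections, remainder = clique.assemble(files)
print(bool(collections))  # True -> treated as a frame sequence
print(remainder)          # ['notes.txt']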

View file

@@ -1,290 +0,0 @@
import os
import clique
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path
)
import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api.plugin import get_reference_node
from openpype.hosts.maya.api.lib import (
maintained_selection,
unique_namespace
)
from openpype.hosts.maya.api.pipeline import containerise
class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
"""Load Arnold Proxy as reference"""
families = ["ass"]
representations = ["ass"]
label = "Reference .ASS standin with Proxy"
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, options):
import maya.cmds as cmds
import pymel.core as pm
version = context['version']
version_data = version.get("data", {})
self.log.info("version_data: {}\n".format(version_data))
frameStart = version_data.get("frameStart", None)
try:
family = context["representation"]["context"]["family"]
except ValueError:
family = "ass"
with maintained_selection():
groupName = "{}:{}".format(namespace, name)
path = self.fname
proxyPath_base = os.path.splitext(path)[0]
if frameStart is not None:
proxyPath_base = os.path.splitext(proxyPath_base)[0]
publish_folder = os.path.split(path)[0]
files_in_folder = os.listdir(publish_folder)
collections, remainder = clique.assemble(files_in_folder)
if collections:
hashes = collections[0].padding * '#'
coll = collections[0].format('{head}[index]{tail}')
filename = coll.replace('[index]', hashes)
path = os.path.join(publish_folder, filename)
proxyPath = proxyPath_base + ".ma"
project_name = context["project"]["name"]
file_url = self.prepare_root_value(proxyPath,
project_name)
nodes = cmds.file(file_url,
namespace=namespace,
reference=True,
returnNewNodes=True,
groupReference=True,
groupName=groupName)
cmds.makeIdentity(groupName, apply=False, rotate=True,
translate=True, scale=True)
# Set attributes
proxyShape = pm.ls(nodes, type="mesh")[0]
proxyShape.aiTranslator.set('procedural')
proxyShape.dso.set(path)
proxyShape.aiOverrideShaders.set(0)
settings = get_project_settings(project_name)
colors = settings['maya']['load']['colors']
c = colors.get(family)
if c is not None:
cmds.setAttr(groupName + ".useOutlinerColor", 1)
cmds.setAttr(groupName + ".outlinerColor",
(float(c[0])/255),
(float(c[1])/255),
(float(c[2])/255)
)
self[:] = nodes
return nodes
def switch(self, container, representation):
self.update(container, representation)
def update(self, container, representation):
from maya import cmds
import pymel.core as pm
node = container["objectName"]
representation["context"].pop("frame", None)
path = get_representation_path(representation)
print(path)
# path = self.fname
print(self.fname)
proxyPath = os.path.splitext(path)[0] + ".ma"
print(proxyPath)
# Get reference node from container members
members = cmds.sets(node, query=True, nodesOnly=True)
reference_node = get_reference_node(members)
assert os.path.exists(proxyPath), "%s does not exist." % proxyPath
try:
file_url = self.prepare_root_value(proxyPath,
representation["context"]
["project"]
["name"])
content = cmds.file(file_url,
loadReference=reference_node,
type="mayaAscii",
returnNewNodes=True)
# Set attributes
proxyShape = pm.ls(content, type="mesh")[0]
proxyShape.aiTranslator.set('procedural')
proxyShape.dso.set(path)
proxyShape.aiOverrideShaders.set(0)
except RuntimeError as exc:
# When changing a reference to a file that has load errors the
# command will raise an error even if the file is still loaded
# correctly (e.g. when raising errors on Arnold attributes)
# When the file is loaded and has content, we consider it's fine.
if not cmds.referenceQuery(reference_node, isLoaded=True):
raise
content = cmds.referenceQuery(reference_node,
nodes=True,
dagPath=True)
if not content:
raise
self.log.warning("Ignoring file read error:\n%s", exc)
# Add new nodes of the reference to the container
cmds.sets(content, forceElement=node)
# Remove any placeHolderList attribute entries from the set that
# are remaining from nodes being removed from the referenced file.
members = cmds.sets(node, query=True)
invalid = [x for x in members if ".placeHolderList" in x]
if invalid:
cmds.sets(invalid, remove=node)
# Update metadata
cmds.setAttr("{}.representation".format(node),
str(representation["_id"]),
type="string")
class AssStandinLoader(load.LoaderPlugin):
"""Load .ASS file as standin"""
families = ["ass"]
representations = ["ass"]
label = "Load .ASS file as standin"
order = -5
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, options):
import maya.cmds as cmds
import mtoa.ui.arnoldmenu
import pymel.core as pm
version = context['version']
version_data = version.get("data", {})
self.log.info("version_data: {}\n".format(version_data))
frameStart = version_data.get("frameStart", None)
asset = context['asset']['name']
namespace = namespace or unique_namespace(
asset + "_",
prefix="_" if asset[0].isdigit() else "",
suffix="_",
)
# cmds.loadPlugin("gpuCache", quiet=True)
# Root group
label = "{}:{}".format(namespace, name)
root = pm.group(name=label, empty=True)
settings = get_project_settings(os.environ['AVALON_PROJECT'])
colors = settings['maya']['load']['colors']
c = colors.get('ass')
if c is not None:
cmds.setAttr(root + ".useOutlinerColor", 1)
cmds.setAttr(root + ".outlinerColor",
c[0], c[1], c[2])
# Create transform with shape
transform_name = label + "_ASS"
# transform = pm.createNode("transform", name=transform_name,
# parent=root)
standinShape = pm.PyNode(mtoa.ui.arnoldmenu.createStandIn())
standin = standinShape.getParent()
standin.rename(transform_name)
pm.parent(standin, root)
# Set the standin filepath
standinShape.dso.set(self.fname)
if frameStart is not None:
standinShape.useFrameExtension.set(1)
nodes = [root, standin]
self[:] = nodes
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__)
def update(self, container, representation):
import pymel.core as pm
path = get_representation_path(representation)
files_in_path = os.listdir(os.path.split(path)[0])
sequence = 0
collections, remainder = clique.assemble(files_in_path)
if collections:
sequence = 1
# Update the standin
standins = list()
members = pm.sets(container['objectName'], query=True)
for member in members:
shape = member.getShape()
if (shape and shape.type() == "aiStandIn"):
standins.append(shape)
for standin in standins:
standin.dso.set(path)
standin.useFrameExtension.set(sequence)
container = pm.PyNode(container["objectName"])
container.representation.set(str(representation["_id"]))
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
import maya.cmds as cmds
members = cmds.sets(container['objectName'], query=True)
cmds.lockNode(members, lock=False)
cmds.delete([container['objectName']] + members)
# Clean up the namespace
try:
cmds.namespace(removeNamespace=container['namespace'],
deleteNamespaceContent=True)
except RuntimeError:
pass

View file

@@ -25,9 +25,9 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
"camera",
"rig",
"camerarig",
"xgen",
"staticMesh",
"mvLook"]
representations = ["ma", "abc", "fbx", "mb"]
label = "Reference"

View file

@@ -81,10 +81,11 @@ class VRayProxyLoader(load.LoaderPlugin):
c = colors.get(family)
if c is not None:
cmds.setAttr("{0}.useOutlinerColor".format(group_node), 1)
cmds.setAttr("{0}.outlinerColor".format(group_node),
(float(c[0])/255),
(float(c[1])/255),
(float(c[2])/255)
cmds.setAttr(
"{0}.outlinerColor".format(group_node),
(float(c[0]) / 255),
(float(c[1]) / 255),
(float(c[2]) / 255)
)
return containerise(
@@ -101,7 +102,7 @@ class VRayProxyLoader(load.LoaderPlugin):
assert cmds.objExists(node), "Missing container"
members = cmds.sets(node, query=True) or []
vraymeshes = cmds.ls(members, type="VRayMesh")
vraymeshes = cmds.ls(members, type="VRayProxy")
assert vraymeshes, "Cannot find VRayMesh in container"
# get all representations for this version

View file

@@ -0,0 +1,173 @@
import os
import maya.cmds as cmds
import xgenm
from Qt import QtWidgets
import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api.lib import (
maintained_selection,
get_container_members,
attribute_values,
write_xgen_file
)
from openpype.hosts.maya.api import current_file
from openpype.pipeline import get_representation_path
class XgenLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
"""Load Xgen as reference"""
families = ["xgen"]
representations = ["ma", "mb"]
label = "Reference Xgen"
icon = "code-fork"
color = "orange"
def get_xgen_xgd_paths(self, palette):
_, maya_extension = os.path.splitext(current_file())
xgen_file = current_file().replace(
maya_extension,
"__{}.xgen".format(palette.replace("|", "").replace(":", "__"))
)
xgd_file = xgen_file.replace(".xgen", ".xgd")
return xgen_file, xgd_file
def process_reference(self, context, name, namespace, options):
# Validate workfile has a path.
if current_file() is None:
QtWidgets.QMessageBox.warning(
None,
"",
"Current workfile has not been saved. Please save the workfile"
" before loading an Xgen."
)
return
maya_filepath = self.prepare_root_value(
self.fname, context["project"]["name"]
)
        # Reference xgen. Xgen does not like being referenced under a group.
new_nodes = []
with maintained_selection():
nodes = cmds.file(
maya_filepath,
namespace=namespace,
sharedReferenceFile=False,
reference=True,
returnNewNodes=True
)
xgen_palette = cmds.ls(
nodes, type="xgmPalette", long=True
)[0].replace("|", "")
xgen_file, xgd_file = self.get_xgen_xgd_paths(xgen_palette)
self.set_palette_attributes(xgen_palette, xgen_file, xgd_file)
            # Change the cache and disk values of xgDataPath and xgProjectPath
            # to ensure paths are set up correctly.
project_path = os.path.dirname(current_file()).replace("\\", "/")
xgenm.setAttr("xgProjectPath", project_path, xgen_palette)
data_path = "${{PROJECT}}xgen/collections/{};{}".format(
xgen_palette.replace(":", "__ns__"),
xgenm.getAttr("xgDataPath", xgen_palette)
)
xgenm.setAttr("xgDataPath", data_path, xgen_palette)
data = {"xgProjectPath": project_path, "xgDataPath": data_path}
write_xgen_file(data, xgen_file)
            # This creates a float expression attribute. If no changes are
            # made to the collection, Xgen does not create an xgd file on
            # save, which causes errors when launching the workfile again
            # because it tries to find the missing xgd file.
name = "custom_float_ignore"
if name not in xgenm.customAttrs(xgen_palette):
xgenm.addCustomAttr(
"custom_float_ignore", xgen_palette
)
shapes = cmds.ls(nodes, shapes=True, long=True)
new_nodes = (list(set(nodes) - set(shapes)))
self[:] = new_nodes
return new_nodes
def set_palette_attributes(self, xgen_palette, xgen_file, xgd_file):
cmds.setAttr(
"{}.xgBaseFile".format(xgen_palette),
os.path.basename(xgen_file),
type="string"
)
cmds.setAttr(
"{}.xgFileName".format(xgen_palette),
os.path.basename(xgd_file),
type="string"
)
cmds.setAttr("{}.xgExportAsDelta".format(xgen_palette), True)
def update(self, container, representation):
"""Workflow for updating Xgen.
- Copy and potentially overwrite the workspace .xgen file.
- Export changes to delta file.
- Set collection attributes to not include delta files.
- Update xgen maya file reference.
- Apply the delta file changes.
- Reset collection attributes to include delta files.
        We have to use this workflow because, when referencing the xgen
        collection, Maya implicitly imports the Xgen data from the xgen file,
        so we don't have any control over when the delta file changes are
        applied.
        The xgen and delta files are implicitly incremented because they use
        the workfile basename.
"""
container_node = container["objectName"]
members = get_container_members(container_node)
xgen_palette = cmds.ls(
members, type="xgmPalette", long=True
)[0].replace("|", "")
xgen_file, xgd_file = self.get_xgen_xgd_paths(xgen_palette)
# Export current changes to apply later.
xgenm.createDelta(xgen_palette.replace("|", ""), xgd_file)
self.set_palette_attributes(xgen_palette, xgen_file, xgd_file)
maya_file = get_representation_path(representation)
_, extension = os.path.splitext(maya_file)
new_xgen_file = maya_file.replace(extension, ".xgen")
data_path = ""
with open(new_xgen_file, "r") as f:
for line in f:
if line.startswith("\txgDataPath"):
line = line.rstrip()
data_path = line.split("\t")[-1]
break
project_path = os.path.dirname(current_file()).replace("\\", "/")
data_path = "${{PROJECT}}xgen/collections/{};{}".format(
xgen_palette.replace(":", "__ns__"),
data_path
)
data = {"xgProjectPath": project_path, "xgDataPath": data_path}
write_xgen_file(data, xgen_file)
attribute_data = {
"{}.xgFileName".format(xgen_palette): os.path.basename(xgen_file),
"{}.xgBaseFile".format(xgen_palette): "",
"{}.xgExportAsDelta".format(xgen_palette): False
}
with attribute_values(attribute_data):
super().update(container, representation)
xgenm.applyDelta(xgen_palette.replace("|", ""), xgd_file)
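
To make the path rewiring in load() and update() above concrete: the published collections folder is prepended to the collection's xgDataPath, keeping the previous search path as a fallback. A minimal sketch of just the string handling, with made-up names:

# Hypothetical palette and data path, mirroring the format() call above.
palette = "char01:collection1"
previous_data_path = "${PROJECT}xgen/collections/char01__ns__collection1"
data_path = "${{PROJECT}}xgen/collections/{};{}".format(
    palette.replace(":", "__ns__"), previous_data_path
)
# Xgen searches the ";"-separated paths left to right, so the published
# collection is found first and the previous path remains a fallback.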

View file

@@ -1,19 +1,18 @@
from maya import cmds
from openpype.pipeline.publish import KnownPublishError
import pyblish.api
class CollectAssData(pyblish.api.InstancePlugin):
"""Collect Ass data."""
class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
"""Collect Arnold Scene Source data."""
# Offset to be after renderable camera collection.
order = pyblish.api.CollectorOrder + 0.2
label = 'Collect Ass'
label = "Collect Arnold Scene Source"
families = ["ass"]
def process(self, instance):
objsets = instance.data['setMembers']
objsets = instance.data["setMembers"]
for objset in objsets:
objset = str(objset)
@@ -21,15 +20,12 @@ class CollectAssData(pyblish.api.InstancePlugin):
if members is None:
self.log.warning("Skipped empty instance: \"%s\" " % objset)
continue
if "content_SET" in objset:
instance.data['setMembers'] = members
self.log.debug('content members: {}'.format(members))
elif objset.startswith("proxy_SET"):
if len(members) != 1:
msg = "You have multiple proxy meshes, please only use one"
raise KnownPublishError(msg)
instance.data['proxy'] = members
self.log.debug('proxy members: {}'.format(members))
if objset.endswith("content_SET"):
instance.data["setMembers"] = cmds.ls(members, long=True)
self.log.debug("content members: {}".format(members))
elif objset.endswith("proxy_SET"):
instance.data["proxy"] = cmds.ls(members, long=True)
self.log.debug("proxy members: {}".format(members))
# Use camera in object set if present else default to render globals
# camera.

View file

@@ -12,7 +12,6 @@ class CollectMayaWorkspace(pyblish.api.ContextPlugin):
label = "Maya Workspace"
hosts = ['maya']
version = (0, 1, 0)
def process(self, context):
workspace = cmds.workspace(rootDirectory=True, query=True)

View file

@@ -1,3 +1,5 @@
from maya import cmds
import pyblish.api
@@ -12,3 +14,31 @@ class CollectPointcache(pyblish.api.InstancePlugin):
def process(self, instance):
if instance.data.get("farm"):
instance.data["families"].append("publish.farm")
proxy_set = None
for node in instance.data["setMembers"]:
if cmds.nodeType(node) != "objectSet":
continue
members = cmds.sets(node, query=True)
if members is None:
self.log.warning("Skipped empty objectset: \"%s\" " % node)
continue
if node.endswith("proxy_SET"):
proxy_set = node
instance.data["proxy"] = []
instance.data["proxyRoots"] = []
for member in members:
instance.data["proxy"].extend(cmds.ls(member, long=True))
instance.data["proxyRoots"].extend(
cmds.ls(member, long=True)
)
instance.data["proxy"].extend(
cmds.listRelatives(member, shapes=True, fullPath=True)
)
self.log.debug(
"proxy members: {}".format(instance.data["proxy"])
)
if proxy_set:
instance.remove(proxy_set)
instance.data["setMembers"].remove(proxy_set)

View file

@@ -42,7 +42,6 @@ Provides:
import re
import os
import platform
import json
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
@@ -318,6 +317,9 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
"aovSeparator": layer_render_products.layer_data.aov_separator, # noqa: E501
"renderSetupIncludeLights": render_instance.data.get(
"renderSetupIncludeLights"
),
"strict_error_checking": render_instance.data.get(
"strict_error_checking", True
)
}

View file

@@ -0,0 +1,71 @@
import os
from maya import cmds
import pyblish.api
from openpype.hosts.maya.api.lib import get_attribute_input
class CollectXgen(pyblish.api.InstancePlugin):
"""Collect Xgen"""
order = pyblish.api.CollectorOrder + 0.499999
label = "Collect Xgen"
families = ["xgen"]
def process(self, instance):
data = {
"xgmPalettes": cmds.ls(instance, type="xgmPalette", long=True),
"xgmDescriptions": cmds.ls(
instance, type="xgmDescription", long=True
),
"xgmSubdPatches": cmds.ls(instance, type="xgmSubdPatch", long=True)
}
data["xgenNodes"] = (
data["xgmPalettes"] +
data["xgmDescriptions"] +
data["xgmSubdPatches"]
)
if data["xgmPalettes"]:
data["xgmPalette"] = data["xgmPalettes"][0]
data["xgenConnections"] = {}
for node in data["xgmSubdPatches"]:
data["xgenConnections"][node] = {}
for attr in ["transform", "geometry"]:
                input_value = get_attribute_input("{}.{}".format(node, attr))
                data["xgenConnections"][node][attr] = input_value
# Collect all files under palette root as resources.
import xgenm
data_path = xgenm.getAttr(
"xgDataPath", data["xgmPalette"].replace("|", "")
).split(os.pathsep)[0]
data_path = data_path.replace(
"${PROJECT}",
xgenm.getAttr("xgProjectPath", data["xgmPalette"].replace("|", ""))
)
transfers = []
# Since we are duplicating this palette when extracting we predict that
# the name will be the basename without namespaces.
predicted_palette_name = data["xgmPalette"].split(":")[-1]
predicted_palette_name = predicted_palette_name.replace("|", "")
for root, _, files in os.walk(data_path):
for file in files:
source = os.path.join(root, file).replace("\\", "/")
destination = os.path.join(
instance.data["resourcesDir"],
"collections",
predicted_palette_name,
source.replace(data_path, "")[1:]
)
transfers.append((source, destination.replace("\\", "/")))
data["transfers"] = transfers
self.log.info(data)
instance.data.update(data)
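
The transfers built above rebase every file found under the palette's data path onto the instance's resources directory. A minimal sketch of one source/destination pair, with made-up paths (in the plugin, os.walk supplies root and file):

import os

data_path = "C:/work/xgen/collections/collection1"
resources_dir = "C:/publish/v001/resources"
source = data_path + "/descriptions/hair.ptx"

# Strip the data path prefix (and its leading slash), then re-root the
# relative remainder under resources/collections/<palette>.
destination = os.path.join(
    resources_dir, "collections", "collection1",
    source.replace(data_path, "")[1:]
).replace("\\", "/")
# -> C:/publish/v001/resources/collections/collection1/descriptions/hair.ptx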

View file

@@ -0,0 +1,160 @@
import os
from maya import cmds
import arnold
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import (
maintained_selection, attribute_values, delete_after
)
class ExtractArnoldSceneSource(publish.Extractor):
"""Extract the content of the instance to an Arnold Scene Source file."""
label = "Extract Arnold Scene Source"
hosts = ["maya"]
families = ["ass"]
asciiAss = False
def process(self, instance):
staging_dir = self.staging_dir(instance)
filename = "{}.ass".format(instance.name)
file_path = os.path.join(staging_dir, filename)
# Mask
mask = arnold.AI_NODE_ALL
node_types = {
"options": arnold.AI_NODE_OPTIONS,
"camera": arnold.AI_NODE_CAMERA,
"light": arnold.AI_NODE_LIGHT,
"shape": arnold.AI_NODE_SHAPE,
"shader": arnold.AI_NODE_SHADER,
"override": arnold.AI_NODE_OVERRIDE,
"driver": arnold.AI_NODE_DRIVER,
"filter": arnold.AI_NODE_FILTER,
"color_manager": arnold.AI_NODE_COLOR_MANAGER,
"operator": arnold.AI_NODE_OPERATOR
}
for key in node_types.keys():
if instance.data.get("mask" + key.title()):
mask = mask ^ node_types[key]
# Motion blur
attribute_data = {
"defaultArnoldRenderOptions.motion_blur_enable": instance.data.get(
"motionBlur", True
),
"defaultArnoldRenderOptions.motion_steps": instance.data.get(
"motionBlurKeys", 2
),
"defaultArnoldRenderOptions.motion_frames": instance.data.get(
"motionBlurLength", 0.5
)
}
# Write out .ass file
kwargs = {
"filename": file_path,
"startFrame": instance.data.get("frameStartHandle", 1),
"endFrame": instance.data.get("frameEndHandle", 1),
"frameStep": instance.data.get("step", 1),
"selected": True,
"asciiAss": self.asciiAss,
"shadowLinks": True,
"lightLinks": True,
"boundingBox": True,
"expandProcedurals": instance.data.get("expandProcedurals", False),
"camera": instance.data["camera"],
"mask": mask
}
filenames = self._extract(
instance.data["setMembers"], attribute_data, kwargs
)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
"name": "ass",
"ext": "ass",
"files": filenames if len(filenames) > 1 else filenames[0],
"stagingDir": staging_dir,
"frameStart": kwargs["startFrame"]
}
instance.data["representations"].append(representation)
self.log.info(
"Extracted instance {} to: {}".format(instance.name, staging_dir)
)
# Extract proxy.
if not instance.data.get("proxy", []):
return
kwargs["filename"] = file_path.replace(".ass", "_proxy.ass")
filenames = self._extract(
instance.data["proxy"], attribute_data, kwargs
)
representation = {
"name": "proxy",
"ext": "ass",
"files": filenames if len(filenames) > 1 else filenames[0],
"stagingDir": staging_dir,
"frameStart": kwargs["startFrame"],
"outputName": "proxy"
}
instance.data["representations"].append(representation)
def _extract(self, nodes, attribute_data, kwargs):
self.log.info("Writing: " + kwargs["filename"])
filenames = []
# Duplicating nodes so they are direct children of the world. This
# makes the hierarchy of any exported ass file the same.
with delete_after() as delete_bin:
duplicate_nodes = []
for node in nodes:
duplicate_transform = cmds.duplicate(node)[0]
                # Discard all children except the shapes.
shapes = cmds.listRelatives(duplicate_transform, shapes=True)
children = cmds.listRelatives(
duplicate_transform, children=True
)
cmds.delete(set(children) - set(shapes))
duplicate_transform = cmds.parent(
duplicate_transform, world=True
)[0]
cmds.rename(duplicate_transform, node.split("|")[-1])
duplicate_transform = "|" + node.split("|")[-1]
duplicate_nodes.append(duplicate_transform)
delete_bin.append(duplicate_transform)
with attribute_values(attribute_data):
with maintained_selection():
self.log.info(
"Writing: {}".format(duplicate_nodes)
)
cmds.select(duplicate_nodes, noExpand=True)
self.log.info(
"Extracting ass sequence with: {}".format(kwargs)
)
exported_files = cmds.arnoldExportAss(**kwargs)
for file in exported_files:
filenames.append(os.path.split(file)[1])
self.log.info("Exported: {}".format(filenames))
return filenames
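
The export mask above starts from AI_NODE_ALL and XORs away each node type the instance flags for exclusion. A minimal sketch of the same bit arithmetic with hypothetical flag values; the real constants come from the arnold module:

# Hypothetical bit flags; arnold defines the real AI_NODE_* values.
AI_NODE_OPTIONS = 0x0001
AI_NODE_CAMERA = 0x0002
AI_NODE_ALL = 0xFFFF

mask = AI_NODE_ALL
for flag, exclude in ((AI_NODE_OPTIONS, True), (AI_NODE_CAMERA, False)):
    if exclude:
        mask ^= flag  # toggle the bit off, since AI_NODE_ALL has it on
assert not mask & AI_NODE_OPTIONS
assert mask & AI_NODE_CAMERA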

View file

@@ -1,106 +0,0 @@
import os
from maya import cmds
import arnold
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import maintained_selection, attribute_values
class ExtractAssStandin(publish.Extractor):
"""Extract the content of the instance to a ass file"""
label = "Arnold Scene Source (.ass)"
hosts = ["maya"]
families = ["ass"]
asciiAss = False
def process(self, instance):
staging_dir = self.staging_dir(instance)
filename = "{}.ass".format(instance.name)
filenames = []
file_path = os.path.join(staging_dir, filename)
# Mask
mask = arnold.AI_NODE_ALL
node_types = {
"options": arnold.AI_NODE_OPTIONS,
"camera": arnold.AI_NODE_CAMERA,
"light": arnold.AI_NODE_LIGHT,
"shape": arnold.AI_NODE_SHAPE,
"shader": arnold.AI_NODE_SHADER,
"override": arnold.AI_NODE_OVERRIDE,
"driver": arnold.AI_NODE_DRIVER,
"filter": arnold.AI_NODE_FILTER,
"color_manager": arnold.AI_NODE_COLOR_MANAGER,
"operator": arnold.AI_NODE_OPERATOR
}
for key in node_types.keys():
if instance.data.get("mask" + key.title()):
mask = mask ^ node_types[key]
# Motion blur
values = {
"defaultArnoldRenderOptions.motion_blur_enable": instance.data.get(
"motionBlur", True
),
"defaultArnoldRenderOptions.motion_steps": instance.data.get(
"motionBlurKeys", 2
),
"defaultArnoldRenderOptions.motion_frames": instance.data.get(
"motionBlurLength", 0.5
)
}
# Write out .ass file
kwargs = {
"filename": file_path,
"startFrame": instance.data.get("frameStartHandle", 1),
"endFrame": instance.data.get("frameEndHandle", 1),
"frameStep": instance.data.get("step", 1),
"selected": True,
"asciiAss": self.asciiAss,
"shadowLinks": True,
"lightLinks": True,
"boundingBox": True,
"expandProcedurals": instance.data.get("expandProcedurals", False),
"camera": instance.data["camera"],
"mask": mask
}
self.log.info("Writing: '%s'" % file_path)
with attribute_values(values):
with maintained_selection():
self.log.info(
"Writing: {}".format(instance.data["setMembers"])
)
cmds.select(instance.data["setMembers"], noExpand=True)
self.log.info(
"Extracting ass sequence with: {}".format(kwargs)
)
exported_files = cmds.arnoldExportAss(**kwargs)
for file in exported_files:
filenames.append(os.path.split(file)[1])
self.log.info("Exported: {}".format(filenames))
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'ass',
'ext': 'ass',
'files': filenames if len(filenames) > 1 else filenames[0],
"stagingDir": staging_dir,
'frameStart': kwargs["startFrame"]
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s"
% (instance.name, staging_dir))

View file

@@ -1,81 +0,0 @@
import os
import contextlib
from maya import cmds
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import maintained_selection
class ExtractAssProxy(publish.Extractor):
"""Extract proxy model as Maya Ascii to use as arnold standin
"""
order = publish.Extractor.order + 0.2
label = "Ass Proxy (Maya ASCII)"
hosts = ["maya"]
families = ["ass"]
def process(self, instance):
@contextlib.contextmanager
def unparent(root):
"""Temporarily unparent `root`"""
parent = cmds.listRelatives(root, parent=True)
if parent:
cmds.parent(root, world=True)
yield
self.log.info("{} - {}".format(root, parent))
cmds.parent(root, parent)
else:
yield
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = "{0}.ma".format(instance.name)
path = os.path.join(stagingdir, filename)
# Perform extraction
self.log.info("Performing extraction..")
# Get only the shape contents we need in such a way that we avoid
# taking along intermediateObjects
proxy = instance.data.get('proxy', None)
if not proxy:
self.log.info("no proxy mesh")
return
members = cmds.ls(proxy,
dag=True,
transforms=True,
noIntermediate=True)
self.log.info(members)
with maintained_selection():
with unparent(members[0]):
cmds.select(members, noExpand=True)
cmds.file(path,
force=True,
typ="mayaAscii",
exportSelected=True,
preserveReferences=False,
channels=False,
constraints=False,
expressions=False,
constructionHistory=False)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'ma',
'ext': 'ma',
'files': filename,
"stagingDir": stagingdir
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))

View file

@@ -20,8 +20,7 @@ class ExtractMayaSceneRaw(publish.Extractor):
"mayaScene",
"setdress",
"layout",
"camerarig",
"xgen"]
"camerarig"]
scene_type = "ma"
def process(self, instance):

View file

@@ -1,4 +1,5 @@
import os
import copy
from maya import cmds
@@ -9,6 +10,7 @@ from openpype.hosts.maya.api.lib import (
maintained_selection,
iter_visible_nodes_in_range
)
from openpype.lib import StringTemplate
class ExtractAlembic(publish.Extractor):
@@ -23,9 +25,7 @@ class ExtractAlembic(publish.Extractor):
label = "Extract Pointcache (Alembic)"
hosts = ["maya"]
families = ["pointcache",
"model",
"vrayproxy"]
families = ["pointcache", "model", "vrayproxy"]
targets = ["local", "remote"]
def process(self, instance):
@@ -87,6 +87,7 @@ class ExtractAlembic(publish.Extractor):
end=end))
suspend = not instance.data.get("refresh", False)
self.log.info(nodes)
with suspended_refresh(suspend=suspend):
with maintained_selection():
cmds.select(nodes, noExpand=True)
@@ -101,9 +102,9 @@ class ExtractAlembic(publish.Extractor):
instance.data["representations"] = []
representation = {
'name': 'abc',
'ext': 'abc',
'files': filename,
"name": "abc",
"ext": "abc",
"files": filename,
"stagingDir": dirname
}
instance.data["representations"].append(representation)
@@ -112,6 +113,48 @@ class ExtractAlembic(publish.Extractor):
self.log.info("Extracted {} to {}".format(instance, dirname))
# Extract proxy.
if not instance.data.get("proxy"):
return
path = path.replace(".abc", "_proxy.abc")
if not instance.data.get("includeParentHierarchy", True):
# Set the root nodes if we don't want to include parents
# The roots are to be considered the ones that are the actual
# direct members of the set
options["root"] = instance.data["proxyRoots"]
with suspended_refresh(suspend=suspend):
with maintained_selection():
cmds.select(instance.data["proxy"])
extract_alembic(
file=path,
startFrame=start,
endFrame=end,
**options
)
template_data = copy.deepcopy(instance.data["anatomyData"])
template_data.update({"ext": "abc"})
templates = instance.context.data["anatomy"].templates["publish"]
published_filename_without_extension = StringTemplate(
templates["file"]
).format(template_data).replace(".abc", "_proxy")
transfers = []
destination = os.path.join(
instance.data["resourcesDir"],
filename.replace(
filename.split(".")[0],
published_filename_without_extension
)
)
transfers.append((path, destination))
for source, destination in transfers:
self.log.debug("Transfer: {} > {}".format(source, destination))
instance.data["transfers"] = transfers
def get_members_and_roots(self, instance):
return instance[:], instance.data.get("setMembers")
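
The proxy transfer above renames the staged file by swapping the staging basename for the published file name; a minimal sketch of that replace with hypothetical names:

filename = "pointcacheMain.abc"
published_filename_without_extension = "sh010_pointcacheMain_v001_proxy"

# Swap only the basename before the first dot, keeping the extension.
destination_name = filename.replace(
    filename.split(".")[0], published_filename_without_extension
)
# -> sh010_pointcacheMain_v001_proxy.abc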

View file

@@ -0,0 +1,250 @@
import os
import shutil
import copy
from maya import cmds
import pyblish.api
from openpype.hosts.maya.api.lib import extract_alembic
from openpype.pipeline import publish
from openpype.lib import StringTemplate
class ExtractWorkfileXgen(publish.Extractor):
"""Extract Workfile Xgen.
    When submitting a render, we need to prep Xgen sidecar files.
"""
# Offset to run before workfile scene save.
order = pyblish.api.ExtractorOrder - 0.499
label = "Extract Workfile Xgen"
families = ["workfile"]
hosts = ["maya"]
def get_render_max_frame_range(self, context):
"""Return start to end frame range including all renderlayers in
context.
This will return the full frame range which includes all frames of the
renderlayer instances to be published/submitted.
Args:
context (pyblish.api.Context): Current publishing context.
Returns:
tuple or None: Start frame, end frame tuple if any renderlayers
found. Otherwise None is returned.
"""
def _is_active_renderlayer(i):
"""Return whether instance is active renderlayer"""
if not i.data.get("publish", True):
return False
is_renderlayer = (
"renderlayer" in i.data.get("families", []) or
i.data["family"] == "renderlayer"
)
return is_renderlayer
start_frame = None
end_frame = None
for instance in context:
if not _is_active_renderlayer(instance):
                # Only consider renderlayer instances.
                continue
            render_start_frame = instance.data["frameStart"]
            render_end_frame = instance.data["frameEnd"]
if start_frame is None:
start_frame = render_start_frame
else:
start_frame = min(start_frame, render_start_frame)
if end_frame is None:
end_frame = render_end_frame
else:
end_frame = max(end_frame, render_end_frame)
if start_frame is None or end_frame is None:
return
return start_frame, end_frame
def process(self, instance):
transfers = []
        # Validate there are any palettes in the scene.
if not cmds.ls(type="xgmPalette"):
self.log.debug(
"No collections found in the scene. Skipping Xgen extraction."
)
return
import xgenm
        # Only extract when we are also publishing a renderlayer.
render_range = self.get_render_max_frame_range(instance.context)
if not render_range:
self.log.debug(
"No publishable renderlayers found in context. Skipping Xgen"
" extraction."
)
return
start_frame, end_frame = render_range
# We decrement start frame and increment end frame so motion blur will
# render correctly.
start_frame -= 1
end_frame += 1
# Extract patches alembic.
path_no_ext, _ = os.path.splitext(instance.context.data["currentFile"])
kwargs = {"attrPrefix": ["xgen"], "stripNamespaces": True}
alembic_files = []
for palette in cmds.ls(type="xgmPalette"):
patch_names = []
for description in xgenm.descriptions(palette):
for name in xgenm.boundGeometry(palette, description):
patch_names.append(name)
alembic_file = "{}__{}.abc".format(
path_no_ext, palette.replace(":", "__ns__")
)
extract_alembic(
alembic_file,
root=patch_names,
selection=False,
startFrame=float(start_frame),
endFrame=float(end_frame),
verbose=True,
**kwargs
)
alembic_files.append(alembic_file)
template_data = copy.deepcopy(instance.data["anatomyData"])
published_maya_path = StringTemplate(
instance.context.data["anatomy"].templates["publish"]["file"]
).format(template_data)
published_basename, _ = os.path.splitext(published_maya_path)
for source in alembic_files:
destination = os.path.join(
os.path.dirname(instance.data["resourcesDir"]),
os.path.basename(
source.replace(path_no_ext, published_basename)
)
)
transfers.append((source, destination))
# Validate that we are using the published workfile.
deadline_settings = instance.context.get("deadline")
if deadline_settings:
publish_settings = deadline_settings["publish"]
if not publish_settings["MayaSubmitDeadline"]["use_published"]:
self.log.debug(
"Not using the published workfile. Abort Xgen extraction."
)
return
# Collect Xgen and Delta files.
xgen_files = []
sources = []
current_dir = os.path.dirname(instance.context.data["currentFile"])
attrs = ["xgFileName", "xgBaseFile"]
for palette in cmds.ls(type="xgmPalette"):
for attr in attrs:
source = os.path.join(
current_dir, cmds.getAttr(palette + "." + attr)
)
if not os.path.exists(source):
continue
ext = os.path.splitext(source)[1]
if ext == ".xgen":
xgen_files.append(source)
if ext == ".xgd":
sources.append(source)
# Copy .xgen file to temporary location and modify.
staging_dir = self.staging_dir(instance)
for source in xgen_files:
destination = os.path.join(staging_dir, os.path.basename(source))
shutil.copy(source, destination)
lines = []
with open(destination, "r") as f:
for line in [line.rstrip() for line in f]:
if line.startswith("\txgProjectPath"):
path = os.path.dirname(instance.data["resourcesDir"])
line = "\txgProjectPath\t\t{}/".format(
path.replace("\\", "/")
)
lines.append(line)
with open(destination, "w") as f:
f.write("\n".join(lines))
sources.append(destination)
# Add resource files to workfile instance.
for source in sources:
basename = os.path.basename(source)
destination = os.path.join(
os.path.dirname(instance.data["resourcesDir"]), basename
)
transfers.append((source, destination))
destination_dir = os.path.join(
instance.data["resourcesDir"], "collections"
)
for palette in cmds.ls(type="xgmPalette"):
project_path = xgenm.getAttr("xgProjectPath", palette)
data_path = xgenm.getAttr("xgDataPath", palette)
data_path = data_path.replace("${PROJECT}", project_path)
for path in data_path.split(";"):
for root, _, files in os.walk(path):
for f in files:
source = os.path.join(root, f)
destination = "{}/{}{}".format(
destination_dir,
palette.replace(":", "__ns__"),
source.replace(path, "")
)
transfers.append((source, destination))
for source, destination in transfers:
self.log.debug("Transfer: {} > {}".format(source, destination))
instance.data["transfers"] = transfers
# Set palette attributes in preparation for workfile publish.
attrs = {"xgFileName": None, "xgBaseFile": ""}
data = {}
for palette in cmds.ls(type="xgmPalette"):
attrs["xgFileName"] = "resources/{}.xgen".format(
palette.replace(":", "__ns__")
)
for attr, value in attrs.items():
node_attr = palette + "." + attr
old_value = cmds.getAttr(node_attr)
try:
data[palette][attr] = old_value
except KeyError:
data[palette] = {attr: old_value}
cmds.setAttr(node_attr, value, type="string")
self.log.info(
"Setting \"{}\" on \"{}\"".format(value, node_attr)
)
cmds.setAttr(palette + "." + "xgExportAsDelta", False)
instance.data["xgenAttributes"] = data

View file

@@ -0,0 +1,142 @@
import os
import copy
import tempfile
from maya import cmds
import xgenm
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import (
maintained_selection, attribute_values, write_xgen_file, delete_after
)
from openpype.lib import StringTemplate
class ExtractXgen(publish.Extractor):
"""Extract Xgen
Workflow:
- Duplicate nodes used for patches.
- Export palette and import onto duplicate nodes.
- Export/Publish duplicate nodes and palette.
- Export duplicate palette to .xgen file and add to publish.
- Publish all xgen files as resources.
"""
label = "Extract Xgen"
hosts = ["maya"]
families = ["xgen"]
scene_type = "ma"
def process(self, instance):
if "representations" not in instance.data:
instance.data["representations"] = []
staging_dir = self.staging_dir(instance)
maya_filename = "{}.{}".format(instance.data["name"], self.scene_type)
maya_filepath = os.path.join(staging_dir, maya_filename)
# Get published xgen file name.
template_data = copy.deepcopy(instance.data["anatomyData"])
template_data.update({"ext": "xgen"})
templates = instance.context.data["anatomy"].templates["publish"]
xgen_filename = StringTemplate(templates["file"]).format(template_data)
xgen_path = os.path.join(
self.staging_dir(instance), xgen_filename
).replace("\\", "/")
type = "mayaAscii" if self.scene_type == "ma" else "mayaBinary"
# Duplicate xgen setup.
with delete_after() as delete_bin:
duplicate_nodes = []
# Collect nodes to export.
for _, connections in instance.data["xgenConnections"].items():
transform_name = connections["transform"].split(".")[0]
                # Duplicate the subd patch geometry transform.
duplicate_transform = cmds.duplicate(transform_name)[0]
delete_bin.append(duplicate_transform)
                # Discard all children except the shapes.
shapes = cmds.listRelatives(duplicate_transform, shapes=True)
children = cmds.listRelatives(
duplicate_transform, children=True
)
cmds.delete(set(children) - set(shapes))
duplicate_transform = cmds.parent(
duplicate_transform, world=True
)[0]
duplicate_nodes.append(duplicate_transform)
# Export temp xgen palette files.
temp_xgen_path = os.path.join(
tempfile.gettempdir(), "temp.xgen"
).replace("\\", "/")
xgenm.exportPalette(
instance.data["xgmPalette"].replace("|", ""), temp_xgen_path
)
self.log.info("Extracted to {}".format(temp_xgen_path))
# Import xgen onto the duplicate.
with maintained_selection():
cmds.select(duplicate_nodes)
palette = xgenm.importPalette(temp_xgen_path, [])
delete_bin.append(palette)
# Export duplicated palettes.
xgenm.exportPalette(palette, xgen_path)
# Export Maya file.
attribute_data = {"{}.xgFileName".format(palette): xgen_filename}
with attribute_values(attribute_data):
with maintained_selection():
cmds.select(duplicate_nodes + [palette])
cmds.file(
maya_filepath,
force=True,
type=type,
exportSelected=True,
preserveReferences=False,
constructionHistory=True,
shader=True,
constraints=True,
expressions=True
)
self.log.info("Extracted to {}".format(maya_filepath))
if os.path.exists(temp_xgen_path):
os.remove(temp_xgen_path)
data = {
"xgDataPath": os.path.join(
instance.data["resourcesDir"],
"collections",
palette.replace(":", "__ns__")
).replace("\\", "/"),
"xgProjectPath": os.path.dirname(
instance.data["resourcesDir"]
).replace("\\", "/")
}
write_xgen_file(data, xgen_path)
# Adding representations.
representation = {
"name": "xgen",
"ext": "xgen",
"files": xgen_filename,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)
representation = {
"name": self.scene_type,
"ext": self.scene_type,
"files": maya_filename,
"stagingDir": staging_dir
}
instance.data["representations"].append(representation)

View file

@@ -1,64 +0,0 @@
import os
from maya import cmds
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import (
suspended_refresh,
maintained_selection
)
class ExtractXgenCache(publish.Extractor):
"""Produce an alembic of just xgen interactive groom
"""
label = "Extract Xgen ABC Cache"
hosts = ["maya"]
families = ["xgen"]
optional = True
def process(self, instance):
# Collect the out set nodes
out_descriptions = [node for node in instance
if cmds.nodeType(node) == "xgmSplineDescription"]
start = 1
end = 1
self.log.info("Extracting Xgen Cache..")
dirname = self.staging_dir(instance)
parent_dir = self.staging_dir(instance)
filename = "{name}.abc".format(**instance.data)
path = os.path.join(parent_dir, filename)
with suspended_refresh():
with maintained_selection():
command = (
'-file '
+ path
+ ' -df "ogawa" -fr '
+ str(start)
+ ' '
+ str(end)
+ ' -step 1 -mxf -wfw'
)
for desc in out_descriptions:
command += (" -obj " + desc)
cmds.xgmSplineCache(export=True, j=command)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'abc',
'ext': 'abc',
'files': filename,
"stagingDir": dirname,
}
instance.data["representations"].append(representation)
self.log.info("Extracted {} to {}".format(instance, dirname))

View file

@@ -0,0 +1,36 @@
from maya import cmds
import pyblish.api
class ResetXgenAttributes(pyblish.api.InstancePlugin):
"""Reset Xgen attributes.
When the incremental save of the workfile triggers, the Xgen attributes
changes so this plugin will change it back to the values before publishing.
"""
label = "Reset Xgen Attributes."
# Offset to run after workfile increment plugin.
order = pyblish.api.IntegratorOrder + 10.0
families = ["workfile"]
def process(self, instance):
xgen_attributes = instance.data.get("xgenAttributes", {})
if not xgen_attributes:
return
for palette, data in xgen_attributes.items():
for attr, value in data.items():
node_attr = "{}.{}".format(palette, attr)
self.log.info(
"Setting \"{}\" on \"{}\"".format(value, node_attr)
)
cmds.setAttr(node_attr, value, type="string")
cmds.setAttr(palette + ".xgExportAsDelta", True)
        # Need to save the scene because the attribute changes above do not
        # mark the scene as modified, so the user could exit without
        # committing the changes.
self.log.info("Saving changes.")
cmds.file(save=True)

View file

@@ -0,0 +1,106 @@
import maya.cmds as cmds
import pyblish.api
from openpype.pipeline.publish import (
ValidateContentsOrder, PublishValidationError
)
class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
"""Validate Arnold Scene Source.
    We require at least one root node/parent for the meshes. This ensures we
    can duplicate the nodes and preserve their names.
    If using proxies, the content and proxy nodes need to share the same names
    and must not be parented to the world. This ends up requiring at least two
    groups: one with the content nodes and one with the proxy nodes.
"""
order = ValidateContentsOrder
hosts = ["maya"]
families = ["ass"]
label = "Validate Arnold Scene Source"
def _get_nodes_data(self, nodes):
ungrouped_nodes = []
nodes_by_name = {}
parents = []
for node in nodes:
node_split = node.split("|")
if len(node_split) == 2:
ungrouped_nodes.append(node)
parent = "|".join(node_split[:-1])
if parent:
parents.append(parent)
nodes_by_name[node_split[-1]] = node
            # listRelatives returns None when the node has no shapes.
            for shape in cmds.listRelatives(node, shapes=True) or []:
nodes_by_name[shape.split("|")[-1]] = shape
return ungrouped_nodes, nodes_by_name, parents
def process(self, instance):
ungrouped_nodes = []
nodes, content_nodes_by_name, content_parents = self._get_nodes_data(
instance.data["setMembers"]
)
ungrouped_nodes.extend(nodes)
nodes, proxy_nodes_by_name, proxy_parents = self._get_nodes_data(
instance.data.get("proxy", [])
)
ungrouped_nodes.extend(nodes)
# Validate against nodes directly parented to world.
if ungrouped_nodes:
raise PublishValidationError(
"Found nodes parented to the world: {}\n"
"All nodes need to be grouped.".format(ungrouped_nodes)
)
# Proxy validation.
if not instance.data.get("proxy", []):
return
# Validate for content and proxy nodes amount being the same.
if len(instance.data["setMembers"]) != len(instance.data["proxy"]):
raise PublishValidationError(
"Amount of content nodes ({}) and proxy nodes ({}) needs to "
"be the same.".format(
len(instance.data["setMembers"]),
len(instance.data["proxy"])
)
)
# Validate against content and proxy nodes sharing same parent.
if list(set(content_parents) & set(proxy_parents)):
raise PublishValidationError(
"Content and proxy nodes cannot share the same parent."
)
# Validate for content and proxy nodes sharing same names.
sorted_content_names = sorted(content_nodes_by_name.keys())
sorted_proxy_names = sorted(proxy_nodes_by_name.keys())
odd_content_names = list(
set(sorted_content_names) - set(sorted_proxy_names)
)
odd_content_nodes = [
content_nodes_by_name[x] for x in odd_content_names
]
odd_proxy_names = list(
set(sorted_proxy_names) - set(sorted_content_names)
)
odd_proxy_nodes = [
proxy_nodes_by_name[x] for x in odd_proxy_names
]
        if sorted_content_names != sorted_proxy_names:
raise PublishValidationError(
"Content and proxy nodes need to share the same names.\n"
"Content nodes not matching: {}\n"
"Proxy nodes not matching: {}".format(
odd_content_nodes, odd_proxy_nodes
)
)
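
The name matching above reduces to set arithmetic over short names; a standalone sketch with hypothetical node names showing a failing case:

content = {"body_GEO": "|content_GRP|body_GEO",
           "head_GEO": "|content_GRP|head_GEO"}
proxy = {"body_GEO": "|proxy_GRP|body_GEO"}

# Names present on one side but not the other fail validation.
odd_content = sorted(set(content) - set(proxy))  # ['head_GEO']
odd_proxy = sorted(set(proxy) - set(content))    # []
if odd_content or odd_proxy:
    print("Content nodes not matching:", [content[n] for n in odd_content])
    print("Proxy nodes not matching:", [proxy[n] for n in odd_proxy])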

View file

@@ -2,11 +2,13 @@ import os
import types
import maya.cmds as cmds
from mtoa.core import createOptions
import pyblish.api
from openpype.pipeline.publish import (
RepairAction,
ValidateContentsOrder,
PublishValidationError
)
@@ -34,8 +36,9 @@ class ValidateAssRelativePaths(pyblish.api.InstancePlugin):
"defaultArnoldRenderOptions.pspath"
)
except ValueError:
assert False, ("Can not validate, render setting were not opened "
"yet so Arnold setting cannot be validate")
raise PublishValidationError(
"Default Arnold options has not been created yet."
)
scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True))
scene_name, _ = os.path.splitext(scene_basename)
@@ -66,6 +69,8 @@ class ValidateAssRelativePaths(pyblish.api.InstancePlugin):
@classmethod
def repair(cls, instance):
createOptions()
texture_path = cmds.getAttr("defaultArnoldRenderOptions.tspath")
procedural_path = cmds.getAttr("defaultArnoldRenderOptions.pspath")

View file

@@ -58,23 +58,23 @@ class ValidateAttributes(pyblish.api.ContextPlugin):
# Filter families.
families = [instance.data["family"]]
families += instance.data.get("families", [])
families = list(set(families) & set(self.attributes.keys()))
families = list(set(families) & set(cls.attributes.keys()))
if not families:
continue
# Get all attributes to validate.
attributes = {}
for family in families:
for preset in self.attributes[family]:
for preset in cls.attributes[family]:
[node_name, attribute_name] = preset.split(".")
try:
attributes[node_name].update(
{attribute_name: self.attributes[family][preset]}
{attribute_name: cls.attributes[family][preset]}
)
except KeyError:
attributes.update({
node_name: {
attribute_name: self.attributes[family][preset]
attribute_name: cls.attributes[family][preset]
}
})

View file

@@ -19,7 +19,6 @@ class ValidateColorSets(pyblish.api.Validator):
order = ValidateMeshOrder
hosts = ['maya']
families = ['model']
category = 'geometry'
label = 'Mesh ColorSets'
actions = [openpype.hosts.maya.api.action.SelectInvalidAction,
RepairAction]

View file

@@ -11,10 +11,6 @@ from openpype.pipeline.publish import (
)
def float_round(num, places=0, direction=ceil):
return direction(num * (10**places)) / float(10**places)
class ValidateMayaUnits(pyblish.api.ContextPlugin):
"""Check if the Maya units are set correct"""
@@ -36,18 +32,12 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin):
# Collected units
linearunits = context.data.get('linearUnits')
angularunits = context.data.get('angularUnits')
# TODO(antirotor): This is hack as for framerates having multiple
# decimal places. FTrack is ceiling decimal values on
# fps to two decimal places but Maya 2019+ is reporting those fps
# with much higher resolution. As we currently cannot fix Ftrack
# rounding, we have to round those numbers coming from Maya.
# NOTE: this must be revisited yet again as it seems that Ftrack is
# now flooring the value?
fps = float_round(context.data.get('fps'), 2, ceil)
fps = context.data.get('fps')
        # TODO replace query with using 'context.data["assetEntity"]'
asset_doc = get_current_project_asset()
asset_fps = asset_doc["data"]["fps"]
asset_fps = mayalib.convert_to_maya_fps(asset_doc["data"]["fps"])
self.log.info('Units (linear): {0}'.format(linearunits))
self.log.info('Units (angular): {0}'.format(angularunits))

View file

@@ -19,7 +19,6 @@ class ValidateMeshArnoldAttributes(pyblish.api.InstancePlugin):
order = ValidateMeshOrder
hosts = ["maya"]
families = ["model"]
category = "geometry"
label = "Mesh Arnold Attributes"
actions = [
openpype.hosts.maya.api.action.SelectInvalidAction,

View file

@@ -48,7 +48,6 @@ class ValidateMeshHasUVs(pyblish.api.InstancePlugin):
order = ValidateMeshOrder
hosts = ['maya']
families = ['model']
category = 'geometry'
label = 'Mesh Has UVs'
actions = [openpype.hosts.maya.api.action.SelectInvalidAction]
optional = True

View file

@@ -15,8 +15,6 @@ class ValidateMeshLaminaFaces(pyblish.api.InstancePlugin):
order = ValidateMeshOrder
hosts = ['maya']
families = ['model']
category = 'geometry'
version = (0, 1, 0)
label = 'Mesh Lamina Faces'
actions = [openpype.hosts.maya.api.action.SelectInvalidAction]

View file

@@ -19,8 +19,6 @@ class ValidateMeshNonZeroEdgeLength(pyblish.api.InstancePlugin):
order = ValidateMeshOrder
families = ['model']
hosts = ['maya']
category = 'geometry'
version = (0, 1, 0)
label = 'Mesh Edge Length Non Zero'
actions = [openpype.hosts.maya.api.action.SelectInvalidAction]
optional = True

View file

@@ -20,8 +20,6 @@ class ValidateMeshNormalsUnlocked(pyblish.api.Validator):
order = ValidateMeshOrder
hosts = ['maya']
families = ['model']
category = 'geometry'
version = (0, 1, 0)
label = 'Mesh Normals Unlocked'
actions = [openpype.hosts.maya.api.action.SelectInvalidAction,
RepairAction]

View file

@@ -235,7 +235,6 @@ class ValidateMeshHasOverlappingUVs(pyblish.api.InstancePlugin):
order = ValidateMeshOrder
hosts = ['maya']
families = ['model']
category = 'geometry'
label = 'Mesh Has Overlapping UVs'
actions = [openpype.hosts.maya.api.action.SelectInvalidAction]
optional = True

View file

@@ -21,9 +21,7 @@ class ValidateMeshSingleUVSet(pyblish.api.InstancePlugin):
order = ValidateMeshOrder
hosts = ['maya']
families = ['model', 'pointcache']
category = 'uv'
optional = True
version = (0, 1, 0)
label = "Mesh Single UV Set"
actions = [openpype.hosts.maya.api.action.SelectInvalidAction,
RepairAction]

View file

@@ -63,7 +63,6 @@ class ValidateMeshVerticesHaveEdges(pyblish.api.InstancePlugin):
order = ValidateMeshOrder
hosts = ['maya']
families = ['model']
category = 'geometry'
label = 'Mesh Vertices Have Edges'
actions = [openpype.hosts.maya.api.action.SelectInvalidAction,
RepairAction]

View file

@@ -16,7 +16,6 @@ class ValidateNoDefaultCameras(pyblish.api.InstancePlugin):
order = ValidateContentsOrder
hosts = ['maya']
families = ['camera']
version = (0, 1, 0)
label = "No Default Cameras"
actions = [openpype.hosts.maya.api.action.SelectInvalidAction]

View file

@@ -23,8 +23,6 @@ class ValidateNoNamespace(pyblish.api.InstancePlugin):
order = ValidateContentsOrder
hosts = ['maya']
families = ['model']
category = 'cleanup'
version = (0, 1, 0)
label = 'No Namespaces'
actions = [openpype.hosts.maya.api.action.SelectInvalidAction,
RepairAction]

View file

@@ -43,8 +43,6 @@ class ValidateNoNullTransforms(pyblish.api.InstancePlugin):
order = ValidateContentsOrder
hosts = ['maya']
families = ['model']
category = 'cleanup'
version = (0, 1, 0)
label = 'No Empty/Null Transforms'
actions = [RepairAction,
openpype.hosts.maya.api.action.SelectInvalidAction]

View file

@@ -24,7 +24,6 @@ class ValidateRigJointsHidden(pyblish.api.InstancePlugin):
order = ValidateContentsOrder
hosts = ['maya']
families = ['rig']
version = (0, 1, 0)
label = "Joints Hidden"
actions = [openpype.hosts.maya.api.action.SelectInvalidAction,
RepairAction]

View file

@@ -31,8 +31,6 @@ class ValidateSceneSetWorkspace(pyblish.api.ContextPlugin):
order = ValidatePipelineOrder
hosts = ['maya']
category = 'scene'
version = (0, 1, 0)
label = 'Maya Workspace Set'
def process(self, context):

View file

@@ -38,9 +38,7 @@ class ValidateShapeDefaultNames(pyblish.api.InstancePlugin):
order = ValidateContentsOrder
hosts = ['maya']
families = ['model']
category = 'cleanup'
optional = True
version = (0, 1, 0)
label = "Shape Default Naming"
actions = [openpype.hosts.maya.api.action.SelectInvalidAction,
RepairAction]

View file

@@ -32,9 +32,7 @@ class ValidateTransformNamingSuffix(pyblish.api.InstancePlugin):
order = ValidateContentsOrder
hosts = ['maya']
families = ['model']
category = 'cleanup'
optional = True
version = (0, 1, 0)
label = 'Suffix Naming Conventions'
actions = [openpype.hosts.maya.api.action.SelectInvalidAction]
SUFFIX_NAMING_TABLE = {"mesh": ["_GEO", "_GES", "_GEP", "_OSD"],

View file

@@ -18,8 +18,6 @@ class ValidateTransformZero(pyblish.api.Validator):
order = ValidateContentsOrder
hosts = ["maya"]
families = ["model"]
category = "geometry"
version = (0, 1, 0)
label = "Transform Zero (Freeze)"
actions = [openpype.hosts.maya.api.action.SelectInvalidAction]

View file

@@ -13,7 +13,6 @@ class ValidateUnrealMeshTriangulated(pyblish.api.InstancePlugin):
order = ValidateMeshOrder
hosts = ["maya"]
families = ["staticMesh"]
category = "geometry"
label = "Mesh is Triangulated"
actions = [openpype.hosts.maya.api.action.SelectInvalidAction]
active = False

View file

@@ -0,0 +1,18 @@
from maya import cmds
import pyblish.api
from openpype.pipeline import PublishValidationError
class ValidateVray(pyblish.api.InstancePlugin):
"""Validate general Vray setup."""
order = pyblish.api.ValidatorOrder
label = 'VRay'
hosts = ["maya"]
families = ["vrayproxy"]
def process(self, instance):
# Validate vray plugin is loaded.
if not cmds.pluginInfo("vrayformaya", query=True, loaded=True):
raise PublishValidationError("Vray plugin is not loaded.")

View file

@@ -0,0 +1,59 @@
import json
import maya.cmds as cmds
import xgenm
import pyblish.api
from openpype.pipeline.publish import PublishValidationError
class ValidateXgen(pyblish.api.InstancePlugin):
"""Validate Xgen data."""
label = "Validate Xgen"
order = pyblish.api.ValidatorOrder
    hosts = ["maya"]
families = ["xgen"]
def process(self, instance):
set_members = instance.data.get("setMembers")
# Only 1 collection/node per instance.
if len(set_members) != 1:
raise PublishValidationError(
"Only one collection per instance is allowed."
" Found:\n{}".format(set_members)
)
# Only xgen palette node is allowed.
node_type = cmds.nodeType(set_members[0])
if node_type != "xgmPalette":
raise PublishValidationError(
"Only node of type \"xgmPalette\" are allowed. Referred to as"
" \"collection\" in the Maya UI."
" Node type found: {}".format(node_type)
)
        # Can't have inactive modifiers in the collection because Xgen will
        # try to look for them when loading.
palette = instance.data["xgmPalette"].replace("|", "")
inactive_modifiers = {}
for description in instance.data["xgmDescriptions"]:
description = description.split("|")[-2]
modifier_names = xgenm.fxModules(palette, description)
for name in modifier_names:
attr = xgenm.getAttr("active", palette, description, name)
# Attribute values are lowercase strings "false"/"true".
if attr == "false":
try:
inactive_modifiers[description].append(name)
except KeyError:
inactive_modifiers[description] = [name]
if inactive_modifiers:
raise PublishValidationError(
"There are inactive modifiers on the collection. "
"Please delete these:\n{}".format(
json.dumps(inactive_modifiers, indent=4, sort_keys=True)
)
)
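A companion repair could delete the reported modifiers instead of asking the artist to; a minimal sketch, assuming stock XGen's xgenm.removeFXModule(palette, description, name) (the helper name is illustrative, not part of this plugin):

import xgenm

def remove_inactive_modifiers(palette, inactive_modifiers):
    # Drop every FX module the validator reported as inactive.
    for description, names in inactive_modifiers.items():
        for name in names:
            xgenm.removeFXModule(palette, description, name)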

View file

@ -1,16 +1,33 @@
import os
from functools import partial
from openpype.settings import get_project_settings
from openpype.pipeline import install_host
from openpype.hosts.maya.api import MayaHost
from maya import cmds
host = MayaHost()
install_host(host)
print("starting OpenPype usersetup")
print("Starting OpenPype usersetup...")
# build a shelf
# Open Workfile Post Initialization.
key = "OPENPYPE_OPEN_WORKFILE_POST_INITIALIZATION"
if bool(int(os.environ.get(key, "0"))):
cmds.evalDeferred(
partial(
cmds.file,
os.environ["AVALON_LAST_WORKFILE"],
open=True,
force=True
),
lowestPriority=True
)
# Build a shelf.
settings = get_project_settings(os.environ['AVALON_PROJECT'])
shelf_preset = settings['maya'].get('project_shelf')
@ -26,7 +43,10 @@ if shelf_preset:
print(import_string)
exec(import_string)
cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], iconPath=icon_path, preset=shelf_preset)")
cmds.evalDeferred(
"mlib.shelf(name=shelf_preset['name'], iconPath=icon_path,"
" preset=shelf_preset)"
)
print("finished OpenPype usersetup")
print("Finished OpenPype usersetup.")

View file

@ -53,12 +53,18 @@ class GizmoMenu():
item_type = item.get("sourcetype")
if item_type == ("python" or "file"):
if item_type == "python":
parent.addCommand(
item["title"],
command=str(item["command"]),
icon=item.get("icon"),
shortcut=item.get("hotkey")
shortcut=item.get("shortcut")
)
elif item_type == "file":
parent.addCommand(
item['title'],
"nuke.createNode('{}')".format(item.get('file_name')),
shortcut=item.get('shortcut')
)
# add separator

View file

@ -1,7 +1,7 @@
import os
import nuke
import pyblish.api
import openpype.api as api
from openpype.lib import get_version_from_path
import openpype.hosts.nuke.api as napi
from openpype.pipeline import KnownPublishError
@ -57,7 +57,7 @@ class CollectContextData(pyblish.api.ContextPlugin):
"fps": root_node['fps'].value(),
"currentFile": current_file,
"version": int(api.get_version_from_path(current_file)),
"version": int(get_version_from_path(current_file)),
"host": pyblish.api.current_host(),
"hostVersion": nuke.NUKE_VERSION_STRING

View file

@ -5,7 +5,7 @@ from openpype.lib import BoolDef
from openpype.pipeline import (
Creator,
CreatedInstance,
legacy_io
CreatorError
)
from openpype.lib import prepare_template_data
from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS
@ -13,27 +13,16 @@ from openpype.hosts.photoshop.api.pipeline import cache_and_get_instances
class ImageCreator(Creator):
"""Creates image instance for publishing."""
"""Creates image instance for publishing.
The result of an 'image' instance is an image of all visible layers, or
image(s) of the selected layers.
"""
identifier = "image"
label = "Image"
family = "image"
description = "Image creator"
def collect_instances(self):
for instance_data in cache_and_get_instances(self):
# legacy instances have family=='image'
creator_id = (instance_data.get("creator_identifier") or
instance_data.get("family"))
if creator_id == self.identifier:
instance_data = self._handle_legacy(instance_data)
layer = api.stub().get_layer(instance_data["members"][0])
instance_data["layer"] = layer
instance = CreatedInstance.from_existing(
instance_data, self
)
self._add_instance_to_context(instance)
def create(self, subset_name_from_ui, data, pre_create_data):
groups_to_create = []
top_layers_to_wrap = []
@ -59,9 +48,10 @@ class ImageCreator(Creator):
try:
group = stub.group_selected_layers(subset_name_from_ui)
except:
raise ValueError("Cannot group locked Bakcground layer!")
raise CreatorError("Cannot group locked Background layer!")
groups_to_create.append(group)
# create empty group if nothing selected
if not groups_to_create and not top_layers_to_wrap:
group = stub.create_group(subset_name_from_ui)
groups_to_create.append(group)
@ -73,13 +63,16 @@ class ImageCreator(Creator):
groups_to_create.append(group)
layer_name = ''
creating_multiple_groups = len(groups_to_create) > 1
# use the artist-chosen option OR force the layer name if multiple
# subsets are created, to differentiate them
use_layer_name = (pre_create_data.get("use_layer_name") or
len(groups_to_create) > 1)
for group in groups_to_create:
subset_name = subset_name_from_ui # reset to name from creator UI
layer_names_in_hierarchy = []
created_group_name = self._clean_highlights(stub, group.name)
if creating_multiple_groups:
if use_layer_name:
layer_name = re.sub(
"[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
"",
@ -112,6 +105,21 @@ class ImageCreator(Creator):
stub.rename_layer(group.id,
stub.PUBLISH_ICON + created_group_name)
def collect_instances(self):
for instance_data in cache_and_get_instances(self):
# legacy instances have family=='image'
creator_id = (instance_data.get("creator_identifier") or
instance_data.get("family"))
if creator_id == self.identifier:
instance_data = self._handle_legacy(instance_data)
layer = api.stub().get_layer(instance_data["members"][0])
instance_data["layer"] = layer
instance = CreatedInstance.from_existing(
instance_data, self
)
self._add_instance_to_context(instance)
def update_instances(self, update_list):
self.log.debug("update_list:: {}".format(update_list))
for created_inst, _changes in update_list:
@ -137,12 +145,42 @@ class ImageCreator(Creator):
label="Create only for selected"),
BoolDef("create_multiple",
default=True,
label="Create separate instance for each selected")
label="Create separate instance for each selected"),
BoolDef("use_layer_name",
default=False,
label="Use layer name in subset")
]
return output
def get_detail_description(self):
return """Creator for Image instances"""
return """Creator for Image instances
The main publishable item in Photoshop is of the `image` family. The
result of this item (instance) is a picture that can be loaded and used
in other DCCs (for example as a single layer in an AfterEffects
composition, as a reference in Maya, etc.).
There are a couple of options for what to publish:
- a separate image per selected layer (or group of layers)
- one image for all selected layers
- all visible layers (groups) flattened into a single image
In most cases you will want to keep `Create only for selected`
toggled on and select what you would like to publish.
Toggling this option off allows you to create an instance for all
visible layers without needing to select them explicitly.
Use 'Create separate instance for each selected' to create separate
images per selected layer (or group of layers).
'Use layer name in subset' explicitly adds the layer name to the subset
name. The position of this name is configurable in
`project_settings/global/tools/creator/subset_name_profiles`.
If the layer placeholder ({layer}) is not used in `subset_name_profiles`
but the layer name should be used (set explicitly in the UI, or
implicitly if multiple images are created), it is appended to the
subset name in capitalized form.
"""
def _handle_legacy(self, instance_data):
"""Converts old instances to new format."""
@ -155,7 +193,7 @@ class ImageCreator(Creator):
instance_data.pop("uuid")
if not instance_data.get("task"):
instance_data["task"] = legacy_io.Session.get("AVALON_TASK")
instance_data["task"] = self.create_context.get_current_task_name()
if not instance_data.get("variant"):
instance_data["variant"] = ''

View file

@ -2,8 +2,7 @@ import openpype.hosts.photoshop.api as api
from openpype.client import get_asset_by_name
from openpype.pipeline import (
AutoCreator,
CreatedInstance,
legacy_io
CreatedInstance
)
from openpype.hosts.photoshop.api.pipeline import cache_and_get_instances
@ -38,10 +37,11 @@ class PSWorkfileCreator(AutoCreator):
existing_instance = instance
break
project_name = legacy_io.Session["AVALON_PROJECT"]
asset_name = legacy_io.Session["AVALON_ASSET"]
task_name = legacy_io.Session["AVALON_TASK"]
host_name = legacy_io.Session["AVALON_APP"]
context = self.create_context
project_name = context.get_current_project_name()
asset_name = context.get_current_asset_name()
task_name = context.get_current_task_name()
host_name = context.host_name
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(

View file

@ -37,7 +37,7 @@ class TrayPublisherHost(HostBase, IPublishHost):
return HostContext.get_context_data()
def update_context_data(self, data, changes):
HostContext.save_context_data(data, changes)
HostContext.save_context_data(data)
def set_project_name(self, project_name):
# TODO Deregister project specific plugins and register new project

View file

@ -33,6 +33,8 @@ class BatchMovieCreator(TrayPublishCreator):
create_allow_context_change = False
version_regex = re.compile(r"^(.+)_v([0-9]+)$")
# Position batch creator after simple creators
order = 110
def __init__(self, project_settings, *args, **kwargs):
super(BatchMovieCreator, self).__init__(project_settings,

View file

@ -30,7 +30,7 @@ from .vendor_bin_utils import (
)
from .attribute_definitions import (
AbtractAttrDef,
AbstractAttrDef,
UIDef,
UISeparatorDef,
@ -82,9 +82,6 @@ from .mongo import (
validate_mongo_connection,
OpenPypeMongoConnection
)
from .anatomy import (
Anatomy
)
from .dateutils import (
get_datetime_data,
@ -119,36 +116,19 @@ from .transcoding import (
)
from .avalon_context import (
CURRENT_DOC_SCHEMAS,
PROJECT_NAME_ALLOWED_SYMBOLS,
PROJECT_NAME_REGEX,
create_project,
is_latest,
any_outdated,
get_asset,
get_linked_assets,
get_latest_version,
get_system_general_anatomy_data,
get_workfile_template_key,
get_workfile_template_key_from_context,
get_workdir_data,
get_workdir,
get_workdir_with_workdir_data,
get_last_workfile_with_version,
get_last_workfile,
create_workfile_doc,
save_workfile_data_to_doc,
get_workfile_doc,
BuildWorkfile,
get_creator_by_name,
get_custom_workfile_template,
change_timer_to_current_context,
get_custom_workfile_template_by_context,
get_custom_workfile_template_by_string_context,
get_custom_workfile_template
@ -186,8 +166,6 @@ from .plugin_tools import (
get_subset_name,
get_subset_name_with_asset_doc,
prepare_template_data,
filter_pyblish_plugins,
set_plugin_attributes_from_settings,
source_hash,
)
@ -246,7 +224,7 @@ __all__ = [
"get_ffmpeg_tool_path",
"is_oiio_supported",
"AbtractAttrDef",
"AbstractAttrDef",
"UIDef",
"UISeparatorDef",
@ -278,34 +256,17 @@ __all__ = [
"convert_ffprobe_fps_to_float",
"CURRENT_DOC_SCHEMAS",
"PROJECT_NAME_ALLOWED_SYMBOLS",
"PROJECT_NAME_REGEX",
"create_project",
"is_latest",
"any_outdated",
"get_asset",
"get_linked_assets",
"get_latest_version",
"get_system_general_anatomy_data",
"get_workfile_template_key",
"get_workfile_template_key_from_context",
"get_workdir_data",
"get_workdir",
"get_workdir_with_workdir_data",
"get_last_workfile_with_version",
"get_last_workfile",
"create_workfile_doc",
"save_workfile_data_to_doc",
"get_workfile_doc",
"BuildWorkfile",
"get_creator_by_name",
"change_timer_to_current_context",
"get_custom_workfile_template_by_context",
"get_custom_workfile_template_by_string_context",
"get_custom_workfile_template",
@ -338,8 +299,6 @@ __all__ = [
"TaskNotSetError",
"get_subset_name",
"get_subset_name_with_asset_doc",
"filter_pyblish_plugins",
"set_plugin_attributes_from_settings",
"source_hash",
"format_file_size",
@ -358,8 +317,6 @@ __all__ = [
"terminal",
"Anatomy",
"get_datetime_data",
"get_formatted_current_time",

View file

@ -1,38 +0,0 @@
"""Code related to project Anatomy was moved
to 'openpype.pipeline.anatomy'; please change your imports as soon as
possible. The file will probably be removed in OpenPype 3.14.*
"""
import warnings
import functools
class AnatomyDeprecatedWarning(DeprecationWarning):
pass
def anatomy_deprecated(func):
"""Mark functions as deprecated.
It will result in a warning being emitted when the function is used.
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter("always", AnatomyDeprecatedWarning)
warnings.warn(
(
"Deprecated import of 'Anatomy'."
" Class was moved to 'openpype.pipeline.anatomy'."
" Please change your imports of Anatomy in codebase."
),
category=AnatomyDeprecatedWarning
)
return func(*args, **kwargs)
return new_func
@anatomy_deprecated
def Anatomy(*args, **kwargs):
from openpype.pipeline.anatomy import Anatomy
return Anatomy(*args, **kwargs)
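With this shim deleted, the old import path fails outright instead of warning; the replacement is the import the shim was already forwarding to (the project name is a hypothetical example):

from openpype.pipeline.anatomy import Anatomy

anatomy = Anatomy("my_project")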

View file

@ -20,7 +20,7 @@ def register_attr_def_class(cls):
Currently, registered definitions are used to deserialize data to objects.
Args:
cls (AbtractAttrDef): Non-abstract class to be registered with unique
cls (AbstractAttrDef): Non-abstract class to be registered with unique
'type' attribute.
Raises:
@ -36,7 +36,7 @@ def get_attributes_keys(attribute_definitions):
"""Collect keys from list of attribute definitions.
Args:
attribute_definitions (List[AbtractAttrDef]): Objects of attribute
attribute_definitions (List[AbstractAttrDef]): Objects of attribute
definitions.
Returns:
@ -57,8 +57,8 @@ def get_default_values(attribute_definitions):
"""Receive default values for attribute definitions.
Args:
attribute_definitions (List[AbtractAttrDef]): Attribute definitions for
which default values should be collected.
attribute_definitions (List[AbstractAttrDef]): Attribute definitions
for which default values should be collected.
Returns:
Dict[str, Any]: Default values for passed attribute definitions.
@ -76,15 +76,15 @@ def get_default_values(attribute_definitions):
class AbstractAttrDefMeta(ABCMeta):
"""Meta class to validate existence of 'key' attribute.
"""Metaclass to validate existence of 'key' attribute.
Each object of `AbtractAttrDef` mus have defined 'key' attribute.
Each object of `AbstractAttrDef` must have a defined 'key' attribute.
"""
def __call__(self, *args, **kwargs):
obj = super(AbstractAttrDefMeta, self).__call__(*args, **kwargs)
init_class = getattr(obj, "__init__class__", None)
if init_class is not AbtractAttrDef:
if init_class is not AbstractAttrDef:
raise TypeError("{} super was not called in __init__.".format(
type(obj)
))
@ -92,7 +92,7 @@ class AbstractAttrDefMeta(ABCMeta):
@six.add_metaclass(AbstractAttrDefMeta)
class AbtractAttrDef(object):
class AbstractAttrDef(object):
"""Abstraction of attribute definiton.
Each attribute definition must have implemented validation and
@ -145,7 +145,7 @@ class AbtractAttrDef(object):
self.disabled = disabled
self._id = uuid.uuid4().hex
self.__init__class__ = AbtractAttrDef
self.__init__class__ = AbstractAttrDef
@property
def id(self):
@ -154,7 +154,15 @@ class AbtractAttrDef(object):
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.key == other.key
return (
self.key == other.key
and self.hidden == other.hidden
and self.default == other.default
and self.disabled == other.disabled
)
def __ne__(self, other):
return not self.__eq__(other)
@abstractproperty
def type(self):
@ -212,7 +220,7 @@ class AbtractAttrDef(object):
# UI attribute definitions won't hold a value
# -----------------------------------------
class UIDef(AbtractAttrDef):
class UIDef(AbstractAttrDef):
is_value_def = False
def __init__(self, key=None, default=None, *args, **kwargs):
@ -237,7 +245,7 @@ class UILabelDef(UIDef):
# Attribute definitions should hold a value
# ---------------------------------------
class UnknownDef(AbtractAttrDef):
class UnknownDef(AbstractAttrDef):
"""Definition is not known because definition is not available.
This attribute can be used to keep existing data unchanged but does not
@ -254,7 +262,7 @@ class UnknownDef(AbtractAttrDef):
return value
class HiddenDef(AbtractAttrDef):
class HiddenDef(AbstractAttrDef):
"""Hidden value of Any type.
This attribute can be used for UI purposes to pass values related
@ -274,7 +282,7 @@ class HiddenDef(AbtractAttrDef):
return value
class NumberDef(AbtractAttrDef):
class NumberDef(AbstractAttrDef):
"""Number definition.
Number can have defined minimum/maximum value and decimal points. Value
@ -350,7 +358,7 @@ class NumberDef(AbtractAttrDef):
return round(float(value), self.decimals)
class TextDef(AbtractAttrDef):
class TextDef(AbstractAttrDef):
"""Text definition.
Text can have multiline option so endline characters are allowed regex
@ -415,7 +423,7 @@ class TextDef(AbtractAttrDef):
return data
class EnumDef(AbtractAttrDef):
class EnumDef(AbstractAttrDef):
"""Enumeration of single item from items.
Args:
@ -457,7 +465,7 @@ class EnumDef(AbtractAttrDef):
return self.default
def serialize(self):
data = super(TextDef, self).serialize()
data = super(EnumDef, self).serialize()
data["items"] = copy.deepcopy(self.items)
return data
@ -523,7 +531,8 @@ class EnumDef(AbtractAttrDef):
return output
class BoolDef(AbtractAttrDef):
class BoolDef(AbstractAttrDef):
"""Boolean representation.
Args:
@ -768,7 +777,7 @@ class FileDefItem(object):
return output
class FileDef(AbtractAttrDef):
class FileDef(AbstractAttrDef):
"""File definition.
It is possible to define filters of allowed file extensions and whether
folders are supported.
@ -886,7 +895,7 @@ def serialize_attr_def(attr_def):
"""Serialize attribute definition to data.
Args:
attr_def (AbtractAttrDef): Attribute definition to serialize.
attr_def (AbstractAttrDef): Attribute definition to serialize.
Returns:
Dict[str, Any]: Serialized data.
@ -899,7 +908,7 @@ def serialize_attr_defs(attr_defs):
"""Serialize attribute definitions to data.
Args:
attr_defs (List[AbtractAttrDef]): Attribute definitions to serialize.
attr_defs (List[AbstractAttrDef]): Attribute definitions to serialize.
Returns:
List[Dict[str, Any]]: Serialized data.
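The widened __eq__ means two definitions with the same key but different defaults no longer compare equal; a quick check with BoolDef, constructed the same way the Photoshop creator above does:

from openpype.lib import BoolDef

a = BoolDef("create_multiple", default=True, label="Create separate")
b = BoolDef("create_multiple", default=False, label="Create separate")
assert a != b  # defaults differ, so the definitions differ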

View file

@ -1,6 +1,5 @@
"""Should be used only inside of hosts."""
import os
import copy
import platform
import logging
import functools
@ -10,17 +9,12 @@ import six
from openpype.client import (
get_project,
get_assets,
get_asset_by_name,
get_last_version_by_subset_name,
get_workfile_info,
)
from openpype.client.operations import (
CURRENT_ASSET_DOC_SCHEMA,
CURRENT_PROJECT_SCHEMA,
CURRENT_PROJECT_CONFIG_SCHEMA,
PROJECT_NAME_ALLOWED_SYMBOLS,
PROJECT_NAME_REGEX,
)
from .profiles_filtering import filter_profiles
from .path_templates import StringTemplate
@ -128,70 +122,6 @@ def with_pipeline_io(func):
return wrapped
@deprecated("openpype.pipeline.context_tools.is_representation_from_latest")
def is_latest(representation):
"""Return whether the representation is from latest version
Args:
representation (dict): The representation document from the database.
Returns:
bool: Whether the representation is of latest version.
Deprecated:
Function will be removed after release version 3.15.*
"""
from openpype.pipeline.context_tools import is_representation_from_latest
return is_representation_from_latest(representation)
@deprecated("openpype.pipeline.load.any_outdated_containers")
def any_outdated():
"""Return whether the current scene has any outdated content.
Deprecated:
Function will be removed after release version 3.15.*
"""
from openpype.pipeline.load import any_outdated_containers
return any_outdated_containers()
@deprecated("openpype.pipeline.context_tools.get_current_project_asset")
def get_asset(asset_name=None):
""" Returning asset document from database by its name.
Doesn't count with duplicities on asset names!
Args:
asset_name (str)
Returns:
(MongoDB document)
Deprecated:
Function will be removed after release version 3.15.*
"""
from openpype.pipeline.context_tools import get_current_project_asset
return get_current_project_asset(asset_name=asset_name)
@deprecated("openpype.pipeline.template_data.get_general_template_data")
def get_system_general_anatomy_data(system_settings=None):
"""
Deprecated:
Function will be removed after release version 3.15.*
"""
from openpype.pipeline.template_data import get_general_template_data
return get_general_template_data(system_settings)
@deprecated("openpype.client.get_linked_asset_ids")
def get_linked_asset_ids(asset_doc):
"""Return linked asset ids for `asset_doc` from DB
@ -214,66 +144,6 @@ def get_linked_asset_ids(asset_doc):
return get_linked_asset_ids(project_name, asset_doc=asset_doc)
@deprecated("openpype.client.get_linked_assets")
def get_linked_assets(asset_doc):
"""Return linked assets for `asset_doc` from DB
Args:
asset_doc (dict): Asset document from DB
Returns:
(list) Asset documents of input links for passed asset doc.
Deprecated:
Function will be removed after release version 3.15.*
"""
from openpype.pipeline import legacy_io
from openpype.client import get_linked_assets
project_name = legacy_io.active_project()
return get_linked_assets(project_name, asset_doc=asset_doc)
@deprecated("openpype.client.get_last_version_by_subset_name")
def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None):
"""Retrieve latest version from `asset_name`, and `subset_name`.
Do not use if you want to query more than 5 latest versions as this method
query 3 times to mongo for each call. For those cases is better to use
more efficient way, e.g. with help of aggregations.
Args:
asset_name (str): Name of asset.
subset_name (str): Name of subset.
dbcon (AvalonMongoDB, optional): Avalon Mongo connection with Session.
project_name (str, optional): Find latest version in specific project.
Returns:
None: If asset, subset or version were not found.
dict: Last version document for entered context.
Deprecated:
Function will be removed after release version 3.15.*
"""
if not project_name:
if not dbcon:
from openpype.pipeline import legacy_io
log.debug("Using `legacy_io` for query.")
dbcon = legacy_io
# Make sure is installed
dbcon.install()
project_name = dbcon.active_project()
return get_last_version_by_subset_name(
project_name, subset_name, asset_name=asset_name
)
@deprecated(
"openpype.pipeline.workfile.get_workfile_template_key_from_context")
def get_workfile_template_key_from_context(
@ -361,142 +231,6 @@ def get_workfile_template_key(
)
@deprecated("openpype.pipeline.template_data.get_template_data")
def get_workdir_data(project_doc, asset_doc, task_name, host_name):
"""Prepare data for workdir template filling from entered information.
Args:
project_doc (dict): Mongo document of project from MongoDB.
asset_doc (dict): Mongo document of asset from MongoDB.
task_name (str): Task name for which workdir data are prepared.
host_name (str): Host which is used for the workdir. This is required
because workdir template may contain `{app}` key.
Returns:
dict: Data prepared for filling workdir template.
Deprecated:
Function will be removed after release version 3.15.*
"""
from openpype.pipeline.template_data import get_template_data
return get_template_data(
project_doc, asset_doc, task_name, host_name
)
@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data")
def get_workdir_with_workdir_data(
workdir_data, anatomy=None, project_name=None, template_key=None
):
"""Fill workdir path from entered data and project's anatomy.
It is possible to pass only the project's name instead of the project's
anatomy, but one of them **must** be entered. It is preferred to enter
anatomy if it is available, as initialization of a new Anatomy object
may be time consuming.
Args:
workdir_data (dict): Data to fill workdir template.
anatomy (Anatomy): Anatomy object for specific project. Optional if
`project_name` is entered.
project_name (str): Project's name. Optional if `anatomy` is entered
otherwise Anatomy object is created with using the project name.
template_key (str): Key of work templates in anatomy templates. If not
passed `get_workfile_template_key_from_context` is used to get it.
dbcon (AvalonMongoDB): Mongo connection. Required only if 'template_key'
and 'project_name' are not passed.
Returns:
TemplateResult: Workdir path.
Raises:
ValueError: When both `anatomy` and `project_name` are set to None.
Deprecated:
Function will be removed after release version 3.15.*
"""
if not anatomy and not project_name:
raise ValueError((
"Missing required arguments one of `project_name` or `anatomy`"
" must be entered."
))
if not project_name:
project_name = anatomy.project_name
from openpype.pipeline.workfile import get_workdir_with_workdir_data
return get_workdir_with_workdir_data(
workdir_data, project_name, anatomy, template_key
)
@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data")
def get_workdir(
project_doc,
asset_doc,
task_name,
host_name,
anatomy=None,
template_key=None
):
"""Fill workdir path from entered data and project's anatomy.
Args:
project_doc (dict): Mongo document of project from MongoDB.
asset_doc (dict): Mongo document of asset from MongoDB.
task_name (str): Task name for which workdir data are prepared.
host_name (str): Host which is used for the workdir. This is required
because workdir template may contain `{app}` key. In `Session`
is stored under `AVALON_APP` key.
anatomy (Anatomy): Optional argument. Anatomy object is created using
project name from `project_doc`. It is preferred to pass this
argument as initialization of a new Anatomy object may be time
consuming.
template_key (str): Key of work templates in anatomy templates. Default
value is defined in `get_workdir_with_workdir_data`.
Returns:
TemplateResult: Workdir path.
Deprecated:
Function will be removed after release version 3.15.*
"""
from openpype.pipeline.workfile import get_workdir
# Output is TemplateResult object which contains useful data
return get_workdir(
project_doc,
asset_doc,
task_name,
host_name,
anatomy,
template_key
)
@deprecated("openpype.pipeline.context_tools.get_template_data_from_session")
def template_data_from_session(session=None):
""" Return dictionary with template from session keys.
Args:
session (dict, Optional): The Session to use. If not provided use the
currently active global Session.
Returns:
dict: All available data from session.
Deprecated:
Function will be removed after release version 3.15.*
"""
from openpype.pipeline.context_tools import get_template_data_from_session
return get_template_data_from_session(session)
@deprecated("openpype.pipeline.context_tools.compute_session_changes")
def compute_session_changes(
session, task=None, asset=None, app=None, template_key=None
@ -588,133 +322,6 @@ def update_current_task(task=None, asset=None, app=None, template_key=None):
return change_current_context(asset, task, template_key)
@deprecated("openpype.client.get_workfile_info")
def get_workfile_doc(asset_id, task_name, filename, dbcon=None):
"""Return workfile document for entered context.
Do not use this method to get more than one document. In those cases use
a custom query, as this will return documents from the database one by one.
Args:
asset_id (ObjectId): Mongo ID of an asset under which workfile belongs.
task_name (str): Name of task under which the workfile belongs.
filename (str): Name of a workfile.
dbcon (AvalonMongoDB): Optionally enter an AvalonMongoDB object;
`legacy_io` is used if not entered.
Returns:
dict: Workfile document or None.
Deprecated:
Function will be removed after release version 3.15.*
"""
# Use legacy_io if dbcon is not entered
if not dbcon:
from openpype.pipeline import legacy_io
dbcon = legacy_io
project_name = dbcon.active_project()
return get_workfile_info(project_name, asset_id, task_name, filename)
@deprecated
def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None):
"""Creates or replace workfile document in mongo.
Do not use this method to update data. This method will remove all
additional data from existing document.
Args:
asset_doc (dict): Document of asset under which workfile belongs.
task_name (str): Name of task the workfile is related to.
filename (str): Filename of workfile.
workdir (str): Path to directory where `filename` is located.
dbcon (AvalonMongoDB): Optionally enter an AvalonMongoDB object;
`legacy_io` is used if not entered.
"""
from openpype.pipeline import Anatomy
from openpype.pipeline.template_data import get_template_data
# Use legacy_io if dbcon is not entered
if not dbcon:
from openpype.pipeline import legacy_io
dbcon = legacy_io
# Filter of workfile document
doc_filter = {
"type": "workfile",
"parent": asset_doc["_id"],
"task_name": task_name,
"filename": filename
}
# Document data is a copy of the filter
doc_data = copy.deepcopy(doc_filter)
# Prepare project for workdir data
project_name = dbcon.active_project()
project_doc = get_project(project_name)
workdir_data = get_template_data(
project_doc, asset_doc, task_name, dbcon.Session["AVALON_APP"]
)
# Prepare anatomy
anatomy = Anatomy(project_name)
# Get workdir path (result is anatomy.TemplateResult)
template_workdir = get_workdir_with_workdir_data(
workdir_data, anatomy
)
template_workdir_path = str(template_workdir).replace("\\", "/")
# Replace slashes in workdir path where workfile is located
mod_workdir = workdir.replace("\\", "/")
# Replace workdir from templates with rootless workdir
rootless_workdir = mod_workdir.replace(
template_workdir_path,
template_workdir.rootless.replace("\\", "/")
)
doc_data["schema"] = "pype:workfile-1.0"
doc_data["files"] = ["/".join([rootles_workdir, filename])]
doc_data["data"] = {}
dbcon.replace_one(
doc_filter,
doc_data,
upsert=True
)
@deprecated
def save_workfile_data_to_doc(workfile_doc, data, dbcon=None):
if not workfile_doc:
# TODO add log message
return
if not data:
return
# Use legacy_io if dbcon is not entered
if not dbcon:
from openpype.pipeline import legacy_io
dbcon = legacy_io
# Convert data to mongo modification keys/values
# - this is a naive implementation which does not expect nested
# dictionaries
set_data = {}
for key, value in data.items():
new_key = "data.{}".format(key)
set_data[new_key] = value
# Update workfile document with data
dbcon.update_one(
{"_id": workfile_doc["_id"]},
{"$set": set_data}
)
@deprecated("openpype.pipeline.workfile.BuildWorkfile")
def BuildWorkfile():
"""Build workfile class was moved to workfile pipeline.
@ -747,38 +354,6 @@ def get_creator_by_name(creator_name, case_sensitive=False):
return get_legacy_creator_by_name(creator_name, case_sensitive)
@deprecated
def change_timer_to_current_context():
"""Called after context change to change timers.
Deprecated:
This method is specific for TimersManager module so please use the
functionality from there. Function will be removed after release
version 3.15.*
"""
from openpype.pipeline import legacy_io
webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL")
if not webserver_url:
log.warning("Couldn't find webserver url")
return
rest_api_url = "{}/timers_manager/start_timer".format(webserver_url)
try:
import requests
except Exception:
log.warning("Couldn't start timer")
return
data = {
"project_name": legacy_io.Session["AVALON_PROJECT"],
"asset_name": legacy_io.Session["AVALON_ASSET"],
"task_name": legacy_io.Session["AVALON_TASK"]
}
requests.post(rest_api_url, json=data)
def _get_task_context_data_for_anatomy(
project_doc, asset_doc, task_name, anatomy=None
):
@ -800,6 +375,8 @@ def _get_task_context_data_for_anatomy(
dict: With Anatomy context data.
"""
from openpype.pipeline.template_data import get_general_template_data
if anatomy is None:
from openpype.pipeline import Anatomy
anatomy = Anatomy(project_doc["name"])
@ -840,7 +417,7 @@ def _get_task_context_data_for_anatomy(
}
}
system_general_data = get_system_general_anatomy_data()
system_general_data = get_general_template_data()
data.update(system_general_data)
return data
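Every helper deleted above carries an @deprecated pointer to its successor; the workfile lookup, for example, reduces to a direct client call with the project name passed explicitly instead of read from legacy_io (a sketch of a drop-in replacement):

from openpype.client import get_workfile_info

def find_workfile_doc(project_name, asset_id, task_name, filename):
    # Same query the removed get_workfile_doc wrapper performed.
    return get_workfile_info(project_name, asset_id, task_name, filename)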

View file

@ -117,12 +117,12 @@ def run_subprocess(*args, **kwargs):
full_output = ""
_stdout, _stderr = proc.communicate()
if _stdout:
_stdout = _stdout.decode("utf-8")
_stdout = _stdout.decode("utf-8", errors="backslashreplace")
full_output += _stdout
logger.debug(_stdout)
if _stderr:
_stderr = _stderr.decode("utf-8")
_stderr = _stderr.decode("utf-8", errors="backslashreplace")
# Add additional line break if output already contains stdout
if full_output:
full_output += "\n"

View file

@ -8,7 +8,6 @@ import warnings
import functools
from openpype.client import get_asset_by_id
from openpype.settings import get_project_settings
log = logging.getLogger(__name__)
@ -101,8 +100,6 @@ def get_subset_name_with_asset_doc(
is not passed.
dynamic_data (dict): Dynamic data specific for a creator which creates
instance.
dbcon (AvalonMongoDB): Mongo connection to be able query asset document
if 'asset_doc' is not passed.
"""
from openpype.pipeline.create import get_subset_name
@ -202,122 +199,6 @@ def prepare_template_data(fill_pairs):
return fill_data
@deprecated("openpype.pipeline.publish.lib.filter_pyblish_plugins")
def filter_pyblish_plugins(plugins):
"""Filter pyblish plugins by presets.
This serves as a plugin filter/modifier for pyblish. It will load plugin
definitions from presets and filter out those that need to be excluded.
Args:
plugins (dict): Dictionary of plugins produced by :mod:`pyblish-base`
`discover()` method.
Deprecated:
Function will be removed after release version 3.15.*
"""
from openpype.pipeline.publish.lib import filter_pyblish_plugins
filter_pyblish_plugins(plugins)
@deprecated
def set_plugin_attributes_from_settings(
plugins, superclass, host_name=None, project_name=None
):
"""Change attribute values on Avalon plugins by project settings.
This function should be used only in a host context. It modifies the
behavior of plugins.
Args:
plugins (list): Plugins discovered by origin avalon discover method.
superclass (object): Superclass of plugin type (e.g. Creator, Loader).
host_name (str): Name of host for which plugins are loaded.
Value from environment `AVALON_APP` is used if not entered.
project_name (str): Name of project for which settings will be loaded.
Value from environment `AVALON_PROJECT` is used if not entered.
Deprecated:
Function will be removed after release version 3.15.*
"""
# Function is not used anymore
from openpype.pipeline import LegacyCreator, LoaderPlugin
# determine host application to use for finding presets
if host_name is None:
host_name = os.environ.get("AVALON_APP")
if project_name is None:
project_name = os.environ.get("AVALON_PROJECT")
# map plugin superclass to preset json. Currently supported are load and
# create (LoaderPlugin and LegacyCreator)
plugin_type = None
if superclass is LoaderPlugin or issubclass(superclass, LoaderPlugin):
plugin_type = "load"
elif superclass is LegacyCreator or issubclass(superclass, LegacyCreator):
plugin_type = "create"
if not host_name or not project_name or plugin_type is None:
msg = "Skipped attributes override from settings."
if not host_name:
msg += " Host name is not defined."
if not project_name:
msg += " Project name is not defined."
if plugin_type is None:
msg += " Plugin type is unsupported for class {}.".format(
superclass.__name__
)
print(msg)
return
print(">>> Finding presets for {}:{} ...".format(host_name, plugin_type))
project_settings = get_project_settings(project_name)
plugin_type_settings = (
project_settings
.get(host_name, {})
.get(plugin_type, {})
)
global_type_settings = (
project_settings
.get("global", {})
.get(plugin_type, {})
)
if not global_type_settings and not plugin_type_settings:
return
for plugin in plugins:
plugin_name = plugin.__name__
plugin_settings = None
# Look for plugin settings in host specific settings
if plugin_name in plugin_type_settings:
plugin_settings = plugin_type_settings[plugin_name]
# Look for plugin settings in global settings
elif plugin_name in global_type_settings:
plugin_settings = global_type_settings[plugin_name]
if not plugin_settings:
continue
print(">>> We have preset for {}".format(plugin_name))
for option, value in plugin_settings.items():
if option == "enabled" and value is False:
setattr(plugin, "active", False)
print(" - is disabled by preset")
else:
setattr(plugin, option, value)
print(" - setting `{}`: `{}`".format(option, value))
def source_hash(filepath, *args):
"""Generate simple identifier for a source file.
This is used to identify whether a source file has previously been

View file

@ -64,6 +64,7 @@ class MayaPluginInfo(object):
# Include all lights flag
RenderSetupIncludeLights = attr.ib(
default="1", validator=_validate_deadline_bool_value)
StrictErrorChecking = attr.ib(default=True)
@attr.s
@ -219,6 +220,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
"renderSetupIncludeLights", default_rs_include_lights)
if rs_include_lights not in {"1", "0", True, False}:
rs_include_lights = default_rs_include_lights
strict_error_checking = instance.data.get("strict_error_checking",
True)
plugin_info = MayaPluginInfo(
SceneFile=self.scene_path,
Version=cmds.about(version=True),
@ -227,6 +230,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
RenderSetupIncludeLights=rs_include_lights, # noqa
ProjectPath=context.data["workspaceDir"],
UsingRenderLayers=True,
StrictErrorChecking=strict_error_checking
)
plugin_payload = attr.asdict(plugin_info)
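The new field travels into the Deadline payload through attr.asdict like the rest of the plugin info; a trimmed-down sketch of that mechanism (illustrative class with only two fields):

import attr

@attr.s
class PluginInfoSketch(object):
    SceneFile = attr.ib(default=None)
    StrictErrorChecking = attr.ib(default=True)

payload = attr.asdict(
    PluginInfoSketch(SceneFile="scene.ma", StrictErrorChecking=False))
print(payload)  # {'SceneFile': 'scene.ma', 'StrictErrorChecking': False}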

View file

@ -35,7 +35,7 @@ class OpenPypeVersion:
self.prerelease = prerelease
is_valid = True
if not major or not minor or not patch:
if major is None or minor is None or patch is None:
is_valid = False
self.is_valid = is_valid
@ -157,7 +157,7 @@ def get_openpype_version_from_path(path, build=True):
# fix path for application bundle on macos
if platform.system().lower() == "darwin":
path = os.path.join(path, "Contents", "MacOS", "lib", "Python")
path = os.path.join(path, "MacOS")
version_file = os.path.join(path, "openpype", "version.py")
if not os.path.isfile(version_file):
@ -189,6 +189,11 @@ def get_openpype_executable():
exe_list = config.GetConfigEntryWithDefault("OpenPypeExecutable", "")
dir_list = config.GetConfigEntryWithDefault(
"OpenPypeInstallationDirs", "")
# clean '\ ' for MacOS pasting
if platform.system().lower() == "darwin":
exe_list = exe_list.replace("\\ ", " ")
dir_list = dir_list.replace("\\ ", " ")
return exe_list, dir_list
@ -218,8 +223,8 @@ def get_requested_openpype_executable(
requested_version_obj = OpenPypeVersion.from_string(requested_version)
if not requested_version_obj:
print((
">>> Requested version does not match version regex \"{}\""
).format(VERSION_REGEX))
">>> Requested version '{}' does not match version regex '{}'"
).format(requested_version, VERSION_REGEX))
return None
print((
@ -272,7 +277,8 @@ def get_requested_openpype_executable(
# Deadline decide.
exe_list = [
os.path.join(version_dir, "openpype_console.exe"),
os.path.join(version_dir, "openpype_console")
os.path.join(version_dir, "openpype_console"),
os.path.join(version_dir, "MacOS", "openpype_console")
]
return FileUtils.SearchFileList(";".join(exe_list))

Some files were not shown because too many files have changed in this diff