Merge branch 'develop' into bugfix/OP-2549_fps-rounding-in-maya

# Conflicts:
#	openpype/hosts/maya/api/lib.py
Toke Stuart Jepsen 2023-02-10 07:27:37 +00:00
commit 11552ef2c9
63 changed files with 2126 additions and 694 deletions

View file

@ -1,19 +0,0 @@
name: Automate Projects
on:
issues:
types: [opened, labeled]
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
jobs:
assign_one_project:
runs-on: ubuntu-latest
name: Assign to One Project
steps:
- name: Assign NEW bugs to triage
uses: srggrs/assign-one-project-github-action@1.2.0
if: contains(github.event.issue.labels.*.name, 'bug')
with:
project: 'https://github.com/pypeclub/pype/projects/2'
column_name: 'Needs triage'

View file

@ -13,7 +13,7 @@ jobs:
if: github.event.pull_request.milestone == null
uses: zoispag/action-assign-milestone@v1
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
repo-token: "${{ secrets.YNPUT_BOT_TOKEN }}"
milestone: 'next-minor'
run_if_develop:
@ -24,5 +24,5 @@ jobs:
if: github.event.pull_request.milestone == null
uses: zoispag/action-assign-milestone@v1
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
milestone: 'next-patch'
repo-token: "${{ secrets.YNPUT_BOT_TOKEN }}"
milestone: 'next-patch'

View file

@ -12,7 +12,7 @@ jobs:
uses: "WyriHaximus/github-action-get-milestones@master"
id: milestones
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"
- run: printf "name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number')
id: querymilestone
@ -31,7 +31,7 @@ jobs:
with:
title: 'next-patch'
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"
generate-next-minor:
runs-on: ubuntu-latest
@ -40,7 +40,7 @@ jobs:
uses: "WyriHaximus/github-action-get-milestones@master"
id: milestones
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"
- run: printf "name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number')
id: querymilestone
@ -59,4 +59,4 @@ jobs:
with:
title: 'next-minor'
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"

View file

@ -14,10 +14,10 @@ jobs:
- name: 🚛 Checkout Code
uses: actions/checkout@v2
- name: 🔨 Merge develop to main
- name: 🔨 Merge develop to main
uses: everlytic/branch-merge@1.1.0
with:
github_token: ${{ secrets.ADMIN_TOKEN }}
github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
source_ref: 'develop'
target_branch: 'main'
commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'
@ -26,4 +26,4 @@ jobs:
uses: benc-uk/workflow-dispatch@v1
with:
workflow: Nightly Prerelease
token: ${{ secrets.ADMIN_TOKEN }}
token: ${{ secrets.YNPUT_BOT_TOKEN }}

View file

@ -25,43 +25,15 @@ jobs:
- name: 🔎 Determine next version type
id: version_type
run: |
TYPE=$(python ./tools/ci_tools.py --bump --github_token ${{ secrets.GITHUB_TOKEN }})
echo ::set-output name=type::$TYPE
TYPE=$(python ./tools/ci_tools.py --bump --github_token ${{ secrets.YNPUT_BOT_TOKEN }})
echo "type=${TYPE}" >> $GITHUB_OUTPUT
- name: 💉 Inject new version into files
id: version
if: steps.version_type.outputs.type != 'skip'
run: |
RESULT=$(python ./tools/ci_tools.py --nightly --github_token ${{ secrets.GITHUB_TOKEN }})
echo ::set-output name=next_tag::$RESULT
# - name: "✏️ Generate full changelog"
# if: steps.version_type.outputs.type != 'skip'
# id: generate-full-changelog
# uses: heinrichreimer/github-changelog-generator-action@v2.3
# with:
# token: ${{ secrets.ADMIN_TOKEN }}
# addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
# issues: false
# issuesWoLabels: false
# sinceTag: "3.12.0"
# maxIssues: 100
# pullRequests: true
# prWoLabels: false
# author: false
# unreleased: true
# compareLink: true
# stripGeneratorNotice: true
# verbose: true
# unreleasedLabel: ${{ steps.version.outputs.next_tag }}
# excludeTagsRegex: "CI/.+"
# releaseBranch: "main"
- name: "🖨️ Print changelog to console"
if: steps.version_type.outputs.type != 'skip'
run: cat CHANGELOG.md
NEW_VERSION_TAG=$(python ./tools/ci_tools.py --nightly --github_token ${{ secrets.YNPUT_BOT_TOKEN }})
echo "next_tag=${NEW_VERSION_TAG}" >> $GITHUB_OUTPUT
- name: 💾 Commit and Tag
id: git_commit
@ -80,7 +52,7 @@ jobs:
- name: Push to protected main branch
uses: CasperWA/push-protected@v2.10.0
with:
token: ${{ secrets.ADMIN_TOKEN }}
token: ${{ secrets.YNPUT_BOT_TOKEN }}
branch: main
tags: true
unprotect_reviews: true
@ -89,7 +61,7 @@ jobs:
uses: everlytic/branch-merge@1.1.0
if: steps.version_type.outputs.type != 'skip'
with:
github_token: ${{ secrets.ADMIN_TOKEN }}
github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
source_ref: 'main'
target_branch: 'develop'
commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'
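
The `echo ::set-output` lines removed in these workflow hunks used a workflow command GitHub has deprecated; the replacement appends `name=value` lines to the file referenced by the `GITHUB_OUTPUT` environment variable. A minimal sketch of that mechanism, expressed as a hypothetical Python helper (not part of this repository):

```python
import os

def set_step_output(name, value):
    """Hypothetical helper: equivalent of `echo "name=value" >> "$GITHUB_OUTPUT"`."""
    # GitHub Actions parses key=value lines from this file after the step
    # finishes and exposes them as steps.<step_id>.outputs.<name>.
    with open(os.environ["GITHUB_OUTPUT"], "a") as out:
        out.write("{}={}\n".format(name, value))

# Example: what `echo "type=${TYPE}" >> $GITHUB_OUTPUT` achieves.
# set_step_output("type", "patch")
```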

View file

@ -26,34 +26,12 @@ jobs:
- name: 💉 Inject new version into files
id: version
run: |
echo ::set-output name=current_version::${GITHUB_REF#refs/*/}
RESULT=$(python ./tools/ci_tools.py --finalize ${GITHUB_REF#refs/*/})
LASTRELEASE=$(python ./tools/ci_tools.py --lastversion release)
NEW_VERSION=$(python ./tools/ci_tools.py --finalize ${GITHUB_REF#refs/*/})
LAST_VERSION=$(python ./tools/ci_tools.py --lastversion release)
echo ::set-output name=last_release::$LASTRELEASE
echo ::set-output name=release_tag::$RESULT
# - name: "✏️ Generate full changelog"
# if: steps.version.outputs.release_tag != 'skip'
# id: generate-full-changelog
# uses: heinrichreimer/github-changelog-generator-action@v2.3
# with:
# token: ${{ secrets.ADMIN_TOKEN }}
# addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
# issues: false
# issuesWoLabels: false
# sinceTag: "3.12.0"
# maxIssues: 100
# pullRequests: true
# prWoLabels: false
# author: false
# unreleased: true
# compareLink: true
# stripGeneratorNotice: true
# verbose: true
# futureRelease: ${{ steps.version.outputs.release_tag }}
# excludeTagsRegex: "CI/.+"
# releaseBranch: "main"
echo "current_version=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT
echo "last_release=${LAST_VERSION}" >> $GITHUB_OUTPUT
echo "release_tag=${NEW_VERSION}" >> $GITHUB_OUTPUT
- name: 💾 Commit and Tag
id: git_commit
@ -70,43 +48,17 @@ jobs:
if: steps.version.outputs.release_tag != 'skip'
uses: CasperWA/push-protected@v2.10.0
with:
token: ${{ secrets.ADMIN_TOKEN }}
token: ${{ secrets.YNPUT_BOT_TOKEN }}
branch: main
tags: true
unprotect_reviews: true
- name: "✏️ Generate last changelog"
if: steps.version.outputs.release_tag != 'skip'
id: generate-last-changelog
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
issues: false
issuesWoLabels: false
sinceTag: ${{ steps.version.outputs.last_release }}
maxIssues: 100
pullRequests: true
prWoLabels: false
author: false
unreleased: true
compareLink: true
stripGeneratorNotice: true
verbose: true
futureRelease: ${{ steps.version.outputs.release_tag }}
excludeTagsRegex: "CI/.+"
releaseBranch: "main"
stripHeaders: true
base: 'none'
- name: 🚀 Github Release
if: steps.version.outputs.release_tag != 'skip'
uses: ncipollo/release-action@v1
with:
body: ${{ steps.generate-last-changelog.outputs.changelog }}
tag: ${{ steps.version.outputs.release_tag }}
token: ${{ secrets.ADMIN_TOKEN }}
token: ${{ secrets.YNPUT_BOT_TOKEN }}
- name: ☠ Delete Pre-release
if: steps.version.outputs.release_tag != 'skip'
@ -118,7 +70,7 @@ jobs:
if: steps.version.outputs.release_tag != 'skip'
uses: everlytic/branch-merge@1.1.0
with:
github_token: ${{ secrets.ADMIN_TOKEN }}
github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
source_ref: 'main'
target_branch: 'develop'
commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}'

View file

@ -28,7 +28,7 @@ jobs:
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: 🧵 Install Requirements
shell: pwsh
run: |
@ -64,27 +64,3 @@ jobs:
run: |
export SKIP_THIRD_PARTY_VALIDATION="1"
./tools/build.sh
# MacOS-latest:
# runs-on: macos-latest
# strategy:
# matrix:
# python-version: [3.9]
# steps:
# - name: 🚛 Checkout Code
# uses: actions/checkout@v2
# - name: Set up Python
# uses: actions/setup-python@v2
# with:
# python-version: ${{ matrix.python-version }}
# - name: 🧵 Install Requirements
# run: |
# ./tools/create_env.sh
# - name: 🔨 Build
# run: |
# ./tools/build.sh

View file

@ -9,4 +9,4 @@ repos:
- id: check-yaml
- id: check-added-large-files
- id: no-commit-to-branch
args: [ '--pattern', '^(?!((release|enhancement|feature|bugfix|documentation|tests|local|chore)\/[a-zA-Z0-9\-]+)$).*' ]
args: [ '--pattern', '^(?!((release|enhancement|feature|bugfix|documentation|tests|local|chore)\/[a-zA-Z0-9\-_]+)$).*' ]
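The `no-commit-to-branch` hook blocks commits to any branch whose name matches the pattern; the negative lookahead makes everything except a `type/name` branch match, and this change adds `_` to the allowed name characters. A quick sanity check of the updated pattern, assuming Python `re` semantics match pre-commit's:

```python
import re

# Branches that MATCH this pattern are blocked by no-commit-to-branch.
pattern = re.compile(
    r"^(?!((release|enhancement|feature|bugfix|documentation|tests|local|chore)"
    r"\/[a-zA-Z0-9\-_]+)$).*"
)

# Allowed now that "_" is in the character class (no match -> not blocked):
print(bool(pattern.match("bugfix/OP-2549_fps-rounding-in-maya")))  # False
# Still blocked, since "main" does not fit the type/name scheme:
print(bool(pattern.match("main")))  # True
```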

View file

@ -1,112 +0,0 @@
from .settings import (
get_system_settings,
get_project_settings,
get_current_project_settings,
get_anatomy_settings,
SystemSettings,
ProjectSettings
)
from .lib import (
PypeLogger,
Logger,
Anatomy,
execute,
run_subprocess,
version_up,
get_asset,
get_workdir_data,
get_version_from_path,
get_last_version_from_path,
get_app_environments_for_context,
source_hash,
get_latest_version,
get_local_site_id,
change_openpype_mongo_url,
create_project_folders,
get_project_basic_paths
)
from .lib.mongo import (
get_default_components
)
from .lib.applications import (
ApplicationManager
)
from .lib.avalon_context import (
BuildWorkfile
)
from . import resources
from .plugin import (
Extractor,
ValidatePipelineOrder,
ValidateContentsOrder,
ValidateSceneOrder,
ValidateMeshOrder,
)
# temporary fix, might
from .action import (
get_errored_instances_from_context,
RepairAction,
RepairContextAction
)
__all__ = [
"get_system_settings",
"get_project_settings",
"get_current_project_settings",
"get_anatomy_settings",
"get_project_basic_paths",
"SystemSettings",
"ProjectSettings",
"PypeLogger",
"Logger",
"Anatomy",
"execute",
"get_default_components",
"ApplicationManager",
"BuildWorkfile",
# Resources
"resources",
# plugin classes
"Extractor",
# ordering
"ValidatePipelineOrder",
"ValidateContentsOrder",
"ValidateSceneOrder",
"ValidateMeshOrder",
# action
"get_errored_instances_from_context",
"RepairAction",
"RepairContextAction",
# get contextual data
"version_up",
"get_asset",
"get_workdir_data",
"get_version_from_path",
"get_last_version_from_path",
"get_app_environments_for_context",
"source_hash",
"run_subprocess",
"get_latest_version",
"get_local_site_id",
"change_openpype_mongo_url",
"get_project_basic_paths",
"create_project_folders"
]

View file

@ -1,4 +1,5 @@
import os
from openpype.lib import PreLaunchHook
@ -40,5 +41,13 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
self.log.info("Current context does not have any workfile yet.")
return
# Determine whether to open workfile post initialization.
if self.host_name == "maya":
key = "open_workfile_post_initialization"
if self.data["project_settings"]["maya"][key]:
self.log.debug("Opening workfile post initialization.")
self.data["env"]["OPENPYPE_" + key.upper()] = "1"
return
# Add path to workfile to arguments
self.launch_context.launch_args.append(last_workfile)

View file

@ -8,6 +8,7 @@ exists is used.
import os
from abc import ABCMeta, abstractmethod
import platform
import six
@ -187,11 +188,19 @@ class HostDirmap(object):
self.log.debug("local overrides {}".format(active_overrides))
self.log.debug("remote overrides {}".format(remote_overrides))
current_platform = platform.system().lower()
for root_name, active_site_dir in active_overrides.items():
remote_site_dir = (
remote_overrides.get(root_name)
or sync_settings["sites"][remote_site]["root"][root_name]
)
if isinstance(remote_site_dir, dict):
remote_site_dir = remote_site_dir.get(current_platform)
if not remote_site_dir:
continue
if os.path.isdir(active_site_dir):
if "destination-path" not in mapping:
mapping["destination-path"] = []

View file

@ -11,9 +11,15 @@ from openpype.pipeline import (
)
from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances
from openpype.lib import prepare_template_data
from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS
class RenderCreator(Creator):
"""Creates 'render' instance for publishing.
Result of a 'render' instance is a video or a sequence of images for a
particular composition, based on the configuration in its RenderQueue.
"""
identifier = "render"
label = "Render"
family = "render"
@ -28,45 +34,6 @@ class RenderCreator(Creator):
["RenderCreator"]
["defaults"])
def get_icon(self):
return resources.get_openpype_splash_filepath()
def collect_instances(self):
for instance_data in cache_and_get_instances(self):
# legacy instances have family=='render' or 'renderLocal', use them
creator_id = (instance_data.get("creator_identifier") or
instance_data.get("family", '').replace("Local", ''))
if creator_id == self.identifier:
instance_data = self._handle_legacy(instance_data)
instance = CreatedInstance.from_existing(
instance_data, self
)
self._add_instance_to_context(instance)
def update_instances(self, update_list):
for created_inst, _changes in update_list:
api.get_stub().imprint(created_inst.get("instance_id"),
created_inst.data_to_store())
subset_change = _changes.get("subset")
if subset_change:
api.get_stub().rename_item(created_inst.data["members"][0],
subset_change[1])
def remove_instances(self, instances):
for instance in instances:
self._remove_instance_from_context(instance)
self.host.remove_instance(instance)
subset = instance.data["subset"]
comp_id = instance.data["members"][0]
comp = api.get_stub().get_item(comp_id)
if comp:
new_comp_name = comp.name.replace(subset, '')
if not new_comp_name:
new_comp_name = "dummyCompName"
api.get_stub().rename_item(comp_id,
new_comp_name)
def create(self, subset_name_from_ui, data, pre_create_data):
stub = api.get_stub() # only after After Effects is up
if pre_create_data.get("use_selection"):
@ -82,10 +49,19 @@ class RenderCreator(Creator):
"if 'useSelection' or create at least "
"one composition."
)
use_composition_name = (pre_create_data.get("use_composition_name") or
len(comps) > 1)
for comp in comps:
if pre_create_data.get("use_composition_name"):
composition_name = comp.name
if use_composition_name:
if "{composition}" not in subset_name_from_ui.lower():
subset_name_from_ui += "{Composition}"
composition_name = re.sub(
"[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
"",
comp.name
)
dynamic_fill = prepare_template_data({"composition":
composition_name})
subset_name = subset_name_from_ui.format(**dynamic_fill)
@ -129,8 +105,72 @@ class RenderCreator(Creator):
]
return output
def get_icon(self):
return resources.get_openpype_splash_filepath()
def collect_instances(self):
for instance_data in cache_and_get_instances(self):
# legacy instances have family=='render' or 'renderLocal', use them
creator_id = (instance_data.get("creator_identifier") or
instance_data.get("family", '').replace("Local", ''))
if creator_id == self.identifier:
instance_data = self._handle_legacy(instance_data)
instance = CreatedInstance.from_existing(
instance_data, self
)
self._add_instance_to_context(instance)
def update_instances(self, update_list):
for created_inst, _changes in update_list:
api.get_stub().imprint(created_inst.get("instance_id"),
created_inst.data_to_store())
subset_change = _changes.get("subset")
if subset_change:
api.get_stub().rename_item(created_inst.data["members"][0],
subset_change[1])
def remove_instances(self, instances):
for instance in instances:
self._remove_instance_from_context(instance)
self.host.remove_instance(instance)
subset = instance.data["subset"]
comp_id = instance.data["members"][0]
comp = api.get_stub().get_item(comp_id)
if comp:
new_comp_name = comp.name.replace(subset, '')
if not new_comp_name:
new_comp_name = "dummyCompName"
api.get_stub().rename_item(comp_id,
new_comp_name)
def get_detail_description(self):
return """Creator for Render instances"""
return """Creator for Render instances
Main publishable item in AfterEffects will be of the `render` family.
Result of this item (instance) is a picture sequence or a video that
could be a final delivery product, or be loaded and used in other DCCs.
Select a single composition and create an instance of the 'render'
family, or turn off 'Use selection' to create instances for all
compositions.
'Use composition name in subset' allows explicitly adding the
composition name into the created subset name.
Position of the composition name can be set in
`project_settings/global/tools/creator/subset_name_profiles` with some
form of the '{composition}' placeholder.
The composition name will be used implicitly if multiple compositions
are handled at the same time.
If the '{composition}' placeholder is not used in
'subset_name_profiles', the composition name will be capitalized and
appended to the end of the subset name if necessary.
If the composition name is used, it will be cleaned up of characters
that would cause issues in published file names.
"""
def get_dynamic_data(self, variant, task_name, asset_doc,
project_name, host_name, instance):
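
For illustration, roughly how the '{composition}' placeholder described above gets filled. `SUBSET_NAME_ALLOWED_SYMBOLS` and `prepare_template_data` come from `openpype.pipeline.create` and `openpype.lib`; the stand-ins below only approximate their behavior (assumed: the fill key is also exposed in capitalized form) so the sketch is self-contained:

```python
import re

# Assumed to approximate openpype.pipeline.create.SUBSET_NAME_ALLOWED_SYMBOLS.
SUBSET_NAME_ALLOWED_SYMBOLS = "a-zA-Z0-9_."

def prepare_template_data_stub(fill_pairs):
    # Rough stand-in for openpype.lib.prepare_template_data: expose each key
    # in both lowercase ("{composition}") and capitalized ("{Composition}")
    # variants.
    data = {}
    for key, value in fill_pairs.items():
        data[key] = value
        if value:
            data[key.capitalize()] = value[0].upper() + value[1:]
    return data

subset_name_from_ui = "render{Composition}"
# Strip characters that are not allowed in subset names.
composition_name = re.sub(
    "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), "", "Main Comp 01!"
)
dynamic_fill = prepare_template_data_stub({"composition": composition_name})
print(subset_name_from_ui.format(**dynamic_fill))  # renderMainComp01
```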

View file

@ -225,12 +225,12 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase):
self._add_instance_to_context(created_instance)
def update_instances(self, update_list):
for created_inst, _changes in update_list:
for created_inst, changes in update_list:
instance_node = hou.node(created_inst.get("instance_node"))
new_values = {
key: new_value
for key, (_old_value, new_value) in _changes.items()
key: changes[key].new_value
for key in changes.changed_keys
}
imprint(
instance_node,
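
This hunk (and the identical 3ds Max change further below) moves from unpacking `(old_value, new_value)` tuples to a richer changes object exposing `changed_keys` and per-key `.new_value`. A minimal sketch of the interface the new code assumes (stand-in classes, not OpenPype's actual implementation):

```python
class ChangeItem(object):
    """Stand-in for a single tracked attribute change."""
    def __init__(self, old_value, new_value):
        self.old_value = old_value
        self.new_value = new_value

class InstanceChanges(object):
    """Stand-in for the per-instance changes object used above."""
    def __init__(self, changes):
        self._changes = changes  # {key: ChangeItem}

    @property
    def changed_keys(self):
        return set(self._changes)

    def __getitem__(self, key):
        return self._changes[key]

changes = InstanceChanges({"subset": ChangeItem("renderMain", "renderHero")})
new_values = {key: changes[key].new_value for key in changes.changed_keys}
print(new_values)  # {'subset': 'renderHero'}
```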

View file

@ -1,4 +1,5 @@
import os
import re
import logging
import platform
@ -66,7 +67,7 @@ def generate_shelves():
)
continue
mandatory_attributes = {'name', 'script'}
mandatory_attributes = {'label', 'script'}
for tool_definition in shelf_definition.get('tools_list'):
# We verify that the label and script attributes of the tool
# are set
@ -152,31 +153,32 @@ def get_or_create_tool(tool_definition, shelf):
Returns:
hou.Tool: The tool updated or the new one
"""
existing_tools = shelf.tools()
tool_label = tool_definition.get('label')
tool_label = tool_definition.get("label")
if not tool_label:
log.warning("Skipped shelf without label")
return
script_path = tool_definition["script"]
if not script_path or not os.path.exists(script_path):
log.warning("This path doesn't exist - {}".format(script_path))
return
existing_tools = shelf.tools()
existing_tool = next(
(tool for tool in existing_tools if tool.label() == tool_label),
None
)
with open(script_path) as stream:
script = stream.read()
tool_definition["script"] = script
if existing_tool:
tool_definition.pop('name', None)
tool_definition.pop('label', None)
tool_definition.pop("label", None)
existing_tool.setData(**tool_definition)
return existing_tool
tool_name = tool_label.replace(' ', '_').lower()
if not os.path.exists(tool_definition['script']):
log.warning(
"This path doesn't exist - {}".format(tool_definition['script'])
)
return
with open(tool_definition['script']) as f:
script = f.read()
tool_definition.update({'script': script})
new_tool = hou.shelves.newTool(name=tool_name, **tool_definition)
return new_tool
tool_name = re.sub(r"[^\w\d]+", "_", tool_label).lower()
return hou.shelves.newTool(name=tool_name, **tool_definition)

View file

@ -78,12 +78,12 @@ class MaxCreator(Creator, MaxCreatorBase):
self._add_instance_to_context(created_instance)
def update_instances(self, update_list):
for created_inst, _changes in update_list:
for created_inst, changes in update_list:
instance_node = created_inst.get("instance_node")
new_values = {
key: new_value
for key, (_old_value, new_value) in _changes.items()
key: changes[key].new_value
for key in changes.changed_keys
}
imprint(
instance_node,

View file

@ -5,6 +5,7 @@ import sys
import platform
import uuid
import math
import re
import json
import logging
@ -3430,3 +3431,34 @@ def convert_to_maya_fps(fps):
)
return supported_framerate
def write_xgen_file(data, filepath):
"""Overwrites data in .xgen files.
A quite naive approach that mainly overwrites "xgDataPath" and "xgProjectPath".
Args:
data (dict): Dictionary of key, value. Key matches with xgen file.
For example:
{"xgDataPath": "some/path"}
filepath (string): Absolute path of .xgen file.
"""
# Generate a regex lookup from line to key; it basically matches any
# of the keys in the form `\t{key}\t\t`.
keys = "|".join(re.escape(key) for key in data.keys())
re_keys = re.compile("^\t({})\t\t".format(keys))
lines = []
with open(filepath, "r") as f:
for line in f:
match = re_keys.match(line)
if match:
key = match.group(1)
value = data[key]
line = "\t{}\t\t{}\n".format(key, value)
lines.append(line)
with open(filepath, "w") as f:
f.writelines(lines)
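
A short usage sketch for `write_xgen_file` above; the file content and paths are illustrative. Attribute lines in an .xgen file follow the `\t<key>\t\t<value>` layout the regex expects, so only matching lines get their value replaced and everything else is written back unchanged:

```python
# Hypothetical call; the .xgen file would contain lines such as:
#     \txgDataPath\t\t/old/collections
#     \txgProjectPath\t\t/old/
write_xgen_file(
    {
        "xgDataPath": "${PROJECT}xgen/collections/hero__collection",
        "xgProjectPath": "/projects/show/work/hero/",
    },
    "/projects/show/work/hero/hero__collection.xgen",
)
```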

View file

@ -300,6 +300,39 @@ class ReferenceLoader(Loader):
str(representation["_id"]),
type="string")
# When an animation or pointcache gets connected to an Xgen container,
# the compound attribute "xgenContainers" gets created. When animation
# containers get updated, we also need to update the cacheFileName on
# the Xgen collection.
compound_name = "xgenContainers"
if cmds.objExists("{}.{}".format(node, compound_name)):
import xgenm
container_amount = cmds.getAttr(
"{}.{}".format(node, compound_name), size=True
)
# loop through all compound children
for i in range(container_amount):
attr = "{}.{}[{}].container".format(node, compound_name, i)
objectset = cmds.listConnections(attr)[0]
reference_node = cmds.sets(objectset, query=True)[0]
palettes = cmds.ls(
cmds.referenceQuery(reference_node, nodes=True),
type="xgmPalette"
)
for palette in palettes:
for description in xgenm.descriptions(palette):
xgenm.setAttr(
"cacheFileName",
path.replace("\\", "/"),
palette,
description,
"SplinePrimitive"
)
# Refresh UI and viewport.
de = xgenm.xgGlobal.DescriptionEditor
de.refresh("Full")
def remove(self, container):
"""Remove an existing `container` from Maya scene

View file

@ -54,6 +54,7 @@ class CreateRender(plugin.Creator):
tileRendering (bool): Instance is set to tile rendering mode. We
won't submit an actual render, but we'll make the publish job wait
for the Tile Assembly job to finish and then publish.
strict_error_checking (bool): Enable/disable strict error checking on Deadline.
See Also:
https://pype.club/docs/artist_hosts_maya#creating-basic-render-setup
@ -271,6 +272,9 @@ class CreateRender(plugin.Creator):
secondary_pool = pool_setting["secondary_pool"]
self.data["secondaryPool"] = self._set_default_pool(pool_names,
secondary_pool)
strict_error_checking = maya_submit_dl.get("strict_error_checking",
True)
self.data["strict_error_checking"] = strict_error_checking
if muster_enabled:
self.log.info(">>> Loading Muster credentials ...")

View file

@ -2,9 +2,9 @@ from openpype.hosts.maya.api import plugin
class CreateXgen(plugin.Creator):
"""Xgen interactive export"""
"""Xgen"""
name = "xgen"
label = "Xgen Interactive"
label = "Xgen"
family = "xgen"
icon = "pagelines"

View file

@ -0,0 +1,153 @@
from maya import cmds
from openpype.pipeline import InventoryAction, get_representation_context
from openpype.hosts.maya.api.lib import get_id
class ConnectGeometry(InventoryAction):
"""Connect geometries within containers.
Source container will connect to the target containers, by searching for
matching geometry IDs (cbid).
Source containers are of family; "animation" and "pointcache".
The connection with be done with a live world space blendshape.
"""
label = "Connect Geometry"
icon = "link"
color = "white"
def process(self, containers):
# Validate selection is more than 1.
message = (
"Only 1 container selected. 2+ containers needed for this action."
)
if len(containers) == 1:
self.display_warning(message)
return
# Categorize containers by family.
containers_by_family = {}
for container in containers:
family = get_representation_context(
container["representation"]
)["subset"]["data"]["family"]
try:
containers_by_family[family].append(container)
except KeyError:
containers_by_family[family] = [container]
# Validate to only 1 source container.
source_containers = containers_by_family.get("animation", [])
source_containers += containers_by_family.get("pointcache", [])
source_container_namespaces = [
x["namespace"] for x in source_containers
]
message = (
"{} animation containers selected:\n\n{}\n\nOnly select 1 of type "
"\"animation\" or \"pointcache\".".format(
len(source_containers), source_container_namespaces
)
)
if len(source_containers) != 1:
self.display_warning(message)
return
source_object = source_containers[0]["objectName"]
# Collect matching geometry transforms based on the cbId attribute.
target_containers = []
for family, containers in containers_by_family.items():
if family in ["animation", "pointcache"]:
continue
target_containers.extend(containers)
source_data = self.get_container_data(source_object)
matches = []
node_types = set()
for target_container in target_containers:
target_data = self.get_container_data(
target_container["objectName"]
)
node_types.update(target_data["node_types"])
for id, transform in target_data["ids"].items():
source_match = source_data["ids"].get(id)
if source_match:
matches.append([source_match, transform])
# Message user about what is about to happen.
if not matches:
self.display_warning("No matching geometries found.")
return
message = "Connecting geometries:\n\n"
for match in matches:
message += "{} > {}\n".format(match[0], match[1])
choice = self.display_warning(message, show_cancel=True)
if choice is False:
return
# Setup live worldspace blendshape connection.
for source, target in matches:
blendshape = cmds.blendShape(source, target)[0]
cmds.setAttr(blendshape + ".origin", 0)
cmds.setAttr(blendshape + "." + target.split(":")[-1], 1)
# Update Xgen if in any of the containers.
if "xgmPalette" in node_types:
cmds.xgmPreview()
def get_container_data(self, container):
"""Collects data about the container nodes.
Args:
container (dict): Container instance.
Returns:
data (dict):
"node_types": All node types in container nodes.
"ids": If the node is a mesh, we collect its parent transform
id.
"""
data = {"node_types": set(), "ids": {}}
ref_node = cmds.sets(container, query=True, nodesOnly=True)[0]
for node in cmds.referenceQuery(ref_node, nodes=True):
node_type = cmds.nodeType(node)
data["node_types"].add(node_type)
# Only interested in mesh transforms for connecting geometry with
# blendshape.
if node_type != "mesh":
continue
transform = cmds.listRelatives(node, parent=True)[0]
data["ids"][get_id(transform)] = transform
return data
def display_warning(self, message, show_cancel=False):
"""Show feedback to user.
Returns:
bool
"""
from Qt import QtWidgets
accept = QtWidgets.QMessageBox.Ok
if show_cancel:
buttons = accept | QtWidgets.QMessageBox.Cancel
else:
buttons = accept
state = QtWidgets.QMessageBox.warning(
None,
"",
message,
buttons=buttons,
defaultButton=accept
)
return state == accept

View file

@ -0,0 +1,168 @@
from maya import cmds
import xgenm
from openpype.pipeline import (
InventoryAction, get_representation_context, get_representation_path
)
class ConnectXgen(InventoryAction):
"""Connect Xgen with an animation or pointcache.
"""
label = "Connect Xgen"
icon = "link"
color = "white"
def process(self, containers):
# Validate selection is more than 1.
message = (
"Only 1 container selected. 2+ containers needed for this action."
)
if len(containers) == 1:
self.display_warning(message)
return
# Categorize containers by family.
containers_by_family = {}
for container in containers:
family = get_representation_context(
container["representation"]
)["subset"]["data"]["family"]
try:
containers_by_family[family].append(container)
except KeyError:
containers_by_family[family] = [container]
# Validate to only 1 source container.
source_containers = containers_by_family.get("animation", [])
source_containers += containers_by_family.get("pointcache", [])
source_container_namespaces = [
x["namespace"] for x in source_containers
]
message = (
"{} animation containers selected:\n\n{}\n\nOnly select 1 of type "
"\"animation\" or \"pointcache\".".format(
len(source_containers), source_container_namespaces
)
)
if len(source_containers) != 1:
self.display_warning(message)
return
source_container = source_containers[0]
source_object = source_container["objectName"]
# Validate source representation is an alembic.
source_path = get_representation_path(
get_representation_context(
source_container["representation"]
)["representation"]
).replace("\\", "/")
message = "Animation container \"{}\" is not an alembic:\n{}".format(
source_container["namespace"], source_path
)
if not source_path.endswith(".abc"):
self.display_warning(message)
return
# Target containers.
target_containers = []
for family, containers in containers_by_family.items():
if family in ["animation", "pointcache"]:
continue
target_containers.extend(containers)
# Inform user of connections from source representation to target
# descriptions.
descriptions_data = []
connections_msg = ""
for target_container in target_containers:
reference_node = cmds.sets(
target_container["objectName"], query=True
)[0]
palettes = cmds.ls(
cmds.referenceQuery(reference_node, nodes=True),
type="xgmPalette"
)
for palette in palettes:
for description in xgenm.descriptions(palette):
descriptions_data.append([palette, description])
connections_msg += "\n{}/{}".format(palette, description)
message = "Connecting \"{}\" to:\n".format(
source_container["namespace"]
)
message += connections_msg
choice = self.display_warning(message, show_cancel=True)
if choice is False:
return
# Recreate "xgenContainers" attribute to reset.
compound_name = "xgenContainers"
attr = "{}.{}".format(source_object, compound_name)
if cmds.objExists(attr):
cmds.deleteAttr(attr)
cmds.addAttr(
source_object,
longName=compound_name,
attributeType="compound",
numberOfChildren=1,
multi=True
)
# Connect target containers.
for target_container in target_containers:
cmds.addAttr(
source_object,
longName="container",
attributeType="message",
parent=compound_name
)
index = target_containers.index(target_container)
cmds.connectAttr(
target_container["objectName"] + ".message",
source_object + ".{}[{}].container".format(
compound_name, index
)
)
# Setup cache on Xgen
object = "SplinePrimitive"
for palette, description in descriptions_data:
xgenm.setAttr("useCache", "true", palette, description, object)
xgenm.setAttr("liveMode", "false", palette, description, object)
xgenm.setAttr(
"cacheFileName", source_path, palette, description, object
)
# Refresh UI and viewport.
de = xgenm.xgGlobal.DescriptionEditor
de.refresh("Full")
def display_warning(self, message, show_cancel=False):
"""Show feedback to user.
Returns:
bool
"""
from Qt import QtWidgets
accept = QtWidgets.QMessageBox.Ok
if show_cancel:
buttons = accept | QtWidgets.QMessageBox.Cancel
else:
buttons = accept
state = QtWidgets.QMessageBox.warning(
None,
"",
message,
buttons=buttons,
defaultButton=accept
)
return state == accept

View file

@ -93,7 +93,20 @@ class ImportMayaLoader(load.LoaderPlugin):
"""
representations = ["ma", "mb", "obj"]
families = ["*"]
families = [
"model",
"pointcache",
"proxyAbc",
"animation",
"mayaAscii",
"mayaScene",
"setdress",
"layout",
"camera",
"rig",
"camerarig",
"staticMesh"
]
label = "Import"
order = 10

View file

@ -25,9 +25,9 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
"camera",
"rig",
"camerarig",
"xgen",
"staticMesh",
"mvLook"]
representations = ["ma", "abc", "fbx", "mb"]
label = "Reference"

View file

@ -81,10 +81,11 @@ class VRayProxyLoader(load.LoaderPlugin):
c = colors.get(family)
if c is not None:
cmds.setAttr("{0}.useOutlinerColor".format(group_node), 1)
cmds.setAttr("{0}.outlinerColor".format(group_node),
(float(c[0])/255),
(float(c[1])/255),
(float(c[2])/255)
cmds.setAttr(
"{0}.outlinerColor".format(group_node),
(float(c[0]) / 255),
(float(c[1]) / 255),
(float(c[2]) / 255)
)
return containerise(
@ -101,7 +102,7 @@ class VRayProxyLoader(load.LoaderPlugin):
assert cmds.objExists(node), "Missing container"
members = cmds.sets(node, query=True) or []
vraymeshes = cmds.ls(members, type="VRayMesh")
vraymeshes = cmds.ls(members, type="VRayProxy")
assert vraymeshes, "Cannot find VRayMesh in container"
# get all representations for this version

View file

@ -0,0 +1,173 @@
import os
import maya.cmds as cmds
import xgenm
from Qt import QtWidgets
import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api.lib import (
maintained_selection,
get_container_members,
attribute_values,
write_xgen_file
)
from openpype.hosts.maya.api import current_file
from openpype.pipeline import get_representation_path
class XgenLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
"""Load Xgen as reference"""
families = ["xgen"]
representations = ["ma", "mb"]
label = "Reference Xgen"
icon = "code-fork"
color = "orange"
def get_xgen_xgd_paths(self, palette):
_, maya_extension = os.path.splitext(current_file())
xgen_file = current_file().replace(
maya_extension,
"__{}.xgen".format(palette.replace("|", "").replace(":", "__"))
)
xgd_file = xgen_file.replace(".xgen", ".xgd")
return xgen_file, xgd_file
def process_reference(self, context, name, namespace, options):
# Validate workfile has a path.
if current_file() is None:
QtWidgets.QMessageBox.warning(
None,
"",
"Current workfile has not been saved. Please save the workfile"
" before loading an Xgen."
)
return
maya_filepath = self.prepare_root_value(
self.fname, context["project"]["name"]
)
# Reference xgen. Xgen does not like being referenced under a group.
new_nodes = []
with maintained_selection():
nodes = cmds.file(
maya_filepath,
namespace=namespace,
sharedReferenceFile=False,
reference=True,
returnNewNodes=True
)
xgen_palette = cmds.ls(
nodes, type="xgmPalette", long=True
)[0].replace("|", "")
xgen_file, xgd_file = self.get_xgen_xgd_paths(xgen_palette)
self.set_palette_attributes(xgen_palette, xgen_file, xgd_file)
# Change the cache and disk values of xgDataPath and xgProjectPath
# to ensure paths are setup correctly.
project_path = os.path.dirname(current_file()).replace("\\", "/")
xgenm.setAttr("xgProjectPath", project_path, xgen_palette)
data_path = "${{PROJECT}}xgen/collections/{};{}".format(
xgen_palette.replace(":", "__ns__"),
xgenm.getAttr("xgDataPath", xgen_palette)
)
xgenm.setAttr("xgDataPath", data_path, xgen_palette)
data = {"xgProjectPath": project_path, "xgDataPath": data_path}
write_xgen_file(data, xgen_file)
# This creates a float expression attribute. If we do not add any
# changes to the collection, Xgen does not create an .xgd file on
# save, which gives errors when launching the workfile again because
# it tries to find the missing .xgd file.
name = "custom_float_ignore"
if name not in xgenm.customAttrs(xgen_palette):
xgenm.addCustomAttr(
"custom_float_ignore", xgen_palette
)
shapes = cmds.ls(nodes, shapes=True, long=True)
new_nodes = (list(set(nodes) - set(shapes)))
self[:] = new_nodes
return new_nodes
def set_palette_attributes(self, xgen_palette, xgen_file, xgd_file):
cmds.setAttr(
"{}.xgBaseFile".format(xgen_palette),
os.path.basename(xgen_file),
type="string"
)
cmds.setAttr(
"{}.xgFileName".format(xgen_palette),
os.path.basename(xgd_file),
type="string"
)
cmds.setAttr("{}.xgExportAsDelta".format(xgen_palette), True)
def update(self, container, representation):
"""Workflow for updating Xgen.
- Copy and potentially overwrite the workspace .xgen file.
- Export changes to delta file.
- Set collection attributes to not include delta files.
- Update xgen maya file reference.
- Apply the delta file changes.
- Reset collection attributes to include delta files.
We have to use this workflow because, when referencing the Xgen
collection, Maya implicitly imports the Xgen data from the .xgen file,
so we don't have any control over when the delta file changes are
applied.
There is an implicit increment of the .xgen and delta files, due to
using the workfile basename.
"""
container_node = container["objectName"]
members = get_container_members(container_node)
xgen_palette = cmds.ls(
members, type="xgmPalette", long=True
)[0].replace("|", "")
xgen_file, xgd_file = self.get_xgen_xgd_paths(xgen_palette)
# Export current changes to apply later.
xgenm.createDelta(xgen_palette.replace("|", ""), xgd_file)
self.set_palette_attributes(xgen_palette, xgen_file, xgd_file)
maya_file = get_representation_path(representation)
_, extension = os.path.splitext(maya_file)
new_xgen_file = maya_file.replace(extension, ".xgen")
data_path = ""
with open(new_xgen_file, "r") as f:
for line in f:
if line.startswith("\txgDataPath"):
line = line.rstrip()
data_path = line.split("\t")[-1]
break
project_path = os.path.dirname(current_file()).replace("\\", "/")
data_path = "${{PROJECT}}xgen/collections/{};{}".format(
xgen_palette.replace(":", "__ns__"),
data_path
)
data = {"xgProjectPath": project_path, "xgDataPath": data_path}
write_xgen_file(data, xgen_file)
attribute_data = {
"{}.xgFileName".format(xgen_palette): os.path.basename(xgen_file),
"{}.xgBaseFile".format(xgen_palette): "",
"{}.xgExportAsDelta".format(xgen_palette): False
}
with attribute_values(attribute_data):
super().update(container, representation)
xgenm.applyDelta(xgen_palette.replace("|", ""), xgd_file)

View file

@ -318,7 +318,9 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
"aovSeparator": layer_render_products.layer_data.aov_separator, # noqa: E501
"renderSetupIncludeLights": render_instance.data.get(
"renderSetupIncludeLights"
)
),
"strict_error_checking": render_instance.data.get(
"strict_error_checking")
}
# Collect Deadline url if Deadline module is enabled

View file

@ -0,0 +1,71 @@
import os
from maya import cmds
import pyblish.api
from openpype.hosts.maya.api.lib import get_attribute_input
class CollectXgen(pyblish.api.InstancePlugin):
"""Collect Xgen"""
order = pyblish.api.CollectorOrder + 0.499999
label = "Collect Xgen"
families = ["xgen"]
def process(self, instance):
data = {
"xgmPalettes": cmds.ls(instance, type="xgmPalette", long=True),
"xgmDescriptions": cmds.ls(
instance, type="xgmDescription", long=True
),
"xgmSubdPatches": cmds.ls(instance, type="xgmSubdPatch", long=True)
}
data["xgenNodes"] = (
data["xgmPalettes"] +
data["xgmDescriptions"] +
data["xgmSubdPatches"]
)
if data["xgmPalettes"]:
data["xgmPalette"] = data["xgmPalettes"][0]
data["xgenConnections"] = {}
for node in data["xgmSubdPatches"]:
data["xgenConnections"][node] = {}
for attr in ["transform", "geometry"]:
input = get_attribute_input("{}.{}".format(node, attr))
data["xgenConnections"][node][attr] = input
# Collect all files under palette root as resources.
import xgenm
data_path = xgenm.getAttr(
"xgDataPath", data["xgmPalette"].replace("|", "")
).split(os.pathsep)[0]
data_path = data_path.replace(
"${PROJECT}",
xgenm.getAttr("xgProjectPath", data["xgmPalette"].replace("|", ""))
)
transfers = []
# Since we are duplicating this palette when extracting we predict that
# the name will be the basename without namespaces.
predicted_palette_name = data["xgmPalette"].split(":")[-1]
predicted_palette_name = predicted_palette_name.replace("|", "")
for root, _, files in os.walk(data_path):
for file in files:
source = os.path.join(root, file).replace("\\", "/")
destination = os.path.join(
instance.data["resourcesDir"],
"collections",
predicted_palette_name,
source.replace(data_path, "")[1:]
)
transfers.append((source, destination.replace("\\", "/")))
data["transfers"] = transfers
self.log.info(data)
instance.data.update(data)

View file

@ -20,8 +20,7 @@ class ExtractMayaSceneRaw(publish.Extractor):
"mayaScene",
"setdress",
"layout",
"camerarig",
"xgen"]
"camerarig"]
scene_type = "ma"
def process(self, instance):

View file

@ -0,0 +1,250 @@
import os
import shutil
import copy
from maya import cmds
import pyblish.api
from openpype.hosts.maya.api.lib import extract_alembic
from openpype.pipeline import publish
from openpype.lib import StringTemplate
class ExtractWorkfileXgen(publish.Extractor):
"""Extract Workfile Xgen.
When submitting a render, we need to prep Xgen side car files.
"""
# Offset to run before workfile scene save.
order = pyblish.api.ExtractorOrder - 0.499
label = "Extract Workfile Xgen"
families = ["workfile"]
hosts = ["maya"]
def get_render_max_frame_range(self, context):
"""Return start to end frame range including all renderlayers in
context.
This will return the full frame range which includes all frames of the
renderlayer instances to be published/submitted.
Args:
context (pyblish.api.Context): Current publishing context.
Returns:
tuple or None: Start frame, end frame tuple if any renderlayers
found. Otherwise None is returned.
"""
def _is_active_renderlayer(i):
"""Return whether instance is active renderlayer"""
if not i.data.get("publish", True):
return False
is_renderlayer = (
"renderlayer" in i.data.get("families", []) or
i.data["family"] == "renderlayer"
)
return is_renderlayer
start_frame = None
end_frame = None
for instance in context:
if not _is_active_renderlayer(instance):
# Only consider renderlayer instances
continue
render_start_frame = instance.data["frameStart"]
render_end_frame = instance.data["frameEnd"]
if start_frame is None:
start_frame = render_start_frame
else:
start_frame = min(start_frame, render_start_frame)
if end_frame is None:
end_frame = render_end_frame
else:
end_frame = max(end_frame, render_end_frame)
if start_frame is None or end_frame is None:
return
return start_frame, end_frame
def process(self, instance):
transfers = []
# Skip extraction if there are no palettes in the scene.
if not cmds.ls(type="xgmPalette"):
self.log.debug(
"No collections found in the scene. Skipping Xgen extraction."
)
return
import xgenm
# Validate to extract only when we are publishing a renderlayer as
# well.
render_range = self.get_render_max_frame_range(instance.context)
if not render_range:
self.log.debug(
"No publishable renderlayers found in context. Skipping Xgen"
" extraction."
)
return
start_frame, end_frame = render_range
# We decrement start frame and increment end frame so motion blur will
# render correctly.
start_frame -= 1
end_frame += 1
# Extract patches alembic.
path_no_ext, _ = os.path.splitext(instance.context.data["currentFile"])
kwargs = {"attrPrefix": ["xgen"], "stripNamespaces": True}
alembic_files = []
for palette in cmds.ls(type="xgmPalette"):
patch_names = []
for description in xgenm.descriptions(palette):
for name in xgenm.boundGeometry(palette, description):
patch_names.append(name)
alembic_file = "{}__{}.abc".format(
path_no_ext, palette.replace(":", "__ns__")
)
extract_alembic(
alembic_file,
root=patch_names,
selection=False,
startFrame=float(start_frame),
endFrame=float(end_frame),
verbose=True,
**kwargs
)
alembic_files.append(alembic_file)
template_data = copy.deepcopy(instance.data["anatomyData"])
published_maya_path = StringTemplate(
instance.context.data["anatomy"].templates["publish"]["file"]
).format(template_data)
published_basename, _ = os.path.splitext(published_maya_path)
for source in alembic_files:
destination = os.path.join(
os.path.dirname(instance.data["resourcesDir"]),
os.path.basename(
source.replace(path_no_ext, published_basename)
)
)
transfers.append((source, destination))
# Validate that we are using the published workfile.
deadline_settings = instance.context.get("deadline")
if deadline_settings:
publish_settings = deadline_settings["publish"]
if not publish_settings["MayaSubmitDeadline"]["use_published"]:
self.log.debug(
"Not using the published workfile. Abort Xgen extraction."
)
return
# Collect Xgen and Delta files.
xgen_files = []
sources = []
current_dir = os.path.dirname(instance.context.data["currentFile"])
attrs = ["xgFileName", "xgBaseFile"]
for palette in cmds.ls(type="xgmPalette"):
for attr in attrs:
source = os.path.join(
current_dir, cmds.getAttr(palette + "." + attr)
)
if not os.path.exists(source):
continue
ext = os.path.splitext(source)[1]
if ext == ".xgen":
xgen_files.append(source)
if ext == ".xgd":
sources.append(source)
# Copy .xgen file to temporary location and modify.
staging_dir = self.staging_dir(instance)
for source in xgen_files:
destination = os.path.join(staging_dir, os.path.basename(source))
shutil.copy(source, destination)
lines = []
with open(destination, "r") as f:
for line in [line.rstrip() for line in f]:
if line.startswith("\txgProjectPath"):
path = os.path.dirname(instance.data["resourcesDir"])
line = "\txgProjectPath\t\t{}/".format(
path.replace("\\", "/")
)
lines.append(line)
with open(destination, "w") as f:
f.write("\n".join(lines))
sources.append(destination)
# Add resource files to workfile instance.
for source in sources:
basename = os.path.basename(source)
destination = os.path.join(
os.path.dirname(instance.data["resourcesDir"]), basename
)
transfers.append((source, destination))
destination_dir = os.path.join(
instance.data["resourcesDir"], "collections"
)
for palette in cmds.ls(type="xgmPalette"):
project_path = xgenm.getAttr("xgProjectPath", palette)
data_path = xgenm.getAttr("xgDataPath", palette)
data_path = data_path.replace("${PROJECT}", project_path)
for path in data_path.split(";"):
for root, _, files in os.walk(path):
for f in files:
source = os.path.join(root, f)
destination = "{}/{}{}".format(
destination_dir,
palette.replace(":", "__ns__"),
source.replace(path, "")
)
transfers.append((source, destination))
for source, destination in transfers:
self.log.debug("Transfer: {} > {}".format(source, destination))
instance.data["transfers"] = transfers
# Set palette attributes in preparation for workfile publish.
attrs = {"xgFileName": None, "xgBaseFile": ""}
data = {}
for palette in cmds.ls(type="xgmPalette"):
attrs["xgFileName"] = "resources/{}.xgen".format(
palette.replace(":", "__ns__")
)
for attr, value in attrs.items():
node_attr = palette + "." + attr
old_value = cmds.getAttr(node_attr)
try:
data[palette][attr] = old_value
except KeyError:
data[palette] = {attr: old_value}
cmds.setAttr(node_attr, value, type="string")
self.log.info(
"Setting \"{}\" on \"{}\"".format(value, node_attr)
)
cmds.setAttr(palette + "." + "xgExportAsDelta", False)
instance.data["xgenAttributes"] = data

View file

@ -0,0 +1,142 @@
import os
import copy
import tempfile
from maya import cmds
import xgenm
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import (
maintained_selection, attribute_values, write_xgen_file, delete_after
)
from openpype.lib import StringTemplate
class ExtractXgen(publish.Extractor):
"""Extract Xgen
Workflow:
- Duplicate nodes used for patches.
- Export palette and import onto duplicate nodes.
- Export/Publish duplicate nodes and palette.
- Export duplicate palette to .xgen file and add to publish.
- Publish all xgen files as resources.
"""
label = "Extract Xgen"
hosts = ["maya"]
families = ["xgen"]
scene_type = "ma"
def process(self, instance):
if "representations" not in instance.data:
instance.data["representations"] = []
staging_dir = self.staging_dir(instance)
maya_filename = "{}.{}".format(instance.data["name"], self.scene_type)
maya_filepath = os.path.join(staging_dir, maya_filename)
# Get published xgen file name.
template_data = copy.deepcopy(instance.data["anatomyData"])
template_data.update({"ext": "xgen"})
templates = instance.context.data["anatomy"].templates["publish"]
xgen_filename = StringTemplate(templates["file"]).format(template_data)
xgen_path = os.path.join(
self.staging_dir(instance), xgen_filename
).replace("\\", "/")
type = "mayaAscii" if self.scene_type == "ma" else "mayaBinary"
# Duplicate xgen setup.
with delete_after() as delete_bin:
duplicate_nodes = []
# Collect nodes to export.
for _, connections in instance.data["xgenConnections"].items():
transform_name = connections["transform"].split(".")[0]
# Duplicate the subd patch geometry transform.
duplicate_transform = cmds.duplicate(transform_name)[0]
delete_bin.append(duplicate_transform)
# Discard the children.
shapes = cmds.listRelatives(duplicate_transform, shapes=True)
children = cmds.listRelatives(
duplicate_transform, children=True
)
cmds.delete(set(children) - set(shapes))
duplicate_transform = cmds.parent(
duplicate_transform, world=True
)[0]
duplicate_nodes.append(duplicate_transform)
# Export temp xgen palette files.
temp_xgen_path = os.path.join(
tempfile.gettempdir(), "temp.xgen"
).replace("\\", "/")
xgenm.exportPalette(
instance.data["xgmPalette"].replace("|", ""), temp_xgen_path
)
self.log.info("Extracted to {}".format(temp_xgen_path))
# Import xgen onto the duplicate.
with maintained_selection():
cmds.select(duplicate_nodes)
palette = xgenm.importPalette(temp_xgen_path, [])
delete_bin.append(palette)
# Export duplicated palettes.
xgenm.exportPalette(palette, xgen_path)
# Export Maya file.
attribute_data = {"{}.xgFileName".format(palette): xgen_filename}
with attribute_values(attribute_data):
with maintained_selection():
cmds.select(duplicate_nodes + [palette])
cmds.file(
maya_filepath,
force=True,
type=type,
exportSelected=True,
preserveReferences=False,
constructionHistory=True,
shader=True,
constraints=True,
expressions=True
)
self.log.info("Extracted to {}".format(maya_filepath))
if os.path.exists(temp_xgen_path):
os.remove(temp_xgen_path)
data = {
"xgDataPath": os.path.join(
instance.data["resourcesDir"],
"collections",
palette.replace(":", "__ns__")
).replace("\\", "/"),
"xgProjectPath": os.path.dirname(
instance.data["resourcesDir"]
).replace("\\", "/")
}
write_xgen_file(data, xgen_path)
# Adding representations.
representation = {
"name": "xgen",
"ext": "xgen",
"files": xgen_filename,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)
representation = {
"name": self.scene_type,
"ext": self.scene_type,
"files": maya_filename,
"stagingDir": staging_dir
}
instance.data["representations"].append(representation)

View file

@ -1,64 +0,0 @@
import os
from maya import cmds
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import (
suspended_refresh,
maintained_selection
)
class ExtractXgenCache(publish.Extractor):
"""Produce an alembic of just xgen interactive groom
"""
label = "Extract Xgen ABC Cache"
hosts = ["maya"]
families = ["xgen"]
optional = True
def process(self, instance):
# Collect the out set nodes
out_descriptions = [node for node in instance
if cmds.nodeType(node) == "xgmSplineDescription"]
start = 1
end = 1
self.log.info("Extracting Xgen Cache..")
dirname = self.staging_dir(instance)
parent_dir = self.staging_dir(instance)
filename = "{name}.abc".format(**instance.data)
path = os.path.join(parent_dir, filename)
with suspended_refresh():
with maintained_selection():
command = (
'-file '
+ path
+ ' -df "ogawa" -fr '
+ str(start)
+ ' '
+ str(end)
+ ' -step 1 -mxf -wfw'
)
for desc in out_descriptions:
command += (" -obj " + desc)
cmds.xgmSplineCache(export=True, j=command)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'abc',
'ext': 'abc',
'files': filename,
"stagingDir": dirname,
}
instance.data["representations"].append(representation)
self.log.info("Extracted {} to {}".format(instance, dirname))

View file

@ -0,0 +1,36 @@
from maya import cmds
import pyblish.api
class ResetXgenAttributes(pyblish.api.InstancePlugin):
"""Reset Xgen attributes.
When the incremental save of the workfile triggers, the Xgen attributes
change, so this plugin changes them back to the values from before
publishing.
"""
label = "Reset Xgen Attributes."
# Offset to run after workfile increment plugin.
order = pyblish.api.IntegratorOrder + 10.0
families = ["workfile"]
def process(self, instance):
xgen_attributes = instance.data.get("xgenAttributes", {})
if not xgen_attributes:
return
for palette, data in xgen_attributes.items():
for attr, value in data.items():
node_attr = "{}.{}".format(palette, attr)
self.log.info(
"Setting \"{}\" on \"{}\"".format(value, node_attr)
)
cmds.setAttr(node_attr, value, type="string")
cmds.setAttr(palette + ".xgExportAsDelta", True)
# Need to save the scene, because the attribute changes above do not
# mark the scene as modified, so the user could exit without committing
# the changes.
self.log.info("Saving changes.")
cmds.file(save=True)

View file

@ -0,0 +1,18 @@
from maya import cmds
import pyblish.api
from openpype.pipeline import PublishValidationError
class ValidateVray(pyblish.api.InstancePlugin):
"""Validate general Vray setup."""
order = pyblish.api.ValidatorOrder
label = 'VRay'
hosts = ["maya"]
families = ["vrayproxy"]
def process(self, instance):
# Validate vray plugin is loaded.
if not cmds.pluginInfo("vrayformaya", query=True, loaded=True):
raise PublishValidationError("Vray plugin is not loaded.")

View file

@ -0,0 +1,59 @@
import json
import maya.cmds as cmds
import xgenm
import pyblish.api
from openpype.pipeline.publish import PublishValidationError
class ValidateXgen(pyblish.api.InstancePlugin):
"""Validate Xgen data."""
label = "Validate Xgen"
order = pyblish.api.ValidatorOrder
hosts = ["maya"]
families = ["xgen"]
def process(self, instance):
set_members = instance.data.get("setMembers")
# Only 1 collection/node per instance.
if len(set_members) != 1:
raise PublishValidationError(
"Only one collection per instance is allowed."
" Found:\n{}".format(set_members)
)
# Only xgen palette node is allowed.
node_type = cmds.nodeType(set_members[0])
if node_type != "xgmPalette":
raise PublishValidationError(
"Only node of type \"xgmPalette\" are allowed. Referred to as"
" \"collection\" in the Maya UI."
" Node type found: {}".format(node_type)
)
# Can't have inactive modifiers in the collection because Xgen will try
# to look for them when loading.
palette = instance.data["xgmPalette"].replace("|", "")
inactive_modifiers = {}
for description in instance.data["xgmDescriptions"]:
description = description.split("|")[-2]
modifier_names = xgenm.fxModules(palette, description)
for name in modifier_names:
attr = xgenm.getAttr("active", palette, description, name)
# Attribute value are lowercase strings of false/true.
if attr == "false":
try:
inactive_modifiers[description].append(name)
except KeyError:
inactive_modifiers[description] = [name]
if inactive_modifiers:
raise PublishValidationError(
"There are inactive modifiers on the collection. "
"Please delete these:\n{}".format(
json.dumps(inactive_modifiers, indent=4, sort_keys=True)
)
)

View file

@ -1,16 +1,33 @@
import os
from functools import partial
from openpype.settings import get_project_settings
from openpype.pipeline import install_host
from openpype.hosts.maya.api import MayaHost
from maya import cmds
host = MayaHost()
install_host(host)
print("starting OpenPype usersetup")
print("Starting OpenPype usersetup...")
# build a shelf
# Open Workfile Post Initialization.
key = "OPENPYPE_OPEN_WORKFILE_POST_INITIALIZATION"
if bool(int(os.environ.get(key, "0"))):
cmds.evalDeferred(
partial(
cmds.file,
os.environ["AVALON_LAST_WORKFILE"],
open=True,
force=True
),
lowestPriority=True
)
# Build a shelf.
settings = get_project_settings(os.environ['AVALON_PROJECT'])
shelf_preset = settings['maya'].get('project_shelf')
@ -26,7 +43,10 @@ if shelf_preset:
print(import_string)
exec(import_string)
cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], iconPath=icon_path, preset=shelf_preset)")
cmds.evalDeferred(
"mlib.shelf(name=shelf_preset['name'], iconPath=icon_path,"
" preset=shelf_preset)"
)
print("finished OpenPype usersetup")
print("Finished OpenPype usersetup.")

View file

@ -1,7 +1,7 @@
import os
import nuke
import pyblish.api
import openpype.api as api
from openpype.lib import get_version_from_path
import openpype.hosts.nuke.api as napi
from openpype.pipeline import KnownPublishError
@ -57,7 +57,7 @@ class CollectContextData(pyblish.api.ContextPlugin):
"fps": root_node['fps'].value(),
"currentFile": current_file,
"version": int(api.get_version_from_path(current_file)),
"version": int(get_version_from_path(current_file)),
"host": pyblish.api.current_host(),
"hostVersion": nuke.NUKE_VERSION_STRING

View file

@ -5,7 +5,7 @@ from openpype.lib import BoolDef
from openpype.pipeline import (
Creator,
CreatedInstance,
legacy_io
CreatorError
)
from openpype.lib import prepare_template_data
from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS
@ -13,27 +13,16 @@ from openpype.hosts.photoshop.api.pipeline import cache_and_get_instances
class ImageCreator(Creator):
"""Creates image instance for publishing."""
"""Creates image instance for publishing.
Result of an 'image' instance is an image of all visible layers, or
image(s) of selected layers.
"""
identifier = "image"
label = "Image"
family = "image"
description = "Image creator"
def collect_instances(self):
for instance_data in cache_and_get_instances(self):
# legacy instances have family=='image'
creator_id = (instance_data.get("creator_identifier") or
instance_data.get("family"))
if creator_id == self.identifier:
instance_data = self._handle_legacy(instance_data)
layer = api.stub().get_layer(instance_data["members"][0])
instance_data["layer"] = layer
instance = CreatedInstance.from_existing(
instance_data, self
)
self._add_instance_to_context(instance)
def create(self, subset_name_from_ui, data, pre_create_data):
groups_to_create = []
top_layers_to_wrap = []
@ -59,9 +48,10 @@ class ImageCreator(Creator):
try:
group = stub.group_selected_layers(subset_name_from_ui)
except:
raise ValueError("Cannot group locked Bakcground layer!")
raise CreatorError("Cannot group locked Background layer!")
groups_to_create.append(group)
# create empty group if nothing selected
if not groups_to_create and not top_layers_to_wrap:
group = stub.create_group(subset_name_from_ui)
groups_to_create.append(group)
@ -73,13 +63,16 @@ class ImageCreator(Creator):
groups_to_create.append(group)
layer_name = ''
creating_multiple_groups = len(groups_to_create) > 1
# use artist chosen option OR force layer if more subsets are created
# to differentiate them
use_layer_name = (pre_create_data.get("use_layer_name") or
len(groups_to_create) > 1)
for group in groups_to_create:
subset_name = subset_name_from_ui # reset to name from creator UI
layer_names_in_hierarchy = []
created_group_name = self._clean_highlights(stub, group.name)
if creating_multiple_groups:
if use_layer_name:
layer_name = re.sub(
"[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
"",
@ -112,6 +105,21 @@ class ImageCreator(Creator):
stub.rename_layer(group.id,
stub.PUBLISH_ICON + created_group_name)
def collect_instances(self):
for instance_data in cache_and_get_instances(self):
# legacy instances have family=='image'
creator_id = (instance_data.get("creator_identifier") or
instance_data.get("family"))
if creator_id == self.identifier:
instance_data = self._handle_legacy(instance_data)
layer = api.stub().get_layer(instance_data["members"][0])
instance_data["layer"] = layer
instance = CreatedInstance.from_existing(
instance_data, self
)
self._add_instance_to_context(instance)
def update_instances(self, update_list):
self.log.debug("update_list:: {}".format(update_list))
for created_inst, _changes in update_list:
@ -137,12 +145,42 @@ class ImageCreator(Creator):
label="Create only for selected"),
BoolDef("create_multiple",
default=True,
label="Create separate instance for each selected")
label="Create separate instance for each selected"),
BoolDef("use_layer_name",
default=False,
label="Use layer name in subset")
]
return output
def get_detail_description(self):
return """Creator for Image instances"""
return """Creator for Image instances
Main publishable item in Photoshop will be of the `image` family. The
result of this item (instance) is a picture that can be loaded and used
in other DCCs (for example as a single layer in an AfterEffects
composition, a reference in Maya etc.).
There are a couple of options for what to publish:
- separate image per selected layer (or group of layers)
- one image for all selected layers
- all visible layers (groups) flattened into single image
In most cases you would like to keep `Create only for selected`
toggled on and select what you would like to publish.
Toggling this option off will allow you to create an instance for all
visible layers without needing to select them explicitly.
Use 'Create separate instance for each selected' to create separate
images per selected layer (group of layers).
'Use layer name in subset' will explicitly add the layer name into the
subset name. Position of this name is configurable in
`project_settings/global/tools/creator/subset_name_profiles`.
If the layer placeholder ({layer}) is not used in `subset_name_profiles`
but the layer name should be used (set explicitly in UI or implicitly if
multiple images should be created), it is added in capitalized form
as a suffix to the subset name.
"""
def _handle_legacy(self, instance_data):
"""Converts old instances to new format."""

View file

@ -37,7 +37,7 @@ class TrayPublisherHost(HostBase, IPublishHost):
return HostContext.get_context_data()
def update_context_data(self, data, changes):
HostContext.save_context_data(data, changes)
HostContext.save_context_data(data)
def set_project_name(self, project_name):
# TODO Deregister project specific plugins and register new project

View file

@ -33,6 +33,8 @@ class BatchMovieCreator(TrayPublishCreator):
create_allow_context_change = False
version_regex = re.compile(r"^(.+)_v([0-9]+)$")
# Position batch creator after simple creators
order = 110
def __init__(self, project_settings, *args, **kwargs):
super(BatchMovieCreator, self).__init__(project_settings,

View file

@ -64,6 +64,7 @@ class MayaPluginInfo(object):
# Include all lights flag
RenderSetupIncludeLights = attr.ib(
default="1", validator=_validate_deadline_bool_value)
StrictErrorChecking = attr.ib(default=True)
@attr.s
@ -219,6 +220,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
"renderSetupIncludeLights", default_rs_include_lights)
if rs_include_lights not in {"1", "0", True, False}:
rs_include_lights = default_rs_include_lights
strict_error_checking = instance.data.get("strict_error_checking",
True)
plugin_info = MayaPluginInfo(
SceneFile=self.scene_path,
Version=cmds.about(version=True),
@ -227,6 +230,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
RenderSetupIncludeLights=rs_include_lights, # noqa
ProjectPath=context.data["workspaceDir"],
UsingRenderLayers=True,
StrictErrorChecking=strict_error_checking
)
plugin_payload = attr.asdict(plugin_info)
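How the new flag travels into the Deadline payload can be seen in a reduced attrs sketch (trimmed to two fields, not the full MayaPluginInfo):

```python
import attr

@attr.s
class PluginInfoSketch(object):
    SceneFile = attr.ib(default=None)
    StrictErrorChecking = attr.ib(default=True)

# instance.data.get("strict_error_checking", True) feeds this field.
payload = attr.asdict(
    PluginInfoSketch(SceneFile="shot.ma", StrictErrorChecking=False)
)
print(payload)
# {'SceneFile': 'shot.ma', 'StrictErrorChecking': False}
```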

View file

@ -35,7 +35,7 @@ class OpenPypeVersion:
self.prerelease = prerelease
is_valid = True
if not major or not minor or not patch:
if major is None or minor is None or patch is None:
is_valid = False
self.is_valid = is_valid
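The change matters because a legitimate version component can be 0, which is falsy in Python, so the old truthiness test rejected valid versions:

```python
# Version 3.0.1 has minor == 0; the old check wrongly flagged it invalid.
major, minor, patch = 3, 0, 1
print(not major or not minor or not patch)              # True  (wrongly invalid)
print(major is None or minor is None or patch is None)  # False (correctly valid)
```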
@ -157,7 +157,7 @@ def get_openpype_version_from_path(path, build=True):
# fix path for application bundle on macos
if platform.system().lower() == "darwin":
path = os.path.join(path, "Contents", "MacOS", "lib", "Python")
path = os.path.join(path, "MacOS")
version_file = os.path.join(path, "openpype", "version.py")
if not os.path.isfile(version_file):
@ -189,6 +189,11 @@ def get_openpype_executable():
exe_list = config.GetConfigEntryWithDefault("OpenPypeExecutable", "")
dir_list = config.GetConfigEntryWithDefault(
"OpenPypeInstallationDirs", "")
# clean '\ ' for MacOS pasting
if platform.system().lower() == "darwin":
exe_list = exe_list.replace("\\ ", " ")
dir_list = dir_list.replace("\\ ", " ")
return exe_list, dir_list
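The macOS cleanup undoes shell-style escaping of spaces that tends to appear when paths are pasted into the Deadline config. A short sketch with a hypothetical path:

```python
# A macOS path pasted from a terminal often arrives with escaped spaces.
pasted = "/Applications/Open\\ Pype.app/Contents/MacOS/openpype_console"
cleaned = pasted.replace("\\ ", " ")
print(cleaned)
# /Applications/Open Pype.app/Contents/MacOS/openpype_console
```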
@ -218,8 +223,8 @@ def get_requested_openpype_executable(
requested_version_obj = OpenPypeVersion.from_string(requested_version)
if not requested_version_obj:
print((
">>> Requested version does not match version regex \"{}\""
).format(VERSION_REGEX))
">>> Requested version '{}' does not match version regex '{}'"
).format(requested_version, VERSION_REGEX))
return None
print((
@ -272,7 +277,8 @@ def get_requested_openpype_executable(
# Deadline decide.
exe_list = [
os.path.join(version_dir, "openpype_console.exe"),
os.path.join(version_dir, "openpype_console")
os.path.join(version_dir, "openpype_console"),
os.path.join(version_dir, "MacOS", "openpype_console")
]
return FileUtils.SearchFileList(";".join(exe_list))

View file

@ -73,7 +73,7 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin):
"""
# fix path for application bundle on macos
if platform.system().lower() == "darwin":
path = os.path.join(path, "Contents", "MacOS", "lib", "Python")
path = os.path.join(path, "MacOS")
version_file = os.path.join(path, "openpype", "version.py")
if not os.path.isfile(version_file):
@ -107,8 +107,11 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin):
"Scanning for compatible requested "
f"version {requested_version}"))
dir_list = self.GetConfigEntry("OpenPypeInstallationDirs")
# clean '\ ' for MacOS pasting
if platform.system().lower() == "darwin":
dir_list = dir_list.replace("\\ ", " ")
install_dir = DirectoryUtils.SearchDirectoryList(dir_list)
if dir:
if install_dir:
sub_dirs = [
f.path for f in os.scandir(install_dir)
if f.is_dir()
@ -120,6 +123,9 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin):
openpype_versions.append((version, subdir))
exe_list = self.GetConfigEntry("OpenPypeExecutable")
# clean '\ ' for MacOS pasting
if platform.system().lower() == "darwin":
exe_list = exe_list.replace("\\ ", " ")
exe = FileUtils.SearchFileList(exe_list)
if openpype_versions:
# if looking for requested compatible version,
@ -161,7 +167,9 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin):
os.path.join(
compatible_versions[-1][1], "openpype_console.exe"),
os.path.join(
compatible_versions[-1][1], "openpype_console")
compatible_versions[-1][1], "openpype_console"),
os.path.join(
compatible_versions[-1][1], "MacOS", "openpype_console")
]
exe = FileUtils.SearchFileList(";".join(exe_list))

View file

@ -316,7 +316,7 @@ def main_loop(ftrack_url):
statuser_failed_count = 0
# If thread failed test Ftrack and Mongo connection
elif not statuser_thread.isAlive():
elif not statuser_thread.is_alive():
statuser_thread.join()
statuser_thread = None
ftrack_accessible = False
@ -359,7 +359,7 @@ def main_loop(ftrack_url):
storer_failed_count = 0
# If thread failed test Ftrack and Mongo connection
elif not storer_thread.isAlive():
elif not storer_thread.is_alive():
if storer_thread.mongo_error:
raise MongoPermissionsError()
storer_thread.join()
@ -396,7 +396,7 @@ def main_loop(ftrack_url):
processor_failed_count = 0
# If thread failed test Ftrack and Mongo connection
elif not processor_thread.isAlive():
elif not processor_thread.is_alive():
if processor_thread.mongo_error:
raise Exception(
"Exiting because have issue with acces to MongoDB"

View file

@ -259,7 +259,7 @@ class CredentialsDialog(QtWidgets.QDialog):
# If there is an existing server thread running we need to stop it.
if self._login_server_thread:
if self._login_server_thread.isAlive():
if self._login_server_thread.is_alive():
self._login_server_thread.stop()
self._login_server_thread.join()
self._login_server_thread = None

View file

@ -19,6 +19,8 @@ oauth_config:
- chat:write.public
- files:write
- channels:read
- users:read
- usergroups:read
settings:
org_deploy_enabled: false
socket_mode_enabled: false

View file

@ -183,6 +183,319 @@ def prepare_failed_creator_operation_info(
}
_EMPTY_VALUE = object()
class TrackChangesItem(object):
"""Helper object to track changes in data.
Has access to full old and new data and will create a deep copy of them,
so it is not necessary to create a copy before passing them in.
Can work as a dictionary if old or new value is a dictionary. In
that case the received object is another 'TrackChangesItem'.
The goal is to be able to get the old or new value as it was, or only
the changed values, or information about removed/changed keys, and all
of that on any "dictionary level".
```
# Example of possible usages
>>> old_value = {
... "key_1": "value_1",
... "key_2": {
... "key_sub_1": 1,
... "key_sub_2": {
... "enabled": True
... }
... },
... "key_3": "value_2"
... }
>>> new_value = {
... "key_1": "value_1",
... "key_2": {
... "key_sub_2": {
... "enabled": False
... },
... "key_sub_3": 3
... },
... "key_3": "value_3"
... }
>>> changes = TrackChangesItem(old_value, new_value)
>>> changes.changed
True
>>> changes["key_2"]["key_sub_1"].new_value is None
True
>>> list(sorted(changes.changed_keys))
['key_2', 'key_3']
>>> changes["key_2"]["key_sub_2"]["enabled"].changed
True
>>> changes["key_2"].removed_keys
{'key_sub_1'}
>>> list(sorted(changes["key_2"].available_keys))
['key_sub_1', 'key_sub_2', 'key_sub_3']
>>> changes.new_value == new_value
True
# Get only changed values
only_changed_new_values = {
key: changes[key].new_value
for key in changes.changed_keys
}
```
Args:
old_value (Any): Old value.
new_value (Any): New value.
"""
def __init__(self, old_value, new_value):
self._changed = old_value != new_value
# Resolve if value is '_EMPTY_VALUE' after comparison of the values
if old_value is _EMPTY_VALUE:
old_value = None
if new_value is _EMPTY_VALUE:
new_value = None
self._old_value = copy.deepcopy(old_value)
self._new_value = copy.deepcopy(new_value)
self._old_is_dict = isinstance(old_value, dict)
self._new_is_dict = isinstance(new_value, dict)
self._old_keys = None
self._new_keys = None
self._available_keys = None
self._removed_keys = None
self._changed_keys = None
self._sub_items = None
def __getitem__(self, key):
"""Getter looks into subitems if object is dictionary."""
if self._sub_items is None:
self._prepare_sub_items()
return self._sub_items[key]
def __bool__(self):
"""Boolean of object is if old and new value are the same."""
return self._changed
def get(self, key, default=None):
"""Try to get sub item."""
if self._sub_items is None:
self._prepare_sub_items()
return self._sub_items.get(key, default)
@property
def old_value(self):
"""Get copy of old value.
Returns:
Any: Whatever old value was.
"""
return copy.deepcopy(self._old_value)
@property
def new_value(self):
"""Get copy of new value.
Returns:
Any: Whatever new value was.
"""
return copy.deepcopy(self._new_value)
@property
def changed(self):
"""Value changed.
Returns:
bool: If data changed.
"""
return self._changed
@property
def is_dict(self):
"""Object can be used as dictionary.
Returns:
bool: When can be used that way.
"""
return self._old_is_dict or self._new_is_dict
@property
def changes(self):
"""Get changes in raw data.
This method should be used only if 'is_dict' value is 'True'.
Returns:
Dict[str, Tuple[Any, Any]]: Changes are by key in tuple
(<old value>, <new value>). If 'is_dict' is 'False' then
output is always empty dictionary.
"""
output = {}
if not self.is_dict:
return output
old_value = self.old_value
new_value = self.new_value
for key in self.changed_keys:
_old = None
_new = None
if self._old_is_dict:
_old = old_value.get(key)
if self._new_is_dict:
_new = new_value.get(key)
output[key] = (_old, _new)
return output
# Methods/properties that can be used when 'is_dict' is 'True'
@property
def old_keys(self):
"""Keys from old value.
Empty set is returned if old value is not a dict.
Returns:
Set[str]: Keys from old value.
"""
if self._old_keys is None:
self._prepare_keys()
return set(self._old_keys)
@property
def new_keys(self):
"""Keys from new value.
Empty set is returned if new value is not a dict.
Returns:
Set[str]: Keys from new value.
"""
if self._new_keys is None:
self._prepare_keys()
return set(self._new_keys)
@property
def changed_keys(self):
"""Keys that has changed from old to new value.
Empty set is returned if both old and new value are not a dict.
Returns:
Set[str]: Keys of changed keys.
"""
if self._changed_keys is None:
self._prepare_sub_items()
return set(self._changed_keys)
@property
def available_keys(self):
"""All keys that are available in old and new value.
Empty set is returned if both old and new value are not a dict.
Output is Union of 'old_keys' and 'new_keys'.
Returns:
Set[str]: All keys from old and new value.
"""
if self._available_keys is None:
self._prepare_keys()
return set(self._available_keys)
@property
def removed_keys(self):
"""Key that are not available in new value but were in old value.
Returns:
Set[str]: All removed keys.
"""
if self._removed_keys is None:
self._prepare_sub_items()
return set(self._removed_keys)
def _prepare_keys(self):
old_keys = set()
new_keys = set()
if self._old_is_dict and self._new_is_dict:
old_keys = set(self._old_value.keys())
new_keys = set(self._new_value.keys())
elif self._old_is_dict:
old_keys = set(self._old_value.keys())
elif self._new_is_dict:
new_keys = set(self._new_value.keys())
self._old_keys = old_keys
self._new_keys = new_keys
self._available_keys = old_keys | new_keys
self._removed_keys = old_keys - new_keys
def _prepare_sub_items(self):
sub_items = {}
changed_keys = set()
old_keys = self.old_keys
new_keys = self.new_keys
new_value = self.new_value
old_value = self.old_value
if self._old_is_dict and self._new_is_dict:
for key in self.available_keys:
item = TrackChangesItem(
old_value.get(key), new_value.get(key)
)
sub_items[key] = item
if item.changed or key not in old_keys or key not in new_keys:
changed_keys.add(key)
elif self._old_is_dict:
old_keys = set(old_value.keys())
available_keys = set(old_keys)
changed_keys = set(available_keys)
for key in available_keys:
# NOTE Use '_EMPTY_VALUE' because old value could be 'None'
# which would result in "unchanged" item
sub_items[key] = TrackChangesItem(
old_value.get(key), _EMPTY_VALUE
)
elif self._new_is_dict:
new_keys = set(new_value.keys())
available_keys = set(new_keys)
changed_keys = set(available_keys)
for key in available_keys:
# NOTE Use '_EMPTY_VALUE' because new value could be 'None'
# which would result in "unchanged" item
sub_items[key] = TrackChangesItem(
_EMPTY_VALUE, new_value.get(key)
)
self._sub_items = sub_items
self._changed_keys = changed_keys
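A runnable sketch of the helper, mirroring the docstring example and assuming `TrackChangesItem` is imported from this module:

```python
old_value = {"key_1": "value_1", "key_2": {"enabled": True}}
new_value = {"key_1": "value_1", "key_2": {"enabled": False}}

changes = TrackChangesItem(old_value, new_value)
print(changes.changed)                        # True
print(sorted(changes.changed_keys))           # ['key_2']
print(changes["key_2"]["enabled"].new_value)  # False

# Collect only the changed values, as suggested in the docstring.
only_changed = {key: changes[key].new_value for key in changes.changed_keys}
print(only_changed)                           # {'key_2': {'enabled': False}}
```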
class InstanceMember:
"""Representation of instance member.
@ -300,6 +613,10 @@ class AttributeValues(object):
return list(self._attr_defs)
@property
def origin_data(self):
return copy.deepcopy(self._origin_data)
def data_to_store(self):
"""Create new dictionary with data to store.
@ -316,30 +633,6 @@ class AttributeValues(object):
output[key] = attr_def.default
return output
@staticmethod
def calculate_changes(new_data, old_data):
"""Calculate changes of 2 dictionary objects."""
changes = {}
for key, new_value in new_data.items():
old_value = old_data.get(key)
if old_value != new_value:
changes[key] = (old_value, new_value)
return changes
def changes(self):
return self.calculate_changes(self._data, self._origin_data)
def apply_changes(self, changes):
for key, item in changes.items():
old_value, new_value = item
if new_value is None:
if key in self:
self.pop(key)
elif self.get(key) != new_value:
self[key] = new_value
def get_serialized_attr_defs(self):
"""Serialize attribute definitions to json serializable types.
@ -467,36 +760,9 @@ class PublishAttributes:
output[key] = attr_value.data_to_store()
return output
def changes(self):
"""Return changes per each key."""
changes = {}
for key, attr_val in self._data.items():
attr_changes = attr_val.changes()
if attr_changes:
if key not in changes:
changes[key] = {}
changes[key].update(attr_val)
for key, value in self._origin_data.items():
if key not in self._data:
changes[key] = (value, None)
return changes
def apply_changes(self, changes):
for key, item in changes.items():
if isinstance(item, dict):
self._data[key].apply_changes(item)
continue
old_value, new_value = item
if new_value is not None:
raise ValueError(
"Unexpected type \"{}\" expected None".format(
str(type(new_value))
)
)
self.pop(key)
@property
def origin_data(self):
return copy.deepcopy(self._origin_data)
def set_publish_plugins(self, attr_plugins):
"""Set publish plugins attribute definitions."""
@ -763,6 +1029,10 @@ class CreatedInstance:
return label
return self._group_label
@property
def origin_data(self):
return copy.deepcopy(self._orig_data)
@property
def creator_identifier(self):
return self._data["creator_identifier"]
@ -817,29 +1087,7 @@ class CreatedInstance:
def changes(self):
"""Calculate and return changes."""
changes = {}
new_keys = set()
for key, new_value in self._data.items():
new_keys.add(key)
if key in ("creator_attributes", "publish_attributes"):
continue
old_value = self._orig_data.get(key)
if old_value != new_value:
changes[key] = (old_value, new_value)
creator_attr_changes = self.creator_attributes.changes()
if creator_attr_changes:
changes["creator_attributes"] = creator_attr_changes
publish_attr_changes = self.publish_attributes.changes()
if publish_attr_changes:
changes["publish_attributes"] = publish_attr_changes
for key, old_value in self._orig_data.items():
if key not in new_keys:
changes[key] = (old_value, None)
return changes
return TrackChangesItem(self._orig_data, self.data_to_store())
def mark_as_stored(self):
"""Should be called when instance data are stored.
@ -1002,59 +1250,6 @@ class CreatedInstance:
return obj
def remote_changes(self):
"""Prepare serializable changes on remote side.
Returns:
Dict[str, Any]: Prepared changes that can be send to client side.
"""
return {
"changes": self.changes(),
"asset_is_valid": self._asset_is_valid,
"task_is_valid": self._task_is_valid,
}
def update_from_remote(self, remote_changes):
"""Apply changes from remote side on client side.
Args:
remote_changes (Dict[str, Any]): Changes created on remote side.
"""
self._asset_is_valid = remote_changes["asset_is_valid"]
self._task_is_valid = remote_changes["task_is_valid"]
changes = remote_changes["changes"]
creator_attributes = changes.pop("creator_attributes", None) or {}
publish_attributes = changes.pop("publish_attributes", None) or {}
if changes:
self.apply_changes(changes)
if creator_attributes:
self.creator_attributes.apply_changes(creator_attributes)
if publish_attributes:
self.publish_attributes.apply_changes(publish_attributes)
def apply_changes(self, changes):
"""Apply changes created via 'changes'.
Args:
Dict[str, Tuple[Any, Any]]: Instance changes to apply. Same values
are kept untouched.
"""
for key, item in changes.items():
old_value, new_value = item
if new_value is None:
if key in self:
self.pop(key)
else:
current_value = self.get(key)
if current_value != new_value:
self[key] = new_value
# Context validation related methods/properties
@property
def has_set_asset(self):
@ -1237,6 +1432,53 @@ class CreateContext:
"""Access to global publish attributes."""
return self._publish_attributes
def get_sorted_creators(self, identifiers=None):
"""Sorted creators by 'order' attribute.
Args:
identifiers (Iterable[str]): Filter creators by identifiers. All
creators are returned if 'None' is passed.
Returns:
List[BaseCreator]: Sorted creator plugins by 'order' value.
"""
if identifiers is not None:
identifiers = set(identifiers)
creators = [
creator
for identifier, creator in self.creators.items()
if identifier in identifiers
]
else:
creators = self.creators.values()
return sorted(
creators, key=lambda creator: creator.order
)
@property
def sorted_creators(self):
"""Sorted creators by 'order' attribute.
Returns:
List[BaseCreator]: Sorted creator plugins by 'order' value.
"""
return self.get_sorted_creators()
@property
def sorted_autocreators(self):
"""Sorted auto-creators by 'order' attribute.
Returns:
List[AutoCreator]: Sorted plugins by 'order' value.
"""
return sorted(
self.autocreators.values(), key=lambda creator: creator.order
)
@classmethod
def get_host_misssing_methods(cls, host):
"""Collect missing methods from host.
@ -1515,11 +1757,10 @@ class CreateContext:
def context_data_changes(self):
"""Changes of attributes."""
changes = {}
publish_attribute_changes = self._publish_attributes.changes()
if publish_attribute_changes:
changes["publish_attributes"] = publish_attribute_changes
return changes
return TrackChangesItem(
self._original_context_data, self.context_data_to_store()
)
def creator_adds_instance(self, instance):
"""Creator adds new instance to context.
@ -1599,6 +1840,9 @@ class CreateContext:
)
])
def _remove_instance(self, instance):
self._instances_by_id.pop(instance.id, None)
def creator_removed_instance(self, instance):
"""When creator removes instance context should be acknowledged.
@ -1610,7 +1854,7 @@ class CreateContext:
from scene metadata.
"""
self._instances_by_id.pop(instance.id, None)
self._remove_instance(instance)
def add_convertor_item(self, convertor_identifier, label):
self.convertor_items_by_id[convertor_identifier] = ConvertorItem(
@ -1654,7 +1898,7 @@ class CreateContext:
# Collect instances
error_message = "Collection of instances for creator {} failed. {}"
failed_info = []
for creator in self.creators.values():
for creator in self.sorted_creators:
label = creator.label
identifier = creator.identifier
failed = False
@ -1726,7 +1970,8 @@ class CreateContext:
error_message = "Failed to run AutoCreator with identifier \"{}\". {}"
failed_info = []
for identifier, creator in self.autocreators.items():
for creator in self.sorted_autocreators:
identifier = creator.identifier
label = creator.label
failed = False
add_traceback = False
@ -1831,19 +2076,26 @@ class CreateContext:
"""Save instance specific values."""
instances_by_identifier = collections.defaultdict(list)
for instance in self._instances_by_id.values():
instance_changes = instance.changes()
if not instance_changes:
continue
identifier = instance.creator_identifier
instances_by_identifier[identifier].append(instance)
instances_by_identifier[identifier].append(
UpdateData(instance, instance_changes)
)
if not instances_by_identifier:
return
error_message = "Instances update of creator \"{}\" failed. {}"
failed_info = []
for identifier, creator_instances in instances_by_identifier.items():
update_list = []
for instance in creator_instances:
instance_changes = instance.changes()
if instance_changes:
update_list.append(UpdateData(instance, instance_changes))
creator = self.creators[identifier]
for creator in self.get_sorted_creators(
instances_by_identifier.keys()
):
identifier = creator.identifier
update_list = instances_by_identifier[identifier]
if not update_list:
continue
@ -1879,9 +2131,13 @@ class CreateContext:
def remove_instances(self, instances):
"""Remove instances from context.
All instances that don't have creator identifier leading to existing
creator are just removed from context.
Args:
instances(list<CreatedInstance>): Instances that should be removed
from context.
instances(List[CreatedInstance]): Instances that should be removed.
Removal logic is done using the creator, which may need to perform
other cleanup than just removing the instance from context.
"""
instances_by_identifier = collections.defaultdict(list)
@ -1889,10 +2145,21 @@ class CreateContext:
identifier = instance.creator_identifier
instances_by_identifier[identifier].append(instance)
# Just remove instances from context if creator is not available
missing_creators = set(instances_by_identifier) - set(self.creators)
for identifier in missing_creators:
for instance in instances_by_identifier[identifier]:
self._remove_instance(instance)
error_message = "Instances removement of creator \"{}\" failed. {}"
failed_info = []
for identifier, creator_instances in instances_by_identifier.items():
creator = self.creators.get(identifier)
# Remove instances by creator plugin order
for creator in self.get_sorted_creators(
instances_by_identifier.keys()
):
identifier = creator.identifier
creator_instances = instances_by_identifier[identifier]
label = creator.label
failed = False
add_traceback = False
@ -1935,6 +2202,7 @@ class CreateContext:
family(str): Instance family for which should be attribute
definitions returned.
"""
if family not in self._attr_plugins_by_family:
import pyblish.logic
@ -1950,7 +2218,13 @@ class CreateContext:
return self._attr_plugins_by_family[family]
def _get_publish_plugins_with_attr_for_context(self):
"""Publish plugins attributes for Context plugins."""
"""Publish plugins attributes for Context plugins.
Returns:
List[pyblish.api.Plugin]: Publish plugins that have attribute
definitions for context.
"""
plugins = []
for plugin in self.plugins_with_defs:
if not plugin.__instanceEnabled__:
@ -1975,7 +2249,7 @@ class CreateContext:
return self._collection_shared_data
def run_convertor(self, convertor_identifier):
"""Run convertor plugin by it's idenfitifier.
"""Run convertor plugin by identifier.
Conversion is skipped if convertor is not available.
@ -1988,7 +2262,7 @@ class CreateContext:
convertor.convert()
def run_convertors(self, convertor_identifiers):
"""Run convertor plugins by idenfitifiers.
"""Run convertor plugins by identifiers.
Conversion is skipped if convertor is not available. It is recommended
to trigger reset after conversion to reload instances.

View file

@ -107,7 +107,11 @@ class SubsetConvertorPlugin(object):
@property
def create_context(self):
"""Quick access to create context."""
"""Quick access to create context.
Returns:
CreateContext: Context which initialized the plugin.
"""
return self._create_context
@ -157,6 +161,10 @@ class BaseCreator:
# Cached group label after first call 'get_group_label'
_cached_group_label = None
# Order in which will be plugin executed (collect & update instances)
# less == earlier -> Order '90' will be processed before '100'
order = 100
# Variable to store logger
_log = None
@ -489,6 +497,17 @@ class Creator(BaseCreator):
# - similar to instance attribute definitions
pre_create_attr_defs = []
@property
def show_order(self):
"""Order in which is creator shown in UI.
Returns:
int: Order in which is creator shown (less == earlier). By default
is using Creator's 'order' or processing.
"""
return self.order
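A tiny sketch of how 'order' drives both processing and the default UI sort (class names are hypothetical):

```python
class EarlyCreator:
    order = 90

class DefaultCreator:
    order = 100  # BaseCreator default

creators = [DefaultCreator(), EarlyCreator()]
ordered = sorted(creators, key=lambda creator: creator.order)
print([type(creator).__name__ for creator in ordered])
# ['EarlyCreator', 'DefaultCreator'] - lower order runs/shows first
```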
@abstractmethod
def create(self, subset_name, instance_data, pre_create_data):
"""Create new instance and store it.

View file

@ -2,7 +2,10 @@ import os
import logging
from openpype.settings import get_system_settings, get_project_settings
from openpype.pipeline import legacy_io
from openpype.pipeline import (
schema,
legacy_io,
)
from openpype.pipeline.plugin_discover import (
discover,
register_plugin,
@ -79,6 +82,45 @@ class LoaderPlugin(list):
print(" - setting `{}`: `{}`".format(option, value))
setattr(cls, option, value)
@classmethod
def is_compatible_loader(cls, context):
"""Return whether a loader is compatible with a context.
This checks the version's families and the representation for the given
Loader.
Returns:
bool
"""
plugin_repre_names = cls.get_representations()
plugin_families = cls.families
if not plugin_repre_names or not plugin_families:
return False
repre_doc = context.get("representation")
if not repre_doc:
return False
plugin_repre_names = set(plugin_repre_names)
if (
"*" not in plugin_repre_names
and repre_doc["name"] not in plugin_repre_names
):
return False
maj_version, _ = schema.get_schema_version(context["subset"]["schema"])
if maj_version < 3:
families = context["version"]["data"].get("families", [])
else:
families = context["subset"]["data"]["families"]
plugin_families = set(plugin_families)
return (
"*" in plugin_families
or any(family in plugin_families for family in families)
)
@classmethod
def get_representations(cls):
return cls.representations
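The checks above reduce to two wildcard-aware membership tests. A self-contained sketch with a made-up context dict (and skipping the schema-version branch):

```python
plugin_repre_names = {"abc", "ma"}
plugin_families = {"pointcache"}

context = {
    "representation": {"name": "abc"},
    "subset": {"data": {"families": ["pointcache"]}},
}

repre_ok = (
    "*" in plugin_repre_names
    or context["representation"]["name"] in plugin_repre_names
)
families = context["subset"]["data"]["families"]
family_ok = (
    "*" in plugin_families
    or any(family in plugin_families for family in families)
)
print(repre_ok and family_ok)  # True
```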

View file

@ -748,25 +748,9 @@ def is_compatible_loader(Loader, context):
Returns:
bool
"""
maj_version, _ = schema.get_schema_version(context["subset"]["schema"])
if maj_version < 3:
families = context["version"]["data"].get("families", [])
else:
families = context["subset"]["data"]["families"]
representation = context["representation"]
has_family = (
"*" in Loader.families or any(
family in Loader.families for family in families
)
)
representations = Loader.get_representations()
has_representation = (
"*" in representations or representation["name"] in representations
)
return has_family and has_representation
return Loader.is_compatible_loader(context)
def loaders_from_repre_context(loaders, repre_context):

View file

@ -61,7 +61,8 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
"background",
"effect",
"staticMesh",
"skeletalMesh"
"skeletalMesh",
"xgen"
]
def process(self, instance):

View file

@ -33,7 +33,8 @@
"limit": [],
"jobInfo": {},
"pluginInfo": {},
"scene_patches": []
"scene_patches": [],
"strict_error_checking": true
},
"NukeSubmitDeadline": {
"enabled": true,

View file

@ -1,4 +1,5 @@
{
"open_workfile_post_initialization": false,
"imageio": {
"ocio_config": {
"enabled": false,

View file

@ -195,6 +195,12 @@
]
}
},
{
"type": "boolean",
"key": "strict_error_checking",
"label": "Strict Error Checking",
"default": true
}
]
},

View file

@ -5,6 +5,11 @@
"label": "Maya",
"is_file": true,
"children": [
{
"type": "boolean",
"key": "open_workfile_post_initialization",
"label": "Open Workfile Post Initialization"
},
{
"key": "imageio",
"type": "dict",

View file

@ -24,6 +24,7 @@ CREATOR_THUMBNAIL_ENABLED_ROLE = QtCore.Qt.UserRole + 5
FAMILY_ROLE = QtCore.Qt.UserRole + 6
GROUP_ROLE = QtCore.Qt.UserRole + 7
CONVERTER_IDENTIFIER_ROLE = QtCore.Qt.UserRole + 8
CREATOR_SORT_ROLE = QtCore.Qt.UserRole + 9
__all__ = (
@ -36,6 +37,7 @@ __all__ = (
"IS_GROUP_ROLE",
"CREATOR_IDENTIFIER_ROLE",
"CREATOR_THUMBNAIL_ENABLED_ROLE",
"CREATOR_SORT_ROLE",
"FAMILY_ROLE",
"GROUP_ROLE",
"CONVERTER_IDENTIFIER_ROLE",

View file

@ -832,7 +832,8 @@ class CreatorItem:
default_variants,
create_allow_context_change,
create_allow_thumbnail,
pre_create_attributes_defs
show_order,
pre_create_attributes_defs,
):
self.identifier = identifier
self.creator_type = creator_type
@ -846,6 +847,7 @@ class CreatorItem:
self.default_variants = default_variants
self.create_allow_context_change = create_allow_context_change
self.create_allow_thumbnail = create_allow_thumbnail
self.show_order = show_order
self.pre_create_attributes_defs = pre_create_attributes_defs
def get_group_label(self):
@ -869,6 +871,7 @@ class CreatorItem:
pre_create_attr_defs = None
create_allow_context_change = None
create_allow_thumbnail = None
show_order = creator.order
if creator_type is CreatorTypes.artist:
description = creator.get_description()
detail_description = creator.get_detail_description()
@ -877,6 +880,7 @@ class CreatorItem:
pre_create_attr_defs = creator.get_pre_create_attr_defs()
create_allow_context_change = creator.create_allow_context_change
create_allow_thumbnail = creator.create_allow_thumbnail
show_order = creator.show_order
identifier = creator.identifier
return cls(
@ -892,7 +896,8 @@ class CreatorItem:
default_variants,
create_allow_context_change,
create_allow_thumbnail,
pre_create_attr_defs
show_order,
pre_create_attr_defs,
)
def to_data(self):
@ -915,6 +920,7 @@ class CreatorItem:
"default_variants": self.default_variants,
"create_allow_context_change": self.create_allow_context_change,
"create_allow_thumbnail": self.create_allow_thumbnail,
"show_order": self.show_order,
"pre_create_attributes_defs": pre_create_attributes_defs,
}
@ -1502,9 +1508,6 @@ class BasePublisherController(AbstractPublisherController):
def _reset_attributes(self):
"""Reset most of attributes that can be reset."""
# Reset creator items
self._creator_items = None
self.publish_is_running = False
self.publish_has_validated = False
self.publish_has_crashed = False
@ -1760,6 +1763,8 @@ class PublisherController(BasePublisherController):
self._resetting_plugins = True
self._create_context.reset_plugins()
# Reset creator items
self._creator_items = None
self._resetting_plugins = False

View file

@ -18,9 +18,10 @@ from .tasks_widget import CreateWidgetTasksWidget
from .precreate_widget import PreCreateWidget
from ..constants import (
VARIANT_TOOLTIP,
CREATOR_IDENTIFIER_ROLE,
FAMILY_ROLE,
CREATOR_IDENTIFIER_ROLE,
CREATOR_THUMBNAIL_ENABLED_ROLE,
CREATOR_SORT_ROLE,
)
SEPARATORS = ("---separator---", "---")
@ -90,12 +91,19 @@ class CreatorShortDescWidget(QtWidgets.QWidget):
self._description_label.setText(description)
class CreatorsProxyModel(QtCore.QSortFilterProxyModel):
def lessThan(self, left, right):
l_show_order = left.data(CREATOR_SORT_ROLE)
r_show_order = right.data(CREATOR_SORT_ROLE)
if l_show_order == r_show_order:
return super(CreatorsProxyModel, self).lessThan(left, right)
return l_show_order < r_show_order
class CreateWidget(QtWidgets.QWidget):
def __init__(self, controller, parent=None):
super(CreateWidget, self).__init__(parent)
self.setWindowTitle("Create new instance")
self._controller = controller
self._asset_name = None
@ -141,7 +149,7 @@ class CreateWidget(QtWidgets.QWidget):
creators_view = QtWidgets.QListView(creators_view_widget)
creators_model = QtGui.QStandardItemModel()
creators_sort_model = QtCore.QSortFilterProxyModel()
creators_sort_model = CreatorsProxyModel()
creators_sort_model.setSourceModel(creators_model)
creators_view.setModel(creators_sort_model)
@ -441,7 +449,8 @@ class CreateWidget(QtWidgets.QWidget):
# Add new families
new_creators = set()
for identifier, creator_item in self._controller.creator_items.items():
creator_items_by_identifier = self._controller.creator_items
for identifier, creator_item in creator_items_by_identifier.items():
if creator_item.creator_type != "artist":
continue
@ -457,6 +466,7 @@ class CreateWidget(QtWidgets.QWidget):
self._creators_model.appendRow(item)
item.setData(creator_item.label, QtCore.Qt.DisplayRole)
item.setData(creator_item.show_order, CREATOR_SORT_ROLE)
item.setData(identifier, CREATOR_IDENTIFIER_ROLE)
item.setData(
creator_item.create_allow_thumbnail,
@ -482,8 +492,9 @@ class CreateWidget(QtWidgets.QWidget):
index = indexes[0]
identifier = index.data(CREATOR_IDENTIFIER_ROLE)
create_item = creator_items_by_identifier.get(identifier)
self._set_creator_by_identifier(identifier)
self._set_creator(create_item)
def _on_plugins_refresh(self):
# Trigger refresh only if is visible

View file

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
__version__ = "3.15.1-nightly.2"
__version__ = "3.15.1-nightly.4"

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "OpenPype"
version = "3.15.1" # OpenPype
version = "3.15.0" # OpenPype
description = "Open VFX and Animation pipeline with support."
authors = ["OpenPype Team <info@openpype.io>"]
license = "MIT License"

View file

@ -7,16 +7,10 @@ from github import Github
import os
def get_release_type_github(Log, github_token):
# print(Log)
minor_labels = ["Bump Minor"]
# patch_labels = [
# "type: enhancement",
# "type: bug",
# "type: deprecated",
# "type: Feature"]
g = Github(github_token)
repo = g.get_repo("pypeclub/OpenPype")
repo = g.get_repo("ynput/OpenPype")
labels = set()
for line in Log.splitlines():
@ -35,12 +29,12 @@ def get_release_type_github(Log, github_token):
else:
return "patch"
# TODO: if all is working fine, this part can be cleaned up eventually
# TODO: if all is working fine, this part can be cleaned up eventually
# if any(label in labels for label in patch_labels):
# return "patch"
return None
def remove_prefix(text, prefix):
return text[text.startswith(prefix) and len(prefix):]
@ -93,12 +87,16 @@ def file_regex_replace(filename, regex, version):
f.truncate()
def bump_file_versions(version):
def bump_file_versions(version, nightly=False):
filename = "./openpype/version.py"
regex = "(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(-((0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(\.(0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(\+([0-9a-zA-Z-]+(\.[0-9a-zA-Z-]+)*))?"
file_regex_replace(filename, regex, version)
if nightly:
# skip nightly version bump in pyproject.toml
return
# bump pyproject.toml
filename = "pyproject.toml"
regex = "version = \"(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(\+((0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(\.(0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(\+([0-9a-zA-Z-]+(\.[0-9a-zA-Z-]+)*))?\" # OpenPype"
@ -196,7 +194,7 @@ def main():
if options.nightly:
next_tag_v = calculate_next_nightly(github_token=options.github_token)
print(next_tag_v)
bump_file_versions(next_tag_v)
bump_file_versions(next_tag_v, True)
if options.finalize:
new_release = finalize_prerelease(options.finalize)
@ -222,7 +220,7 @@ def main():
new_prerelease = current_prerelease.bump_prerelease().__str__()
print(new_prerelease)
bump_file_versions(new_prerelease)
if options.version:
bump_file_versions(options.version)
print(f"Injected version {options.version} into the release")

View file

@ -7,6 +7,7 @@ import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
## Working in the studio
In studio environment you should have OpenPype already installed and deployed, so you can start using it without much setup. Your admin has probably put OpenPype icon on your desktop or even had your computer set up so OpenPype will start automatically.
@ -15,70 +16,66 @@ If this is not the case, please contact your administrator to consult on how to
## Working from home
If you are working from home though, you'll need to install it yourself. You should, however, receive the OpenPype installer files from your studio
admin, supervisor or production, because OpenPype versions and executables might not be compatible between studios.
If you are working from **home** though, you'll **need to install** it yourself. You should, however, receive the OpenPype installer files from your studio
admin, supervisor or production, because OpenPype versions and executables might not be compatible between studios.
To install OpenPype you just need to unzip it anywhere on the disk
OpenPype can be installed with the Installer or by unzipping the downloaded ZIP archive to any location on disk.
To use it, you have two options
**openpype_gui.exe** is the most common for artists. It runs OpenPype GUI in system tray. From there you can run all the available tools. To use any of the features, OpenPype must be running in the tray.
**openpype_console.exe** is useful for debugging and error reporting. It opens a console window where all the necessary information will appear during the user's work.
:::tip Using the OpenPype Installer
See the [Installation section](artist_install.md) for more information on how to use the OpenPype Installer
:::
<Tabs
groupId="platforms"
defaultValue="win"
values={[
{label: 'Windows', value: 'win'},
{label: 'Linux', value: 'linux'},
{label: 'Mac', value: 'mac'},
]}>
You can run OpenPype by desktop "OP" icon (if it exists after installing) or by directly executing
<TabItem value="win">
**openpype_gui.exe** located in the OpenPype folder. This executable is suitable **for artists**.
WIP - Windows instructions once installers are finished
or alternatively by
</TabItem>
<TabItem value="linux">
**openpype_console.exe** which is more suitable for **TDs/Admins** for debugging and error reporting. This one runs with
an open console window where all the necessary info will appear during the user's work session.
WIP - Linux instructions once installers are finished
:::tip Is OpenPype running?
OpenPype runs in the operating system's tray. If you see the turquoise OpenPype icon in the tray, you can easily tell OpenPype is currently running.
Keep in mind that on Windows this icon might be hidden by default, in which case the artist can simply drag the icon down to the tray.
:::
</TabItem>
<TabItem value="mac">
WIP - Mac instructions once installers are finished
</TabItem>
</Tabs>
![Systray](assets/artist_systray.png)
## First Launch
When you first start OpenPype, you will be asked to give it some basic information.
When you first start OpenPype, you will be asked to fill in some basic information.
### MongoDB
In most cases that will only be your studio MongoDB Address.
In most cases you will only have to supply the MongoDB Address.
It's the database URL you should have received from your Studio admin and often will look like this
It is a URL that you should receive from your studio and most often will look like this `mongodb://username:password@mongo.mystudiodomain.com:12345` or `mongodb://192.168.100.15:27071`, it really depends on your studio setup. When OpenPype Igniter
`mongodb://username:password@mongo.mystudiodomain.com:12345`
or
`mongodb://192.168.100.15:27071`
it really depends on your studio setup. When OpenPype Igniter
asks for it, just put it in the corresponding text field and press `install` button.
### OpenPype Version Repository
Sometimes your studio might also ask you to fill in the path to it's version
repository. This is a location where OpenPype will be looking for when checking
if it's up to date and where updates are installed from automatically.
Sometimes your Studio might also ask you to fill in the path to its version
repository. This is a location where OpenPype will search for the latest versions, check
if it's up to date and where updates are installed from automatically.
This pat is usually taken from the database directly, so you shouldn't need it.
This path is usually taken from the database directly, so you shouldn't need it.
## Updates
If you're connected to your studio, OpenPype will check for, and install updates automatically every time you run it. That's why during the first start, it will go through a quick update installation process, even though you might have just installed it.
If you're connected to your Studio, OpenPype will check for, and install updates automatically every time you run it. That's why during the first start it can go through a quick update installation process, even though you might have just installed it.
## Advanced use
## Advanced Usage
For more advanced use of OpenPype commands please visit [Admin section](admin_openpype_commands.md).

View file

@ -601,3 +601,20 @@ about customizing review process refer to [admin section](project_settings/setti
If you don't move `modelMain` into `reviewMain`, review will be generated but it will
be published as separate entity.
## Inventory Actions
### Connect Geometry
This action will connect geometries between containers.
#### Usage
Select 1 container of type `animation` or `pointcache`, then 1+ container of any type.
#### Details
The action searches the selected containers for 1 animation container of type `animation` or `pointcache`. This animation container will be connected to the rest of the selected containers. Matching geometries between containers is done by comparing the attribute `cbId`.
The connection between geometries is done with a live blendshape.

View file

@ -4,26 +4,96 @@ title: Xgen for Maya
sidebar_label: Xgen
---
## Working with Xgen in OpenPype
OpenPype supports Xgen classic with the following workflow. It eases the otherwise cumbersome issues around Xgen's sidecar files and hidden behaviour inside Maya. The workflow supports publishing, loading and updating of Xgen collections, along with connecting animation from geometry and (guide) curves.
OpenPype support publishing and loading of Xgen interactive grooms. You can publish
them as mayaAscii files with scalps that can be loaded into another maya scene, or as
alembic caches.
## Setup
### Publishing Xgen Grooms
### Settings
To prepare xgen for publishing just select all the descriptions that should be published together and the create Xgen Subset in the scene using - **OpenPype menu****Create**... and select **Xgen Interactive**. Leave Use selection checked.
Go to project settings > `Maya` > enable `Open Workfile Post Initialization`;
For actual publishing of your groom to go **OpenPype → Publish** and then press ▶ to publish. This will export `.ma` file containing your grooms with any geometries they are attached to and also a baked cache in `.abc` format
`project_settings/maya/open_workfile_post_initialization`
This is due to two errors occurring when opening a workfile containing referenced Xgen nodes on launch of Maya, specifically:
:::tip adding more descriptions
You can add multiple xgen description into the subset you are about to publish, simply by
adding them to the maya set that was created for you. Please make sure that only xgen description nodes are present inside of the set and not the scalp geometry.
:::
- ``Critical``: Duplicate collection errors on launching workfile. This is because Maya first imports Xgen when referencing in external Maya files, then imports Xgen again when the reference edits are applied.
```
Importing XGen Collections...
# Error: XGen: Failed to find description ball_xgenMain_01_:parent in collection ball_xgenMain_01_:collection. Abort applying delta: P:/PROJECTS/OP01_CG_demo/shots/sh040/work/Lighting/cg_sh040_Lighting_v001__ball_xgenMain_01___collection.xgen #
# Error: XGen: Tried to import a duplicate collection, ball_xgenMain_02_:collection, from file P:/PROJECTS/OP01_CG_demo/shots/sh040/work/Lighting/cg_sh040_Lighting_v001__ball_xgenMain_02___collection.xgen. Aborting import. #
```
- ``Non-critical``: Errors on opening the workfile and failed opening of published Xgen. This is because Maya imports Xgen when referencing in external Maya files, but the reference edits that ensure the Xgen file locations are correct have not been applied yet.
```
Importing XGen Collections...
# Error: XGen: Failed to open file: P:/PROJECTS/OP01_CG_demo/shots/sh040/work/Lighting/cg_ball_xgenMain_v035__ball_rigMain_01___collection.xgen #
# Error: XGen: Failed to import collection from file P:/PROJECTS/OP01_CG_demo/shots/sh040/work/Lighting/cg_ball_xgenMain_v035__ball_rigMain_01___collection.xgen #
```
### Loading Xgen
Go to project settings > `Deadline` > `Publish plugins` > `Maya Submit to Deadline` > disable `Use Published scene`;
You can use published xgens by loading them using OpenPype Publisher. You can choose to reference or import xgen. We don't have any automatic mesh linking at the moment and it is expected, that groom is published with a scalp, that can then be manually attached to your animated mesh for example.
`project_settings/deadline/publish/MayaSubmitDeadline/use_published`
The alembic representation can be loaded too and it contains the groom converted to curves. Keep in mind that the density of the alembic directly depends on your viewport xgen density at the point of export.
This is due to temporary workaround while fixing rendering with published scenes.
## Create
Create an Xgen instance to publish. This needs to contain only **one Xgen collection**.
`OpenPype > Create... > Xgen`
You can create multiple Xgen instances if you have multiple collections to publish.
### Publish
The publishing process will grab geometry used for Xgen along with any external files used in the collection's descriptions. This creates an isolated Maya file with just the Xgen collection's dependencies, so you can use any nested geometry when creating the Xgen description. An Xgen version will consist of:
- Maya file (`.ma`) - this contains the geometry and the connections to the Xgen collection and descriptions.
- Xgen file (`.xgen`) - this contains the Xgen collection and description.
- Resource files (`.ptx`, `.xuv`) - these contain Xgen sidecar files used in the collection and descriptions.
## Load
Open the Loader tool, `OpenPype > Loader...`, and navigate to the published Xgen version. On right-click you'll get the option `Reference Xgen (ma)`
When loading an Xgen version the following happens:
- References in the Maya file.
- Copies the Xgen file (`.xgen`) to the current workspace.
- Modifies the Xgen file copy to load the current workspace first then the published Xgen collection.
- Makes a custom attribute on the Xgen collection, `float_ignore`, which can be seen under the `Expressions` tab of the `Xgen` UI. This is done to initialize the Xgen delta file workflow.
- Sets up an Xgen delta file (`.xgd`) to store any workspace changes of the published Xgen version.
When the loading is done, the Xgen collection will be in the Xgen delta file workflow, which means any changes done in the Maya workfile will be stored in the current workspace. The published Xgen collection will remain intact, even if the user assigns maps to any attributes or otherwise modifies any attribute.
### Updating
When there are changes to the Xgen version, the user will be notified when opening the workfile or publishing. Since the Xgen is referenced, it follows the standard Maya referencing system and overrides.
For example publishing `xgenMain` version 1 with the attribute `renderer` set to `None`, then version 2 has `renderer` set to `Arnold Renderer`. When updating from version 1 to 2, the `renderer` attribute will be updated to `Arnold Renderer` unless there is a local override.
### Connect Patches
When loading in an Xgen version, it does not have any connections to anything in the workfile, so it's static in the position it was published in. Use the [Connect Geometry](artist_hosts_maya#connect-geometry) action to connect Xgen to any matching loaded animated geometry.
### Connect Guides
Along with patches you can also connect the Xgen guides to an Alembic cache.
#### Usage
Select 1 animation container, of family `animation` or `pointcache`, then the Xgen containers to connect to. Right-click > `Actions` > `Connect Xgen`.
***Note: Only alembic (`.abc`) representations are allowed.***
#### Details
Connecting the guide will make Xgen use the Alembic directly, setting the attributes under `Guide Animation`, so the Alembic needs to contain the same number of curves as there are guides in the Xgen.
The animation container gets connected with the Xgen container, so when the animation container is updated, the Xgen container's attributes are updated as well.
## Rendering
To render with Xgen, follow the [Rendering With OpenPype](artist_hosts_maya#rendering-with-openpype) guide.
### Details
When submitting a workfile with Xgen, all Xgen related files will be collected and published as the workfiles resources. This means the published workfile is no longer referencing the workspace Xgen files.