diff --git a/.github/workflows/milestone_release_trigger.yml b/.github/workflows/milestone_release_trigger.yml
new file mode 100644
index 0000000000..b5b8aab1dc
--- /dev/null
+++ b/.github/workflows/milestone_release_trigger.yml
@@ -0,0 +1,47 @@
+name: Milestone Release [trigger]
+
+on:
+ workflow_dispatch:
+ inputs:
+ milestone:
+ required: true
+ release-type:
+ type: choice
+ description: What release should be created
+ options:
+ - release
+ - pre-release
+ milestone:
+ types: [closed]
+
+
+jobs:
+ milestone-title:
+ runs-on: ubuntu-latest
+ outputs:
+ milestone: ${{ steps.milestoneTitle.outputs.value }}
+ steps:
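+ # Prefer the manually supplied input; fall back to the closed milestone's title.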
+ - name: Switch input milestone
+ uses: haya14busa/action-cond@v1
+ id: milestoneTitle
+ with:
+ cond: ${{ inputs.milestone == '' }}
+ if_true: ${{ github.event.milestone.title }}
+ if_false: ${{ inputs.milestone }}
+ - name: Print resolved milestone
+ run: |
+ echo "${{ steps.milestoneTitle.outputs.value }}"
+
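+ # Hand the resolved milestone to the shared ci-tools workflow that
+ # performs the actual tagging and release.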
+ call-ci-tools-milestone-release:
+ needs: milestone-title
+ uses: ynput/ci-tools/.github/workflows/milestone_release_ref.yml@main
+ with:
+ milestone: ${{ needs.milestone-title.outputs.milestone }}
+ repo-owner: ${{ github.event.repository.owner.login }}
+ repo-name: ${{ github.event.repository.name }}
+ version-py-path: "./openpype/version.py"
+ pyproject-path: "./pyproject.toml"
+ secrets:
+ token: ${{ secrets.YNPUT_BOT_TOKEN }}
+ user_email: ${{ secrets.CI_EMAIL }}
+ user_name: ${{ secrets.CI_USER }}
diff --git a/.github/workflows/nightly_merge.yml b/.github/workflows/nightly_merge.yml
deleted file mode 100644
index 1776d7a464..0000000000
--- a/.github/workflows/nightly_merge.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-name: Dev -> Main
-
-on:
- schedule:
- - cron: '21 3 * * 3,6'
- workflow_dispatch:
-
-jobs:
- develop-to-main:
-
- runs-on: ubuntu-latest
-
- steps:
- - name: 🚛 Checkout Code
- uses: actions/checkout@v2
-
- - name: 🔨 Merge develop to main
- uses: everlytic/branch-merge@1.1.0
- with:
- github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
- source_ref: 'develop'
- target_branch: 'main'
- commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'
-
- - name: Invoke pre-release workflow
- uses: benc-uk/workflow-dispatch@v1
- with:
- workflow: Nightly Prerelease
- token: ${{ secrets.YNPUT_BOT_TOKEN }}
diff --git a/.github/workflows/prerelease.yml b/.github/workflows/prerelease.yml
deleted file mode 100644
index 571b0339e1..0000000000
--- a/.github/workflows/prerelease.yml
+++ /dev/null
@@ -1,67 +0,0 @@
-name: Nightly Prerelease
-
-on:
- workflow_dispatch:
-
-
-jobs:
- create_nightly:
- runs-on: ubuntu-latest
-
- steps:
- - name: 🚛 Checkout Code
- uses: actions/checkout@v2
- with:
- fetch-depth: 0
-
- - name: Set up Python
- uses: actions/setup-python@v2
- with:
- python-version: 3.9
-
- - name: Install Python requirements
- run: pip install gitpython semver PyGithub
-
- - name: 🔎 Determine next version type
- id: version_type
- run: |
- TYPE=$(python ./tools/ci_tools.py --bump --github_token ${{ secrets.YNPUT_BOT_TOKEN }})
- echo "type=${TYPE}" >> $GITHUB_OUTPUT
-
- - name: 💉 Inject new version into files
- id: version
- if: steps.version_type.outputs.type != 'skip'
- run: |
- NEW_VERSION_TAG=$(python ./tools/ci_tools.py --nightly --github_token ${{ secrets.YNPUT_BOT_TOKEN }})
- echo "next_tag=${NEW_VERSION_TAG}" >> $GITHUB_OUTPUT
-
- - name: 💾 Commit and Tag
- id: git_commit
- if: steps.version_type.outputs.type != 'skip'
- run: |
- git config user.email ${{ secrets.CI_EMAIL }}
- git config user.name ${{ secrets.CI_USER }}
- git checkout main
- git pull
- git add .
- git commit -m "[Automated] Bump version"
- tag_name="CI/${{ steps.version.outputs.next_tag }}"
- echo $tag_name
- git tag -a $tag_name -m "nightly build"
-
- - name: Push to protected main branch
- uses: CasperWA/push-protected@v2.10.0
- with:
- token: ${{ secrets.YNPUT_BOT_TOKEN }}
- branch: main
- tags: true
- unprotect_reviews: true
-
- - name: 🔨 Merge main back to develop
- uses: everlytic/branch-merge@1.1.0
- if: steps.version_type.outputs.type != 'skip'
- with:
- github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
- source_ref: 'main'
- target_branch: 'develop'
- commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
deleted file mode 100644
index 0b4c8af2c7..0000000000
--- a/.github/workflows/release.yml
+++ /dev/null
@@ -1,76 +0,0 @@
-name: Stable Release
-
-on:
- release:
- types:
- - prereleased
-
-jobs:
- create_release:
- runs-on: ubuntu-latest
- if: github.actor != 'pypebot'
-
- steps:
- - name: 🚛 Checkout Code
- uses: actions/checkout@v2
- with:
- fetch-depth: 0
-
- - name: Set up Python
- uses: actions/setup-python@v2
- with:
- python-version: 3.9
- - name: Install Python requirements
- run: pip install gitpython semver PyGithub
-
- - name: 💉 Inject new version into files
- id: version
- run: |
- NEW_VERSION=$(python ./tools/ci_tools.py --finalize ${GITHUB_REF#refs/*/})
- LAST_VERSION=$(python ./tools/ci_tools.py --lastversion release)
-
- echo "current_version=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT
- echo "last_release=${LAST_VERSION}" >> $GITHUB_OUTPUT
- echo "release_tag=${NEW_VERSION}" >> $GITHUB_OUTPUT
-
- - name: 💾 Commit and Tag
- id: git_commit
- if: steps.version.outputs.release_tag != 'skip'
- run: |
- git config user.email ${{ secrets.CI_EMAIL }}
- git config user.name ${{ secrets.CI_USER }}
- git add .
- git commit -m "[Automated] Release"
- tag_name="${{ steps.version.outputs.release_tag }}"
- git tag -a $tag_name -m "stable release"
-
- - name: 🔏 Push to protected main branch
- if: steps.version.outputs.release_tag != 'skip'
- uses: CasperWA/push-protected@v2.10.0
- with:
- token: ${{ secrets.YNPUT_BOT_TOKEN }}
- branch: main
- tags: true
- unprotect_reviews: true
-
- - name: 🚀 Github Release
- if: steps.version.outputs.release_tag != 'skip'
- uses: ncipollo/release-action@v1
- with:
- tag: ${{ steps.version.outputs.release_tag }}
- token: ${{ secrets.YNPUT_BOT_TOKEN }}
-
- - name: ☠ Delete Pre-release
- if: steps.version.outputs.release_tag != 'skip'
- uses: cb80/delrel@latest
- with:
- tag: "${{ steps.version.outputs.current_version }}"
-
- - name: 🔁 Merge main back to develop
- if: steps.version.outputs.release_tag != 'skip'
- uses: everlytic/branch-merge@1.1.0
- with:
- github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
- source_ref: 'main'
- target_branch: 'develop'
- commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}'
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0da167763b..8a37886deb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,81 @@
# Changelog
+
+## [3.15.1](https://github.com/ynput/OpenPype/tree/3.15.1)
+
+[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.0...3.15.1)
+
+### **🆕 New features**
+
+
+
+Maya: Xgen (3d / maya) - #4256
+
+
+___
+
+
+## Brief description
+Initial Xgen implementation.
+
+## Description
+Client request for an Xgen pipeline.
+
+
+
+
+___
+
+
+
+
+### **🚀 Enhancements**
+
+
+
+Adding path validator for non-maya nodes (3d / maya) - #4271
+
+
+___
+
+
+## Brief description
+Adds a path validator for file paths on non-Maya nodes created by plugins such as Renderman, Yeti and abcImport.
+
+## Description
+As the File Path Editor cannot catch wrong file paths on non-Maya nodes such as AlembicNodes, a new validator is necessary to ensure the file paths on those nodes exist.
+
+
+
+
+___
+
+
+
+
+### **🐛 Bug fixes**
+
+
+
+Fix features for gizmo menu (2d / nuke) - #4280
+
+
+___
+
+
+## Brief description
+Fixes features of the Gizmo Menu project settings (shortcut support for both the Python usage type and the file usage type).
+
+
+
+
+___
+
+
+
+
+
+
## [3.15.0](https://github.com/ynput/OpenPype/tree/HEAD)
[Full Changelog](https://github.com/ynput/OpenPype/compare/3.14.10...HEAD)
diff --git a/README.md b/README.md
index 485ae7f4ee..514ffb62c0 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,6 @@ OpenPype
[](https://github.com/pypeclub/pype/actions/workflows/documentation.yml) 
-this
Introduction
------------
diff --git a/openpype/hosts/aftereffects/plugins/create/create_render.py b/openpype/hosts/aftereffects/plugins/create/create_render.py
index 10ded8b912..c20b0ec51b 100644
--- a/openpype/hosts/aftereffects/plugins/create/create_render.py
+++ b/openpype/hosts/aftereffects/plugins/create/create_render.py
@@ -6,8 +6,7 @@ from openpype.hosts.aftereffects import api
from openpype.pipeline import (
Creator,
CreatedInstance,
- CreatorError,
- legacy_io,
+ CreatorError
)
from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances
from openpype.lib import prepare_template_data
@@ -127,7 +126,7 @@ class RenderCreator(Creator):
subset_change = _changes.get("subset")
if subset_change:
api.get_stub().rename_item(created_inst.data["members"][0],
- subset_change[1])
+ subset_change.new_value)
def remove_instances(self, instances):
for instance in instances:
@@ -195,7 +194,7 @@ class RenderCreator(Creator):
instance_data.pop("uuid")
if not instance_data.get("task"):
- instance_data["task"] = legacy_io.Session.get("AVALON_TASK")
+ instance_data["task"] = self.create_context.get_current_task_name()
if not instance_data.get("creator_attributes"):
is_old_farm = instance_data["family"] != "renderLocal"
diff --git a/openpype/hosts/aftereffects/plugins/create/workfile_creator.py b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py
index c698af896b..2e7b9d4a7e 100644
--- a/openpype/hosts/aftereffects/plugins/create/workfile_creator.py
+++ b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py
@@ -2,8 +2,7 @@ import openpype.hosts.aftereffects.api as api
from openpype.client import get_asset_by_name
from openpype.pipeline import (
AutoCreator,
- CreatedInstance,
- legacy_io,
+ CreatedInstance
)
from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances
@@ -38,10 +37,11 @@ class AEWorkfileCreator(AutoCreator):
existing_instance = instance
break
- project_name = legacy_io.Session["AVALON_PROJECT"]
- asset_name = legacy_io.Session["AVALON_ASSET"]
- task_name = legacy_io.Session["AVALON_TASK"]
- host_name = legacy_io.Session["AVALON_APP"]
+ context = self.create_context
+ project_name = context.get_current_project_name()
+ asset_name = context.get_current_asset_name()
+ task_name = context.get_current_task_name()
+ host_name = context.host_name
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
diff --git a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py
index d5294d61c2..5082217db0 100644
--- a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py
+++ b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py
@@ -143,6 +143,9 @@ class ExtractSubsetResources(publish.Extractor):
# create staging dir path
staging_dir = self.staging_dir(instance)
+ # append staging dir for later cleanup
+ instance.context.data["cleanupFullPaths"].append(staging_dir)
+
# add default preset type for thumbnail and reviewable video
# update them with settings and override in case the same
# are found in there
@@ -548,30 +551,3 @@ class ExtractSubsetResources(publish.Extractor):
"Path `{}` is containing more that one clip".format(path)
)
return clips[0]
-
- def staging_dir(self, instance):
- """Provide a temporary directory in which to store extracted files
-
- Upon calling this method the staging directory is stored inside
- the instance.data['stagingDir']
- """
- staging_dir = instance.data.get('stagingDir', None)
- openpype_temp_dir = os.getenv("OPENPYPE_TEMP_DIR")
-
- if not staging_dir:
- if openpype_temp_dir and os.path.exists(openpype_temp_dir):
- staging_dir = os.path.normpath(
- tempfile.mkdtemp(
- prefix="pyblish_tmp_",
- dir=openpype_temp_dir
- )
- )
- else:
- staging_dir = os.path.normpath(
- tempfile.mkdtemp(prefix="pyblish_tmp_")
- )
- instance.data['stagingDir'] = staging_dir
-
- instance.context.data["cleanupFullPaths"].append(staging_dir)
-
- return staging_dir
diff --git a/openpype/hosts/maya/api/commands.py b/openpype/hosts/maya/api/commands.py
index 4a36406632..19ad18d824 100644
--- a/openpype/hosts/maya/api/commands.py
+++ b/openpype/hosts/maya/api/commands.py
@@ -4,6 +4,7 @@ from maya import cmds
from openpype.client import get_asset_by_name, get_project
from openpype.pipeline import legacy_io
+from . import lib
class ToolWindows:
@@ -59,25 +60,11 @@ def edit_shader_definitions():
def reset_frame_range():
"""Set frame range to current asset"""
- # Set FPS first
- fps = {15: 'game',
- 24: 'film',
- 25: 'pal',
- 30: 'ntsc',
- 48: 'show',
- 50: 'palf',
- 60: 'ntscf',
- 23.98: '23.976fps',
- 23.976: '23.976fps',
- 29.97: '29.97fps',
- 47.952: '47.952fps',
- 47.95: '47.952fps',
- 59.94: '59.94fps',
- 44100: '44100fps',
- 48000: '48000fps'
- }.get(float(legacy_io.Session.get("AVALON_FPS", 25)), "pal")
- cmds.currentUnit(time=fps)
+ fps = lib.convert_to_maya_fps(
+ float(legacy_io.Session.get("AVALON_FPS", 25))
+ )
+ lib.set_scene_fps(fps)
# Set frame start/end
project_name = legacy_io.active_project()
diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py
index 851028d0e5..b920428b20 100644
--- a/openpype/hosts/maya/api/lib.py
+++ b/openpype/hosts/maya/api/lib.py
@@ -1970,8 +1970,6 @@ def get_id_from_sibling(node, history_only=True):
return first_id
-
-# Project settings
def set_scene_fps(fps, update=True):
"""Set FPS from project configuration
@@ -1984,28 +1982,21 @@ def set_scene_fps(fps, update=True):
"""
- fps_mapping = {'15': 'game',
- '24': 'film',
- '25': 'pal',
- '30': 'ntsc',
- '48': 'show',
- '50': 'palf',
- '60': 'ntscf',
- '23.98': '23.976fps',
- '23.976': '23.976fps',
- '29.97': '29.97fps',
- '47.952': '47.952fps',
- '47.95': '47.952fps',
- '59.94': '59.94fps',
- '44100': '44100fps',
- '48000': '48000fps'}
-
- # pull from mapping
- # this should convert float string to float and int to int
- # so 25.0 is converted to 25, but 23.98 will be still float.
- dec, ipart = math.modf(fps)
- if dec == 0.0:
- fps = int(ipart)
+ fps_mapping = {
+ '15': 'game',
+ '24': 'film',
+ '25': 'pal',
+ '30': 'ntsc',
+ '48': 'show',
+ '50': 'palf',
+ '60': 'ntscf',
+ '23.976023976023978': '23.976fps',
+ '29.97002997002997': '29.97fps',
+ '47.952047952047955': '47.952fps',
+ '59.94005994005994': '59.94fps',
+ '44100': '44100fps',
+ '48000': '48000fps'
+ }
unit = fps_mapping.get(str(fps), None)
if unit is None:
@@ -2125,7 +2116,9 @@ def set_context_settings():
asset_data = asset_doc.get("data", {})
# Set project fps
- fps = asset_data.get("fps", project_data.get("fps", 25))
+ fps = convert_to_maya_fps(
+ asset_data.get("fps", project_data.get("fps", 25))
+ )
legacy_io.Session["AVALON_FPS"] = str(fps)
set_scene_fps(fps)
@@ -2147,15 +2140,12 @@ def validate_fps():
"""
- fps = get_current_project_asset(fields=["data.fps"])["data"]["fps"]
- # TODO(antirotor): This is hack as for framerates having multiple
- # decimal places. FTrack is ceiling decimal values on
- # fps to two decimal places but Maya 2019+ is reporting those fps
- # with much higher resolution. As we currently cannot fix Ftrack
- # rounding, we have to round those numbers coming from Maya.
- current_fps = float_round(mel.eval('currentTimeUnitToFPS()'), 2)
+ expected_fps = convert_to_maya_fps(
+ get_current_project_asset(fields=["data.fps"])["data"]["fps"]
+ )
+ current_fps = mel.eval('currentTimeUnitToFPS()')
- fps_match = current_fps == fps
+ fps_match = current_fps == expected_fps
if not fps_match and not IS_HEADLESS:
from openpype.widgets import popup
@@ -2164,14 +2154,19 @@ def validate_fps():
dialog = popup.PopupUpdateKeys(parent=parent)
dialog.setModal(True)
dialog.setWindowTitle("Maya scene does not match project FPS")
- dialog.setMessage("Scene %i FPS does not match project %i FPS" %
- (current_fps, fps))
+ dialog.setMessage(
+ "Scene {} FPS does not match project {} FPS".format(
+ current_fps, expected_fps
+ )
+ )
dialog.setButtonText("Fix")
# Set new text for button (add optional argument for the popup?)
toggle = dialog.widgets["toggle"]
update = toggle.isChecked()
- dialog.on_clicked_state.connect(lambda: set_scene_fps(fps, update))
+ dialog.on_clicked_state.connect(
+ lambda: set_scene_fps(expected_fps, update)
+ )
dialog.show()
@@ -3356,6 +3351,88 @@ def get_attribute_input(attr):
return connections[0] if connections else None
+def convert_to_maya_fps(fps):
+ """Convert any fps to supported Maya framerates."""
+ float_framerates = [
+ 23.976023976023978,
+ # 29.97 covers both drop-frame and non-drop-frame timecode.
+ 29.97002997002997,
+ 47.952047952047955,
+ 59.94005994005994
+ ]
+ # 44100 fps evaluates as 41000.0 in Maya for some reason, so it is omitted for now.
+ int_framerates = [
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 8,
+ 10,
+ 12,
+ 15,
+ 16,
+ 20,
+ 24,
+ 25,
+ 30,
+ 40,
+ 48,
+ 50,
+ 60,
+ 75,
+ 80,
+ 90,
+ 100,
+ 120,
+ 125,
+ 150,
+ 200,
+ 240,
+ 250,
+ 300,
+ 375,
+ 400,
+ 500,
+ 600,
+ 750,
+ 1200,
+ 1500,
+ 2000,
+ 3000,
+ 6000,
+ 48000
+ ]
+
+ # If the input fps is a whole number, return it as-is.
+ if float(fps).is_integer():
+ # Validate fps is part of Maya's fps selection.
+ if fps not in int_framerates:
+ raise ValueError(
+ "Framerate \"{}\" is not supported in Maya".format(fps)
+ )
+ return fps
+ else:
+ # Differences to supported float frame rates.
+ differences = []
+ for i in float_framerates:
+ differences.append(abs(i - fps))
+
+ # Validate difference does not stray too far from supported framerates.
+ min_difference = min(differences)
+ min_index = differences.index(min_difference)
+ supported_framerate = float_framerates[min_index]
+ if min_difference > 0.1:
+ raise ValueError(
+ "Framerate \"{}\" strays too far from any supported framerate"
+ " in Maya. Closest supported framerate is \"{}\"".format(
+ fps, supported_framerate
+ )
+ )
+
+ return supported_framerate
+
+
def write_xgen_file(data, filepath):
"""Overwrites data in .xgen files.
diff --git a/openpype/hosts/maya/api/workfile_template_builder.py b/openpype/hosts/maya/api/workfile_template_builder.py
index 3416c98793..2f550e787a 100644
--- a/openpype/hosts/maya/api/workfile_template_builder.py
+++ b/openpype/hosts/maya/api/workfile_template_builder.py
@@ -2,7 +2,7 @@ import json
from maya import cmds
-from openpype.pipeline import registered_host
+from openpype.pipeline import registered_host, get_current_asset_name
from openpype.pipeline.workfile.workfile_template_builder import (
TemplateAlreadyImported,
AbstractTemplateBuilder,
@@ -41,10 +41,27 @@ class MayaTemplateBuilder(AbstractTemplateBuilder):
))
cmds.sets(name=PLACEHOLDER_SET, empty=True)
- cmds.file(path, i=True, returnNewNodes=True)
+ new_nodes = cmds.file(path, i=True, returnNewNodes=True)
cmds.setAttr(PLACEHOLDER_SET + ".hiddenInOutliner", True)
+ imported_sets = cmds.ls(new_nodes, set=True)
+ if not imported_sets:
+ return True
+
+ # update imported sets information
+ asset_name = get_current_asset_name()
+ for node in imported_sets:
+ if not cmds.attributeQuery("id", node=node, exists=True):
+ continue
+ if cmds.getAttr("{}.id".format(node)) != "pyblish.avalon.instance":
+ continue
+ if not cmds.attributeQuery("asset", node=node, exists=True):
+ continue
+
+ cmds.setAttr(
+ "{}.asset".format(node), asset_name, type="string")
+
return True
diff --git a/openpype/hosts/maya/plugins/create/create_ass.py b/openpype/hosts/maya/plugins/create/create_arnold_scene_source.py
similarity index 84%
rename from openpype/hosts/maya/plugins/create/create_ass.py
rename to openpype/hosts/maya/plugins/create/create_arnold_scene_source.py
index 935a068ca5..2afb897e94 100644
--- a/openpype/hosts/maya/plugins/create/create_ass.py
+++ b/openpype/hosts/maya/plugins/create/create_arnold_scene_source.py
@@ -6,7 +6,7 @@ from openpype.hosts.maya.api import (
from maya import cmds
-class CreateAss(plugin.Creator):
+class CreateArnoldSceneSource(plugin.Creator):
"""Arnold Scene Source"""
name = "ass"
@@ -29,7 +29,7 @@ class CreateAss(plugin.Creator):
maskOperator = False
def __init__(self, *args, **kwargs):
- super(CreateAss, self).__init__(*args, **kwargs)
+ super(CreateArnoldSceneSource, self).__init__(*args, **kwargs)
# Add animation data
self.data.update(lib.collect_animation_data())
@@ -52,7 +52,7 @@ class CreateAss(plugin.Creator):
self.data["maskOperator"] = self.maskOperator
def process(self):
- instance = super(CreateAss, self).process()
+ instance = super(CreateArnoldSceneSource, self).process()
nodes = []
@@ -61,6 +61,6 @@ class CreateAss(plugin.Creator):
cmds.sets(nodes, rm=instance)
- assContent = cmds.sets(name="content_SET")
- assProxy = cmds.sets(name="proxy_SET", empty=True)
+ assContent = cmds.sets(name=instance + "_content_SET")
+ assProxy = cmds.sets(name=instance + "_proxy_SET", empty=True)
cmds.sets([assContent, assProxy], forceElement=instance)
diff --git a/openpype/hosts/maya/plugins/create/create_pointcache.py b/openpype/hosts/maya/plugins/create/create_pointcache.py
index cdec140ea8..63c0490dc7 100644
--- a/openpype/hosts/maya/plugins/create/create_pointcache.py
+++ b/openpype/hosts/maya/plugins/create/create_pointcache.py
@@ -1,3 +1,5 @@
+from maya import cmds
+
from openpype.hosts.maya.api import (
lib,
plugin
@@ -37,3 +39,9 @@ class CreatePointCache(plugin.Creator):
# Default to not send to farm.
self.data["farm"] = False
self.data["priority"] = 50
+
+ def process(self):
+ instance = super(CreatePointCache, self).process()
+
+ assProxy = cmds.sets(name=instance + "_proxy_SET", empty=True)
+ cmds.sets(assProxy, forceElement=instance)
diff --git a/openpype/hosts/maya/plugins/load/load_abc_to_standin.py b/openpype/hosts/maya/plugins/load/load_abc_to_standin.py
deleted file mode 100644
index 70866a3ba6..0000000000
--- a/openpype/hosts/maya/plugins/load/load_abc_to_standin.py
+++ /dev/null
@@ -1,132 +0,0 @@
-import os
-
-from openpype.pipeline import (
- legacy_io,
- load,
- get_representation_path
-)
-from openpype.settings import get_project_settings
-
-
-class AlembicStandinLoader(load.LoaderPlugin):
- """Load Alembic as Arnold Standin"""
-
- families = ["animation", "model", "proxyAbc", "pointcache"]
- representations = ["abc"]
-
- label = "Import Alembic as Arnold Standin"
- order = -5
- icon = "code-fork"
- color = "orange"
-
- def load(self, context, name, namespace, options):
-
- import maya.cmds as cmds
- import mtoa.ui.arnoldmenu
- from openpype.hosts.maya.api.pipeline import containerise
- from openpype.hosts.maya.api.lib import unique_namespace
-
- version = context["version"]
- version_data = version.get("data", {})
- family = version["data"]["families"]
- self.log.info("version_data: {}\n".format(version_data))
- self.log.info("family: {}\n".format(family))
- frameStart = version_data.get("frameStart", None)
-
- asset = context["asset"]["name"]
- namespace = namespace or unique_namespace(
- asset + "_",
- prefix="_" if asset[0].isdigit() else "",
- suffix="_",
- )
-
- # Root group
- label = "{}:{}".format(namespace, name)
- root = cmds.group(name=label, empty=True)
-
- settings = get_project_settings(os.environ['AVALON_PROJECT'])
- colors = settings["maya"]["load"]["colors"]
- fps = legacy_io.Session["AVALON_FPS"]
- c = colors.get(family[0])
- if c is not None:
- r = (float(c[0]) / 255)
- g = (float(c[1]) / 255)
- b = (float(c[2]) / 255)
- cmds.setAttr(root + ".useOutlinerColor", 1)
- cmds.setAttr(root + ".outlinerColor",
- r, g, b)
-
- transform_name = label + "_ABC"
-
- standinShape = cmds.ls(mtoa.ui.arnoldmenu.createStandIn())[0]
- standin = cmds.listRelatives(standinShape, parent=True,
- typ="transform")
- standin = cmds.rename(standin, transform_name)
- standinShape = cmds.listRelatives(standin, children=True)[0]
-
- cmds.parent(standin, root)
-
- # Set the standin filepath
- cmds.setAttr(standinShape + ".dso", self.fname, type="string")
- cmds.setAttr(standinShape + ".abcFPS", float(fps))
-
- if frameStart is None:
- cmds.setAttr(standinShape + ".useFrameExtension", 0)
-
- elif "model" in family:
- cmds.setAttr(standinShape + ".useFrameExtension", 0)
-
- else:
- cmds.setAttr(standinShape + ".useFrameExtension", 1)
-
- nodes = [root, standin]
- self[:] = nodes
-
- return containerise(
- name=name,
- namespace=namespace,
- nodes=nodes,
- context=context,
- loader=self.__class__.__name__)
-
- def update(self, container, representation):
-
- import pymel.core as pm
-
- path = get_representation_path(representation)
- fps = legacy_io.Session["AVALON_FPS"]
- # Update the standin
- standins = list()
- members = pm.sets(container['objectName'], query=True)
- self.log.info("container:{}".format(container))
- for member in members:
- shape = member.getShape()
- if (shape and shape.type() == "aiStandIn"):
- standins.append(shape)
-
- for standin in standins:
- standin.dso.set(path)
- standin.abcFPS.set(float(fps))
- if "modelMain" in container['objectName']:
- standin.useFrameExtension.set(0)
- else:
- standin.useFrameExtension.set(1)
-
- container = pm.PyNode(container["objectName"])
- container.representation.set(str(representation["_id"]))
-
- def switch(self, container, representation):
- self.update(container, representation)
-
- def remove(self, container):
- import maya.cmds as cmds
- members = cmds.sets(container['objectName'], query=True)
- cmds.lockNode(members, lock=False)
- cmds.delete([container['objectName']] + members)
-
- # Clean up the namespace
- try:
- cmds.namespace(removeNamespace=container['namespace'],
- deleteNamespaceContent=True)
- except RuntimeError:
- pass
diff --git a/openpype/hosts/maya/plugins/load/load_arnold_standin.py b/openpype/hosts/maya/plugins/load/load_arnold_standin.py
new file mode 100644
index 0000000000..ab69d62ef5
--- /dev/null
+++ b/openpype/hosts/maya/plugins/load/load_arnold_standin.py
@@ -0,0 +1,218 @@
+import os
+import clique
+
+import maya.cmds as cmds
+import mtoa.ui.arnoldmenu
+
+from openpype.settings import get_project_settings
+from openpype.pipeline import (
+ load,
+ get_representation_path
+)
+from openpype.hosts.maya.api.lib import (
+ unique_namespace, get_attribute_input, maintained_selection
+)
+from openpype.hosts.maya.api.pipeline import containerise
+
+
+def is_sequence(files):
+ """Return whether the file list contains a frame sequence."""
+ collections, remainder = clique.assemble(files)
+ return bool(collections)
+
+
+class ArnoldStandinLoader(load.LoaderPlugin):
+ """Load as Arnold standin"""
+
+ families = ["ass", "animation", "model", "proxyAbc", "pointcache"]
+ representations = ["ass", "abc"]
+
+ label = "Load as Arnold standin"
+ order = -5
+ icon = "code-fork"
+ color = "orange"
+
+ def load(self, context, name, namespace, options):
+ version = context['version']
+ version_data = version.get("data", {})
+
+ self.log.info("version_data: {}\n".format(version_data))
+
+ asset = context['asset']['name']
+ namespace = namespace or unique_namespace(
+ asset + "_",
+ prefix="_" if asset[0].isdigit() else "",
+ suffix="_",
+ )
+
+ # Root group
+ label = "{}:{}".format(namespace, name)
+ root = cmds.group(name=label, empty=True)
+
+ # Set color.
+ settings = get_project_settings(context["project"]["name"])
+ color = settings['maya']['load']['colors'].get('ass')
+ if color is not None:
+ cmds.setAttr(root + ".useOutlinerColor", True)
+ cmds.setAttr(
+ root + ".outlinerColor", color[0], color[1], color[2]
+ )
+
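+ # Build the standin under a maintained selection so loading does not
+ # clobber the artist's current selection.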
+ with maintained_selection():
+ # Create transform with shape
+ transform_name = label + "_standin"
+
+ standin_shape = mtoa.ui.arnoldmenu.createStandIn()
+ standin = cmds.listRelatives(standin_shape, parent=True)[0]
+ standin = cmds.rename(standin, transform_name)
+ standin_shape = cmds.listRelatives(standin, shapes=True)[0]
+
+ cmds.parent(standin, root)
+
+ # Set the standin filepath
+ path, operator = self._setup_proxy(
+ standin_shape, self.fname, namespace
+ )
+ cmds.setAttr(standin_shape + ".dso", path, type="string")
+ sequence = is_sequence(os.listdir(os.path.dirname(self.fname)))
+ cmds.setAttr(standin_shape + ".useFrameExtension", sequence)
+
+ nodes = [root, standin]
+ if operator is not None:
+ nodes.append(operator)
+ self[:] = nodes
+
+ return containerise(
+ name=name,
+ namespace=namespace,
+ nodes=nodes,
+ context=context,
+ loader=self.__class__.__name__)
+
+ def get_next_free_multi_index(self, attr_name):
+ """Find the next unconnected multi index at the input attribute."""
+ for index in range(10000000):
+ connection_info = cmds.connectionInfo(
+ "{}[{}]".format(attr_name, index),
+ sourceFromDestination=True
+ )
+ if not connection_info:
+ return index
+
+ def _get_proxy_path(self, path):
+ basename_split = os.path.basename(path).split(".")
+ proxy_basename = (
+ basename_split[0] + "_proxy." + ".".join(basename_split[1:])
+ )
+ proxy_path = "/".join([os.path.dirname(path), proxy_basename])
+ return proxy_basename, proxy_path
+
+ def _setup_proxy(self, shape, path, namespace):
+ proxy_basename, proxy_path = self._get_proxy_path(path)
+
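+ # Reuse the aiMerge operator already wired into the render options, or
+ # create and connect one so string replace operators can chain onto it.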
+ options_node = "defaultArnoldRenderOptions"
+ merge_operator = get_attribute_input(options_node + ".operator")
+ if merge_operator is None:
+ merge_operator = cmds.createNode("aiMerge")
+ cmds.connectAttr(
+ merge_operator + ".message", options_node + ".operator"
+ )
+
+ merge_operator = merge_operator.split(".")[0]
+
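+ # At render time the aiStringReplace operator swaps the proxy basename
+ # back to the full resolution filename on the matching standin nodes.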
+ string_replace_operator = cmds.createNode(
+ "aiStringReplace", name=namespace + ":string_replace_operator"
+ )
+ node_type = "alembic" if path.endswith(".abc") else "procedural"
+ cmds.setAttr(
+ string_replace_operator + ".selection",
+ "*.(@node=='{}')".format(node_type),
+ type="string"
+ )
+ cmds.setAttr(
+ string_replace_operator + ".match",
+ proxy_basename,
+ type="string"
+ )
+ cmds.setAttr(
+ string_replace_operator + ".replace",
+ os.path.basename(path),
+ type="string"
+ )
+
+ cmds.connectAttr(
+ string_replace_operator + ".out",
+ "{}.inputs[{}]".format(
+ merge_operator,
+ self.get_next_free_multi_index(merge_operator + ".inputs")
+ )
+ )
+
+ # We set up the string operator whether or not a proxy exists. This
+ # makes updating easier since the operator is always present. Return
+ # the original path for the standin when there is no proxy.
+ if not os.path.exists(proxy_path):
+ return path, string_replace_operator
+
+ return proxy_path, string_replace_operator
+
+ def update(self, container, representation):
+ # Update the standin
+ members = cmds.sets(container['objectName'], query=True)
+ for member in members:
+ if cmds.nodeType(member) == "aiStringReplace":
+ string_replace_operator = member
+
+ shapes = cmds.listRelatives(member, shapes=True)
+ if not shapes:
+ continue
+ if cmds.nodeType(shapes[0]) == "aiStandIn":
+ standin = shapes[0]
+
+ path = get_representation_path(representation)
+ proxy_basename, proxy_path = self._get_proxy_path(path)
+
+ # Whether or not a proxy exists, we still update the string operator.
+ # If no proxy exists, the string operator won't replace anything.
+ cmds.setAttr(
+ string_replace_operator + ".match",
+ "resources/" + proxy_basename,
+ type="string"
+ )
+ cmds.setAttr(
+ string_replace_operator + ".replace",
+ os.path.basename(path),
+ type="string"
+ )
+
+ dso_path = path
+ if os.path.exists(proxy_path):
+ dso_path = proxy_path
+ cmds.setAttr(standin + ".dso", dso_path, type="string")
+
+ sequence = is_sequence(os.listdir(os.path.dirname(path)))
+ cmds.setAttr(standin + ".useFrameExtension", sequence)
+
+ cmds.setAttr(
+ container["objectName"] + ".representation",
+ str(representation["_id"]),
+ type="string"
+ )
+
+ def switch(self, container, representation):
+ self.update(container, representation)
+
+ def remove(self, container):
+ members = cmds.sets(container['objectName'], query=True)
+ cmds.lockNode(members, lock=False)
+ cmds.delete([container['objectName']] + members)
+
+ # Clean up the namespace
+ try:
+ cmds.namespace(removeNamespace=container['namespace'],
+ deleteNamespaceContent=True)
+ except RuntimeError:
+ pass
diff --git a/openpype/hosts/maya/plugins/load/load_ass.py b/openpype/hosts/maya/plugins/load/load_ass.py
deleted file mode 100644
index 5db6fc3dfa..0000000000
--- a/openpype/hosts/maya/plugins/load/load_ass.py
+++ /dev/null
@@ -1,290 +0,0 @@
-import os
-import clique
-
-from openpype.settings import get_project_settings
-from openpype.pipeline import (
- load,
- get_representation_path
-)
-import openpype.hosts.maya.api.plugin
-from openpype.hosts.maya.api.plugin import get_reference_node
-from openpype.hosts.maya.api.lib import (
- maintained_selection,
- unique_namespace
-)
-from openpype.hosts.maya.api.pipeline import containerise
-
-
-class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
- """Load Arnold Proxy as reference"""
-
- families = ["ass"]
- representations = ["ass"]
-
- label = "Reference .ASS standin with Proxy"
- order = -10
- icon = "code-fork"
- color = "orange"
-
- def process_reference(self, context, name, namespace, options):
-
- import maya.cmds as cmds
- import pymel.core as pm
-
- version = context['version']
- version_data = version.get("data", {})
-
- self.log.info("version_data: {}\n".format(version_data))
-
- frameStart = version_data.get("frameStart", None)
-
- try:
- family = context["representation"]["context"]["family"]
- except ValueError:
- family = "ass"
-
- with maintained_selection():
-
- groupName = "{}:{}".format(namespace, name)
- path = self.fname
- proxyPath_base = os.path.splitext(path)[0]
-
- if frameStart is not None:
- proxyPath_base = os.path.splitext(proxyPath_base)[0]
-
- publish_folder = os.path.split(path)[0]
- files_in_folder = os.listdir(publish_folder)
- collections, remainder = clique.assemble(files_in_folder)
-
- if collections:
- hashes = collections[0].padding * '#'
- coll = collections[0].format('{head}[index]{tail}')
- filename = coll.replace('[index]', hashes)
-
- path = os.path.join(publish_folder, filename)
-
- proxyPath = proxyPath_base + ".ma"
-
- project_name = context["project"]["name"]
- file_url = self.prepare_root_value(proxyPath,
- project_name)
-
- nodes = cmds.file(file_url,
- namespace=namespace,
- reference=True,
- returnNewNodes=True,
- groupReference=True,
- groupName=groupName)
-
- cmds.makeIdentity(groupName, apply=False, rotate=True,
- translate=True, scale=True)
-
- # Set attributes
- proxyShape = pm.ls(nodes, type="mesh")[0]
-
- proxyShape.aiTranslator.set('procedural')
- proxyShape.dso.set(path)
- proxyShape.aiOverrideShaders.set(0)
-
- settings = get_project_settings(project_name)
- colors = settings['maya']['load']['colors']
-
- c = colors.get(family)
- if c is not None:
- cmds.setAttr(groupName + ".useOutlinerColor", 1)
- cmds.setAttr(groupName + ".outlinerColor",
- (float(c[0])/255),
- (float(c[1])/255),
- (float(c[2])/255)
- )
-
- self[:] = nodes
-
- return nodes
-
- def switch(self, container, representation):
- self.update(container, representation)
-
- def update(self, container, representation):
- from maya import cmds
- import pymel.core as pm
-
- node = container["objectName"]
-
- representation["context"].pop("frame", None)
- path = get_representation_path(representation)
- print(path)
- # path = self.fname
- print(self.fname)
- proxyPath = os.path.splitext(path)[0] + ".ma"
- print(proxyPath)
-
- # Get reference node from container members
- members = cmds.sets(node, query=True, nodesOnly=True)
- reference_node = get_reference_node(members)
-
- assert os.path.exists(proxyPath), "%s does not exist." % proxyPath
-
- try:
- file_url = self.prepare_root_value(proxyPath,
- representation["context"]
- ["project"]
- ["name"])
- content = cmds.file(file_url,
- loadReference=reference_node,
- type="mayaAscii",
- returnNewNodes=True)
-
- # Set attributes
- proxyShape = pm.ls(content, type="mesh")[0]
-
- proxyShape.aiTranslator.set('procedural')
- proxyShape.dso.set(path)
- proxyShape.aiOverrideShaders.set(0)
-
- except RuntimeError as exc:
- # When changing a reference to a file that has load errors the
- # command will raise an error even if the file is still loaded
- # correctly (e.g. when raising errors on Arnold attributes)
- # When the file is loaded and has content, we consider it's fine.
- if not cmds.referenceQuery(reference_node, isLoaded=True):
- raise
-
- content = cmds.referenceQuery(reference_node,
- nodes=True,
- dagPath=True)
- if not content:
- raise
-
- self.log.warning("Ignoring file read error:\n%s", exc)
-
- # Add new nodes of the reference to the container
- cmds.sets(content, forceElement=node)
-
- # Remove any placeHolderList attribute entries from the set that
- # are remaining from nodes being removed from the referenced file.
- members = cmds.sets(node, query=True)
- invalid = [x for x in members if ".placeHolderList" in x]
- if invalid:
- cmds.sets(invalid, remove=node)
-
- # Update metadata
- cmds.setAttr("{}.representation".format(node),
- str(representation["_id"]),
- type="string")
-
-
-class AssStandinLoader(load.LoaderPlugin):
- """Load .ASS file as standin"""
-
- families = ["ass"]
- representations = ["ass"]
-
- label = "Load .ASS file as standin"
- order = -5
- icon = "code-fork"
- color = "orange"
-
- def load(self, context, name, namespace, options):
-
- import maya.cmds as cmds
- import mtoa.ui.arnoldmenu
- import pymel.core as pm
-
- version = context['version']
- version_data = version.get("data", {})
-
- self.log.info("version_data: {}\n".format(version_data))
-
- frameStart = version_data.get("frameStart", None)
-
- asset = context['asset']['name']
- namespace = namespace or unique_namespace(
- asset + "_",
- prefix="_" if asset[0].isdigit() else "",
- suffix="_",
- )
-
- # cmds.loadPlugin("gpuCache", quiet=True)
-
- # Root group
- label = "{}:{}".format(namespace, name)
- root = pm.group(name=label, empty=True)
-
- settings = get_project_settings(os.environ['AVALON_PROJECT'])
- colors = settings['maya']['load']['colors']
-
- c = colors.get('ass')
- if c is not None:
- cmds.setAttr(root + ".useOutlinerColor", 1)
- cmds.setAttr(root + ".outlinerColor",
- c[0], c[1], c[2])
-
- # Create transform with shape
- transform_name = label + "_ASS"
- # transform = pm.createNode("transform", name=transform_name,
- # parent=root)
-
- standinShape = pm.PyNode(mtoa.ui.arnoldmenu.createStandIn())
- standin = standinShape.getParent()
- standin.rename(transform_name)
-
- pm.parent(standin, root)
-
- # Set the standin filepath
- standinShape.dso.set(self.fname)
- if frameStart is not None:
- standinShape.useFrameExtension.set(1)
-
- nodes = [root, standin]
- self[:] = nodes
-
- return containerise(
- name=name,
- namespace=namespace,
- nodes=nodes,
- context=context,
- loader=self.__class__.__name__)
-
- def update(self, container, representation):
-
- import pymel.core as pm
-
- path = get_representation_path(representation)
-
- files_in_path = os.listdir(os.path.split(path)[0])
- sequence = 0
- collections, remainder = clique.assemble(files_in_path)
- if collections:
- sequence = 1
-
- # Update the standin
- standins = list()
- members = pm.sets(container['objectName'], query=True)
- for member in members:
- shape = member.getShape()
- if (shape and shape.type() == "aiStandIn"):
- standins.append(shape)
-
- for standin in standins:
- standin.dso.set(path)
- standin.useFrameExtension.set(sequence)
-
- container = pm.PyNode(container["objectName"])
- container.representation.set(str(representation["_id"]))
-
- def switch(self, container, representation):
- self.update(container, representation)
-
- def remove(self, container):
- import maya.cmds as cmds
- members = cmds.sets(container['objectName'], query=True)
- cmds.lockNode(members, lock=False)
- cmds.delete([container['objectName']] + members)
-
- # Clean up the namespace
- try:
- cmds.namespace(removeNamespace=container['namespace'],
- deleteNamespaceContent=True)
- except RuntimeError:
- pass
diff --git a/openpype/hosts/maya/plugins/publish/collect_ass.py b/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py
similarity index 60%
rename from openpype/hosts/maya/plugins/publish/collect_ass.py
rename to openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py
index b5e05d6665..0415808b7a 100644
--- a/openpype/hosts/maya/plugins/publish/collect_ass.py
+++ b/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py
@@ -1,19 +1,18 @@
from maya import cmds
-from openpype.pipeline.publish import KnownPublishError
import pyblish.api
-class CollectAssData(pyblish.api.InstancePlugin):
- """Collect Ass data."""
+class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
+ """Collect Arnold Scene Source data."""
# Offset to be after renderable camera collection.
order = pyblish.api.CollectorOrder + 0.2
- label = 'Collect Ass'
+ label = "Collect Arnold Scene Source"
families = ["ass"]
def process(self, instance):
- objsets = instance.data['setMembers']
+ objsets = instance.data["setMembers"]
for objset in objsets:
objset = str(objset)
@@ -21,15 +20,12 @@ class CollectAssData(pyblish.api.InstancePlugin):
if members is None:
self.log.warning("Skipped empty instance: \"%s\" " % objset)
continue
- if "content_SET" in objset:
- instance.data['setMembers'] = members
- self.log.debug('content members: {}'.format(members))
- elif objset.startswith("proxy_SET"):
- if len(members) != 1:
- msg = "You have multiple proxy meshes, please only use one"
- raise KnownPublishError(msg)
- instance.data['proxy'] = members
- self.log.debug('proxy members: {}'.format(members))
+ if objset.endswith("content_SET"):
+ instance.data["setMembers"] = cmds.ls(members, long=True)
+ self.log.debug("content members: {}".format(members))
+ elif objset.endswith("proxy_SET"):
+ instance.data["proxy"] = cmds.ls(members, long=True)
+ self.log.debug("proxy members: {}".format(members))
# Use camera in object set if present else default to render globals
# camera.
diff --git a/openpype/hosts/maya/plugins/publish/collect_pointcache.py b/openpype/hosts/maya/plugins/publish/collect_pointcache.py
index a841341f72..332992ca92 100644
--- a/openpype/hosts/maya/plugins/publish/collect_pointcache.py
+++ b/openpype/hosts/maya/plugins/publish/collect_pointcache.py
@@ -1,3 +1,5 @@
+from maya import cmds
+
import pyblish.api
@@ -12,3 +14,31 @@ class CollectPointcache(pyblish.api.InstancePlugin):
def process(self, instance):
if instance.data.get("farm"):
instance.data["families"].append("publish.farm")
+
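+ # Collect proxy geometry from a "proxy_SET" member and remove that set
+ # from the instance so it is not exported with the main pointcache.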
+ proxy_set = None
+ for node in instance.data["setMembers"]:
+ if cmds.nodeType(node) != "objectSet":
+ continue
+ members = cmds.sets(node, query=True)
+ if members is None:
+ self.log.warning("Skipped empty objectset: \"%s\" " % node)
+ continue
+ if node.endswith("proxy_SET"):
+ proxy_set = node
+ instance.data["proxy"] = []
+ instance.data["proxyRoots"] = []
+ for member in members:
+ instance.data["proxy"].extend(cmds.ls(member, long=True))
+ instance.data["proxyRoots"].extend(
+ cmds.ls(member, long=True)
+ )
+ instance.data["proxy"].extend(
+ cmds.listRelatives(member, shapes=True, fullPath=True)
+ )
+ self.log.debug(
+ "proxy members: {}".format(instance.data["proxy"])
+ )
+
+ if proxy_set:
+ instance.remove(proxy_set)
+ instance.data["setMembers"].remove(proxy_set)
diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py
index fc297ef612..f2b5262187 100644
--- a/openpype/hosts/maya/plugins/publish/collect_render.py
+++ b/openpype/hosts/maya/plugins/publish/collect_render.py
@@ -42,7 +42,6 @@ Provides:
import re
import os
import platform
-import json
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
@@ -320,7 +319,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
"renderSetupIncludeLights"
),
"strict_error_checking": render_instance.data.get(
- "strict_error_checking")
+ "strict_error_checking", True
+ )
}
# Collect Deadline url if Deadline module is enabled
diff --git a/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py b/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py
new file mode 100644
index 0000000000..924ac58c40
--- /dev/null
+++ b/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py
@@ -0,0 +1,160 @@
+import os
+
+from maya import cmds
+import arnold
+
+from openpype.pipeline import publish
+from openpype.hosts.maya.api.lib import (
+ maintained_selection, attribute_values, delete_after
+)
+
+
+class ExtractArnoldSceneSource(publish.Extractor):
+ """Extract the content of the instance to an Arnold Scene Source file."""
+
+ label = "Extract Arnold Scene Source"
+ hosts = ["maya"]
+ families = ["ass"]
+ asciiAss = False
+
+ def process(self, instance):
+ staging_dir = self.staging_dir(instance)
+ filename = "{}.ass".format(instance.name)
+ file_path = os.path.join(staging_dir, filename)
+
+ # Mask
+ mask = arnold.AI_NODE_ALL
+
+ node_types = {
+ "options": arnold.AI_NODE_OPTIONS,
+ "camera": arnold.AI_NODE_CAMERA,
+ "light": arnold.AI_NODE_LIGHT,
+ "shape": arnold.AI_NODE_SHAPE,
+ "shader": arnold.AI_NODE_SHADER,
+ "override": arnold.AI_NODE_OVERRIDE,
+ "driver": arnold.AI_NODE_DRIVER,
+ "filter": arnold.AI_NODE_FILTER,
+ "color_manager": arnold.AI_NODE_COLOR_MANAGER,
+ "operator": arnold.AI_NODE_OPERATOR
+ }
+
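+ # Start from exporting all node types and XOR away each masked type.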
+ for key in node_types.keys():
+ if instance.data.get("mask" + key.title()):
+ mask = mask ^ node_types[key]
+
+ # Motion blur
+ attribute_data = {
+ "defaultArnoldRenderOptions.motion_blur_enable": instance.data.get(
+ "motionBlur", True
+ ),
+ "defaultArnoldRenderOptions.motion_steps": instance.data.get(
+ "motionBlurKeys", 2
+ ),
+ "defaultArnoldRenderOptions.motion_frames": instance.data.get(
+ "motionBlurLength", 0.5
+ )
+ }
+
+ # Write out .ass file
+ kwargs = {
+ "filename": file_path,
+ "startFrame": instance.data.get("frameStartHandle", 1),
+ "endFrame": instance.data.get("frameEndHandle", 1),
+ "frameStep": instance.data.get("step", 1),
+ "selected": True,
+ "asciiAss": self.asciiAss,
+ "shadowLinks": True,
+ "lightLinks": True,
+ "boundingBox": True,
+ "expandProcedurals": instance.data.get("expandProcedurals", False),
+ "camera": instance.data["camera"],
+ "mask": mask
+ }
+
+ filenames = self._extract(
+ instance.data["setMembers"], attribute_data, kwargs
+ )
+
+ if "representations" not in instance.data:
+ instance.data["representations"] = []
+
+ representation = {
+ "name": "ass",
+ "ext": "ass",
+ "files": filenames if len(filenames) > 1 else filenames[0],
+ "stagingDir": staging_dir,
+ "frameStart": kwargs["startFrame"]
+ }
+
+ instance.data["representations"].append(representation)
+
+ self.log.info(
+ "Extracted instance {} to: {}".format(instance.name, staging_dir)
+ )
+
+ # Extract proxy.
+ if not instance.data.get("proxy", []):
+ return
+
+ kwargs["filename"] = file_path.replace(".ass", "_proxy.ass")
+ filenames = self._extract(
+ instance.data["proxy"], attribute_data, kwargs
+ )
+
+ representation = {
+ "name": "proxy",
+ "ext": "ass",
+ "files": filenames if len(filenames) > 1 else filenames[0],
+ "stagingDir": staging_dir,
+ "frameStart": kwargs["startFrame"],
+ "outputName": "proxy"
+ }
+
+ instance.data["representations"].append(representation)
+
+ def _extract(self, nodes, attribute_data, kwargs):
+ self.log.info("Writing: " + kwargs["filename"])
+ filenames = []
+ # Duplicate the nodes so they are direct children of the world. This
+ # keeps the hierarchy of every exported ass file consistent.
+ with delete_after() as delete_bin:
+ duplicate_nodes = []
+ for node in nodes:
+ duplicate_transform = cmds.duplicate(node)[0]
+
+ # Discard all children except the shapes directly under the node.
+ shapes = cmds.listRelatives(duplicate_transform, shapes=True) or []
+ children = cmds.listRelatives(
+ duplicate_transform, children=True
+ ) or []
+ to_delete = set(children) - set(shapes)
+ if to_delete:
+ cmds.delete(list(to_delete))
+
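+ # Move the duplicate to world level so the exported hierarchy is flat.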
+ duplicate_transform = cmds.parent(
+ duplicate_transform, world=True
+ )[0]
+
+ cmds.rename(duplicate_transform, node.split("|")[-1])
+ duplicate_transform = "|" + node.split("|")[-1]
+
+ duplicate_nodes.append(duplicate_transform)
+ delete_bin.append(duplicate_transform)
+
+ with attribute_values(attribute_data):
+ with maintained_selection():
+ self.log.info(
+ "Writing: {}".format(duplicate_nodes)
+ )
+ cmds.select(duplicate_nodes, noExpand=True)
+
+ self.log.info(
+ "Extracting ass sequence with: {}".format(kwargs)
+ )
+
+ exported_files = cmds.arnoldExportAss(**kwargs)
+
+ for file in exported_files:
+ filenames.append(os.path.split(file)[1])
+
+ self.log.info("Exported: {}".format(filenames))
+
+ return filenames
diff --git a/openpype/hosts/maya/plugins/publish/extract_ass.py b/openpype/hosts/maya/plugins/publish/extract_ass.py
deleted file mode 100644
index 049f256a7a..0000000000
--- a/openpype/hosts/maya/plugins/publish/extract_ass.py
+++ /dev/null
@@ -1,106 +0,0 @@
-import os
-
-from maya import cmds
-import arnold
-
-from openpype.pipeline import publish
-from openpype.hosts.maya.api.lib import maintained_selection, attribute_values
-
-
-class ExtractAssStandin(publish.Extractor):
- """Extract the content of the instance to a ass file"""
-
- label = "Arnold Scene Source (.ass)"
- hosts = ["maya"]
- families = ["ass"]
- asciiAss = False
-
- def process(self, instance):
- staging_dir = self.staging_dir(instance)
- filename = "{}.ass".format(instance.name)
- filenames = []
- file_path = os.path.join(staging_dir, filename)
-
- # Mask
- mask = arnold.AI_NODE_ALL
-
- node_types = {
- "options": arnold.AI_NODE_OPTIONS,
- "camera": arnold.AI_NODE_CAMERA,
- "light": arnold.AI_NODE_LIGHT,
- "shape": arnold.AI_NODE_SHAPE,
- "shader": arnold.AI_NODE_SHADER,
- "override": arnold.AI_NODE_OVERRIDE,
- "driver": arnold.AI_NODE_DRIVER,
- "filter": arnold.AI_NODE_FILTER,
- "color_manager": arnold.AI_NODE_COLOR_MANAGER,
- "operator": arnold.AI_NODE_OPERATOR
- }
-
- for key in node_types.keys():
- if instance.data.get("mask" + key.title()):
- mask = mask ^ node_types[key]
-
- # Motion blur
- values = {
- "defaultArnoldRenderOptions.motion_blur_enable": instance.data.get(
- "motionBlur", True
- ),
- "defaultArnoldRenderOptions.motion_steps": instance.data.get(
- "motionBlurKeys", 2
- ),
- "defaultArnoldRenderOptions.motion_frames": instance.data.get(
- "motionBlurLength", 0.5
- )
- }
-
- # Write out .ass file
- kwargs = {
- "filename": file_path,
- "startFrame": instance.data.get("frameStartHandle", 1),
- "endFrame": instance.data.get("frameEndHandle", 1),
- "frameStep": instance.data.get("step", 1),
- "selected": True,
- "asciiAss": self.asciiAss,
- "shadowLinks": True,
- "lightLinks": True,
- "boundingBox": True,
- "expandProcedurals": instance.data.get("expandProcedurals", False),
- "camera": instance.data["camera"],
- "mask": mask
- }
-
- self.log.info("Writing: '%s'" % file_path)
- with attribute_values(values):
- with maintained_selection():
- self.log.info(
- "Writing: {}".format(instance.data["setMembers"])
- )
- cmds.select(instance.data["setMembers"], noExpand=True)
-
- self.log.info(
- "Extracting ass sequence with: {}".format(kwargs)
- )
-
- exported_files = cmds.arnoldExportAss(**kwargs)
-
- for file in exported_files:
- filenames.append(os.path.split(file)[1])
-
- self.log.info("Exported: {}".format(filenames))
-
- if "representations" not in instance.data:
- instance.data["representations"] = []
-
- representation = {
- 'name': 'ass',
- 'ext': 'ass',
- 'files': filenames if len(filenames) > 1 else filenames[0],
- "stagingDir": staging_dir,
- 'frameStart': kwargs["startFrame"]
- }
-
- instance.data["representations"].append(representation)
-
- self.log.info("Extracted instance '%s' to: %s"
- % (instance.name, staging_dir))
diff --git a/openpype/hosts/maya/plugins/publish/extract_assproxy.py b/openpype/hosts/maya/plugins/publish/extract_assproxy.py
deleted file mode 100644
index 4937a28a9e..0000000000
--- a/openpype/hosts/maya/plugins/publish/extract_assproxy.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import os
-import contextlib
-
-from maya import cmds
-
-from openpype.pipeline import publish
-from openpype.hosts.maya.api.lib import maintained_selection
-
-
-class ExtractAssProxy(publish.Extractor):
- """Extract proxy model as Maya Ascii to use as arnold standin
-
-
- """
-
- order = publish.Extractor.order + 0.2
- label = "Ass Proxy (Maya ASCII)"
- hosts = ["maya"]
- families = ["ass"]
-
- def process(self, instance):
-
- @contextlib.contextmanager
- def unparent(root):
- """Temporarily unparent `root`"""
- parent = cmds.listRelatives(root, parent=True)
- if parent:
- cmds.parent(root, world=True)
- yield
- self.log.info("{} - {}".format(root, parent))
- cmds.parent(root, parent)
- else:
- yield
-
- # Define extract output file path
- stagingdir = self.staging_dir(instance)
- filename = "{0}.ma".format(instance.name)
- path = os.path.join(stagingdir, filename)
-
- # Perform extraction
- self.log.info("Performing extraction..")
-
- # Get only the shape contents we need in such a way that we avoid
- # taking along intermediateObjects
- proxy = instance.data.get('proxy', None)
-
- if not proxy:
- self.log.info("no proxy mesh")
- return
-
- members = cmds.ls(proxy,
- dag=True,
- transforms=True,
- noIntermediate=True)
- self.log.info(members)
-
- with maintained_selection():
- with unparent(members[0]):
- cmds.select(members, noExpand=True)
- cmds.file(path,
- force=True,
- typ="mayaAscii",
- exportSelected=True,
- preserveReferences=False,
- channels=False,
- constraints=False,
- expressions=False,
- constructionHistory=False)
-
- if "representations" not in instance.data:
- instance.data["representations"] = []
-
- representation = {
- 'name': 'ma',
- 'ext': 'ma',
- 'files': filename,
- "stagingDir": stagingdir
- }
- instance.data["representations"].append(representation)
-
- self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))
diff --git a/openpype/hosts/maya/plugins/publish/extract_pointcache.py b/openpype/hosts/maya/plugins/publish/extract_pointcache.py
index 7ed73fd5b0..0eb65e4226 100644
--- a/openpype/hosts/maya/plugins/publish/extract_pointcache.py
+++ b/openpype/hosts/maya/plugins/publish/extract_pointcache.py
@@ -1,4 +1,5 @@
import os
+import copy
from maya import cmds
@@ -9,6 +10,7 @@ from openpype.hosts.maya.api.lib import (
maintained_selection,
iter_visible_nodes_in_range
)
+from openpype.lib import StringTemplate
class ExtractAlembic(publish.Extractor):
@@ -23,9 +25,7 @@ class ExtractAlembic(publish.Extractor):
label = "Extract Pointcache (Alembic)"
hosts = ["maya"]
- families = ["pointcache",
- "model",
- "vrayproxy"]
+ families = ["pointcache", "model", "vrayproxy"]
targets = ["local", "remote"]
def process(self, instance):
@@ -87,6 +87,7 @@ class ExtractAlembic(publish.Extractor):
end=end))
suspend = not instance.data.get("refresh", False)
+ self.log.info("Extracting nodes: {}".format(nodes))
with suspended_refresh(suspend=suspend):
with maintained_selection():
cmds.select(nodes, noExpand=True)
@@ -101,9 +102,9 @@ class ExtractAlembic(publish.Extractor):
instance.data["representations"] = []
representation = {
- 'name': 'abc',
- 'ext': 'abc',
- 'files': filename,
+ "name": "abc",
+ "ext": "abc",
+ "files": filename,
"stagingDir": dirname
}
instance.data["representations"].append(representation)
@@ -112,6 +113,48 @@ class ExtractAlembic(publish.Extractor):
self.log.info("Extracted {} to {}".format(instance, dirname))
+ # Extract proxy.
+ if not instance.data.get("proxy"):
+ return
+
+ path = path.replace(".abc", "_proxy.abc")
+ if not instance.data.get("includeParentHierarchy", True):
+ # Set the root nodes if we don't want to include parents
+ # The roots are to be considered the ones that are the actual
+ # direct members of the set
+ options["root"] = instance.data["proxyRoots"]
+
+ with suspended_refresh(suspend=suspend):
+ with maintained_selection():
+ cmds.select(instance.data["proxy"])
+ extract_alembic(
+ file=path,
+ startFrame=start,
+ endFrame=end,
+ **options
+ )
+
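+ # Register the proxy alembic as a transfer into the publish resources
+ # directory, using the templated publish filename with a "_proxy" suffix.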
+ template_data = copy.deepcopy(instance.data["anatomyData"])
+ template_data.update({"ext": "abc"})
+ templates = instance.context.data["anatomy"].templates["publish"]
+ published_filename_without_extension = StringTemplate(
+ templates["file"]
+ ).format(template_data).replace(".abc", "_proxy")
+ transfers = []
+ destination = os.path.join(
+ instance.data["resourcesDir"],
+ filename.replace(
+ filename.split(".")[0],
+ published_filename_without_extension
+ )
+ )
+ transfers.append((path, destination))
+
+ for source, destination in transfers:
+ self.log.debug("Transfer: {} > {}".format(source, destination))
+
+ instance.data["transfers"] = transfers
+
def get_members_and_roots(self, instance):
return instance[:], instance.data.get("setMembers")
diff --git a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py
new file mode 100644
index 0000000000..3b0ffd52d7
--- /dev/null
+++ b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py
@@ -0,0 +1,106 @@
+import maya.cmds as cmds
+
+import pyblish.api
+from openpype.pipeline.publish import (
+ ValidateContentsOrder, PublishValidationError
+)
+
+
+class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
+ """Validate Arnold Scene Source.
+
+ We require at least one root node/parent for the meshes. This ensures we
+ can duplicate the nodes and preserve their names.
+
+ When using proxies, the content and proxy nodes need to share the same
+ names and must not be parented directly to the world. In practice this
+ means at least two groups: content nodes in one and proxy nodes in the
+ other.
+ """
+
+ order = ValidateContentsOrder
+ hosts = ["maya"]
+ families = ["ass"]
+ label = "Validate Arnold Scene Source"
+
+ def _get_nodes_data(self, nodes):
+ ungrouped_nodes = []
+ nodes_by_name = {}
+ parents = []
+ for node in nodes:
+ node_split = node.split("|")
+ if len(node_split) == 2:
+ ungrouped_nodes.append(node)
+
+ parent = "|".join(node_split[:-1])
+ if parent:
+ parents.append(parent)
+
+ nodes_by_name[node_split[-1]] = node
+ for shape in cmds.listRelatives(node, shapes=True) or []:
+ nodes_by_name[shape.split("|")[-1]] = shape
+
+ return ungrouped_nodes, nodes_by_name, parents
+
+ def process(self, instance):
+ ungrouped_nodes = []
+
+ nodes, content_nodes_by_name, content_parents = self._get_nodes_data(
+ instance.data["setMembers"]
+ )
+ ungrouped_nodes.extend(nodes)
+
+ nodes, proxy_nodes_by_name, proxy_parents = self._get_nodes_data(
+ instance.data.get("proxy", [])
+ )
+ ungrouped_nodes.extend(nodes)
+
+ # Validate against nodes directly parented to world.
+ if ungrouped_nodes:
+ raise PublishValidationError(
+ "Found nodes parented to the world: {}\n"
+ "All nodes need to be grouped.".format(ungrouped_nodes)
+ )
+
+ # Proxy validation.
+ if not instance.data.get("proxy", []):
+ return
+
+ # Validate for content and proxy nodes amount being the same.
+ if len(instance.data["setMembers"]) != len(instance.data["proxy"]):
+ raise PublishValidationError(
+ "Amount of content nodes ({}) and proxy nodes ({}) needs to "
+ "be the same.".format(
+ len(instance.data["setMembers"]),
+ len(instance.data["proxy"])
+ )
+ )
+
+ # Validate against content and proxy nodes sharing same parent.
+ if list(set(content_parents) & set(proxy_parents)):
+ raise PublishValidationError(
+ "Content and proxy nodes cannot share the same parent."
+ )
+
+ # Validate for content and proxy nodes sharing same names.
+ sorted_content_names = sorted(content_nodes_by_name.keys())
+ sorted_proxy_names = sorted(proxy_nodes_by_name.keys())
+ odd_content_names = list(
+ set(sorted_content_names) - set(sorted_proxy_names)
+ )
+ odd_content_nodes = [
+ content_nodes_by_name[x] for x in odd_content_names
+ ]
+ odd_proxy_names = list(
+ set(sorted_proxy_names) - set(sorted_content_names)
+ )
+ odd_proxy_nodes = [
+ proxy_nodes_by_name[x] for x in odd_proxy_names
+ ]
+ if sorted_content_names != sorted_proxy_names:
+ raise PublishValidationError(
+ "Content and proxy nodes need to share the same names.\n"
+ "Content nodes not matching: {}\n"
+ "Proxy nodes not matching: {}".format(
+ odd_content_nodes, odd_proxy_nodes
+ )
+ )
diff --git a/openpype/hosts/maya/plugins/publish/validate_maya_units.py b/openpype/hosts/maya/plugins/publish/validate_maya_units.py
index e6fabb1712..ad256b6a72 100644
--- a/openpype/hosts/maya/plugins/publish/validate_maya_units.py
+++ b/openpype/hosts/maya/plugins/publish/validate_maya_units.py
@@ -33,18 +33,11 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin):
linearunits = context.data.get('linearUnits')
angularunits = context.data.get('angularUnits')
- # TODO(antirotor): This is hack as for framerates having multiple
- # decimal places. FTrack is ceiling decimal values on
- # fps to two decimal places but Maya 2019+ is reporting those fps
- # with much higher resolution. As we currently cannot fix Ftrack
- # rounding, we have to round those numbers coming from Maya.
- # NOTE: this must be revisited yet again as it seems that Ftrack is
- # now flooring the value?
- fps = mayalib.float_round(context.data.get('fps'), 2, ceil)
+ fps = context.data.get('fps')
# TODO replace query with 'context.data["assetEntity"]'
asset_doc = get_current_project_asset()
- asset_fps = asset_doc["data"]["fps"]
+ asset_fps = mayalib.convert_to_maya_fps(asset_doc["data"]["fps"])
self.log.info('Units (linear): {0}'.format(linearunits))
self.log.info('Units (angular): {0}'.format(angularunits))
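
Instead of rounding the Maya fps, the validator now normalizes the asset fps through mayalib.convert_to_maya_fps before comparing. A hedged stand-in for that helper, assuming it snaps a float to the closest fps Maya actually supports (the real list of rates lives in the codebase):

    # Shortened, hypothetical list of Maya-supported frame rates.
    MAYA_FPS = [15.0, 23.976023976023978, 24.0, 25.0,
                29.97002997002997, 30.0, 48.0, 50.0, 60.0]

    def convert_to_maya_fps_sketch(fps):
        """Snap an arbitrary float fps to the nearest supported rate."""
        return min(MAYA_FPS, key=lambda supported: abs(supported - fps))

    print(convert_to_maya_fps_sketch(23.98))  # -> 23.976023976023978
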
diff --git a/openpype/hosts/photoshop/plugins/create/create_image.py b/openpype/hosts/photoshop/plugins/create/create_image.py
index cdea82cb05..3d82d6b6f0 100644
--- a/openpype/hosts/photoshop/plugins/create/create_image.py
+++ b/openpype/hosts/photoshop/plugins/create/create_image.py
@@ -193,7 +193,7 @@ class ImageCreator(Creator):
instance_data.pop("uuid")
if not instance_data.get("task"):
- instance_data["task"] = legacy_io.Session.get("AVALON_TASK")
+ instance_data["task"] = self.create_context.get_current_task_name()
if not instance_data.get("variant"):
instance_data["variant"] = ''
diff --git a/openpype/hosts/photoshop/plugins/create/workfile_creator.py b/openpype/hosts/photoshop/plugins/create/workfile_creator.py
index 8ee9a0d832..f5d56adcbc 100644
--- a/openpype/hosts/photoshop/plugins/create/workfile_creator.py
+++ b/openpype/hosts/photoshop/plugins/create/workfile_creator.py
@@ -2,8 +2,7 @@ import openpype.hosts.photoshop.api as api
from openpype.client import get_asset_by_name
from openpype.pipeline import (
AutoCreator,
- CreatedInstance,
- legacy_io
+ CreatedInstance
)
from openpype.hosts.photoshop.api.pipeline import cache_and_get_instances
@@ -38,10 +37,11 @@ class PSWorkfileCreator(AutoCreator):
existing_instance = instance
break
- project_name = legacy_io.Session["AVALON_PROJECT"]
- asset_name = legacy_io.Session["AVALON_ASSET"]
- task_name = legacy_io.Session["AVALON_TASK"]
- host_name = legacy_io.Session["AVALON_APP"]
+ context = self.create_context
+ project_name = context.get_current_project_name()
+ asset_name = context.get_current_asset_name()
+ task_name = context.get_current_task_name()
+ host_name = context.host_name
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
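
Both Photoshop creators now read the current context through CreateContext getters instead of legacy_io.Session. A minimal mock of the consumer-side pattern (the real getters are added in the create/context.py hunk further below):

    class FakeCreateContext:
        # Stand-in mirroring the getters used by the creators above.
        def __init__(self, project, asset, task):
            self._project, self._asset, self._task = project, asset, task

        def get_current_project_name(self):
            return self._project

        def get_current_asset_name(self):
            return self._asset

        def get_current_task_name(self):
            return self._task

    context = FakeCreateContext("demo_project", "sh010", "animation")
    print(context.get_current_project_name(), context.get_current_task_name())
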
diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py
index b5fb955a84..9eb7724a60 100644
--- a/openpype/lib/__init__.py
+++ b/openpype/lib/__init__.py
@@ -82,9 +82,6 @@ from .mongo import (
validate_mongo_connection,
OpenPypeMongoConnection
)
-from .anatomy import (
- Anatomy
-)
from .dateutils import (
get_datetime_data,
@@ -119,36 +116,19 @@ from .transcoding import (
)
from .avalon_context import (
CURRENT_DOC_SCHEMAS,
- PROJECT_NAME_ALLOWED_SYMBOLS,
- PROJECT_NAME_REGEX,
create_project,
- is_latest,
- any_outdated,
- get_asset,
- get_linked_assets,
- get_latest_version,
- get_system_general_anatomy_data,
get_workfile_template_key,
get_workfile_template_key_from_context,
- get_workdir_data,
- get_workdir,
- get_workdir_with_workdir_data,
get_last_workfile_with_version,
get_last_workfile,
- create_workfile_doc,
- save_workfile_data_to_doc,
- get_workfile_doc,
-
BuildWorkfile,
get_creator_by_name,
get_custom_workfile_template,
- change_timer_to_current_context,
-
get_custom_workfile_template_by_context,
get_custom_workfile_template_by_string_context,
get_custom_workfile_template
@@ -186,8 +166,6 @@ from .plugin_tools import (
get_subset_name,
get_subset_name_with_asset_doc,
prepare_template_data,
- filter_pyblish_plugins,
- set_plugin_attributes_from_settings,
source_hash,
)
@@ -278,34 +256,17 @@ __all__ = [
"convert_ffprobe_fps_to_float",
"CURRENT_DOC_SCHEMAS",
- "PROJECT_NAME_ALLOWED_SYMBOLS",
- "PROJECT_NAME_REGEX",
"create_project",
- "is_latest",
- "any_outdated",
- "get_asset",
- "get_linked_assets",
- "get_latest_version",
- "get_system_general_anatomy_data",
"get_workfile_template_key",
"get_workfile_template_key_from_context",
- "get_workdir_data",
- "get_workdir",
- "get_workdir_with_workdir_data",
"get_last_workfile_with_version",
"get_last_workfile",
- "create_workfile_doc",
- "save_workfile_data_to_doc",
- "get_workfile_doc",
-
"BuildWorkfile",
"get_creator_by_name",
- "change_timer_to_current_context",
-
"get_custom_workfile_template_by_context",
"get_custom_workfile_template_by_string_context",
"get_custom_workfile_template",
@@ -338,8 +299,6 @@ __all__ = [
"TaskNotSetError",
"get_subset_name",
"get_subset_name_with_asset_doc",
- "filter_pyblish_plugins",
- "set_plugin_attributes_from_settings",
"source_hash",
"format_file_size",
@@ -358,8 +317,6 @@ __all__ = [
"terminal",
- "Anatomy",
-
"get_datetime_data",
"get_formatted_current_time",
diff --git a/openpype/lib/anatomy.py b/openpype/lib/anatomy.py
deleted file mode 100644
index 6d339f058f..0000000000
--- a/openpype/lib/anatomy.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""Code related to project Anatomy was moved
-to 'openpype.pipeline.anatomy' please change your imports as soon as
-possible. File will be probably removed in OpenPype 3.14.*
-"""
-
-import warnings
-import functools
-
-
-class AnatomyDeprecatedWarning(DeprecationWarning):
- pass
-
-
-def anatomy_deprecated(func):
- """Mark functions as deprecated.
-
- It will result in a warning being emitted when the function is used.
- """
-
- @functools.wraps(func)
- def new_func(*args, **kwargs):
- warnings.simplefilter("always", AnatomyDeprecatedWarning)
- warnings.warn(
- (
- "Deprecated import of 'Anatomy'."
- " Class was moved to 'openpype.pipeline.anatomy'."
- " Please change your imports of Anatomy in codebase."
- ),
- category=AnatomyDeprecatedWarning
- )
- return func(*args, **kwargs)
- return new_func
-
-
-@anatomy_deprecated
-def Anatomy(*args, **kwargs):
- from openpype.pipeline.anatomy import Anatomy
- return Anatomy(*args, **kwargs)
diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py
index 12f4a5198b..a9ae27cb79 100644
--- a/openpype/lib/avalon_context.py
+++ b/openpype/lib/avalon_context.py
@@ -1,6 +1,5 @@
"""Should be used only inside of hosts."""
-import os
-import copy
+
import platform
import logging
import functools
@@ -10,17 +9,12 @@ import six
from openpype.client import (
get_project,
- get_assets,
get_asset_by_name,
- get_last_version_by_subset_name,
- get_workfile_info,
)
from openpype.client.operations import (
CURRENT_ASSET_DOC_SCHEMA,
CURRENT_PROJECT_SCHEMA,
CURRENT_PROJECT_CONFIG_SCHEMA,
- PROJECT_NAME_ALLOWED_SYMBOLS,
- PROJECT_NAME_REGEX,
)
from .profiles_filtering import filter_profiles
from .path_templates import StringTemplate
@@ -128,70 +122,6 @@ def with_pipeline_io(func):
return wrapped
-@deprecated("openpype.pipeline.context_tools.is_representation_from_latest")
-def is_latest(representation):
- """Return whether the representation is from latest version
-
- Args:
- representation (dict): The representation document from the database.
-
- Returns:
- bool: Whether the representation is of latest version.
-
- Deprecated:
- Function will be removed after release version 3.15.*
- """
-
- from openpype.pipeline.context_tools import is_representation_from_latest
-
- return is_representation_from_latest(representation)
-
-
-@deprecated("openpype.pipeline.load.any_outdated_containers")
-def any_outdated():
- """Return whether the current scene has any outdated content.
-
- Deprecated:
- Function will be removed after release version 3.15.*
- """
-
- from openpype.pipeline.load import any_outdated_containers
-
- return any_outdated_containers()
-
-
-@deprecated("openpype.pipeline.context_tools.get_current_project_asset")
-def get_asset(asset_name=None):
- """ Returning asset document from database by its name.
-
- Doesn't count with duplicities on asset names!
-
- Args:
- asset_name (str)
-
- Returns:
- (MongoDB document)
-
- Deprecated:
- Function will be removed after release version 3.15.*
- """
-
- from openpype.pipeline.context_tools import get_current_project_asset
-
- return get_current_project_asset(asset_name=asset_name)
-
-
-@deprecated("openpype.pipeline.template_data.get_general_template_data")
-def get_system_general_anatomy_data(system_settings=None):
- """
- Deprecated:
- Function will be removed after release version 3.15.*
- """
- from openpype.pipeline.template_data import get_general_template_data
-
- return get_general_template_data(system_settings)
-
-
@deprecated("openpype.client.get_linked_asset_ids")
def get_linked_asset_ids(asset_doc):
"""Return linked asset ids for `asset_doc` from DB
@@ -214,66 +144,6 @@ def get_linked_asset_ids(asset_doc):
return get_linked_asset_ids(project_name, asset_doc=asset_doc)
-@deprecated("openpype.client.get_linked_assets")
-def get_linked_assets(asset_doc):
- """Return linked assets for `asset_doc` from DB
-
- Args:
- asset_doc (dict): Asset document from DB
-
- Returns:
- (list) Asset documents of input links for passed asset doc.
-
- Deprecated:
- Function will be removed after release version 3.15.*
- """
-
- from openpype.pipeline import legacy_io
- from openpype.client import get_linked_assets
-
- project_name = legacy_io.active_project()
-
- return get_linked_assets(project_name, asset_doc=asset_doc)
-
-
-@deprecated("openpype.client.get_last_version_by_subset_name")
-def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None):
- """Retrieve latest version from `asset_name`, and `subset_name`.
-
- Do not use if you want to query more than 5 latest versions as this method
- query 3 times to mongo for each call. For those cases is better to use
- more efficient way, e.g. with help of aggregations.
-
- Args:
- asset_name (str): Name of asset.
- subset_name (str): Name of subset.
- dbcon (AvalonMongoDB, optional): Avalon Mongo connection with Session.
- project_name (str, optional): Find latest version in specific project.
-
- Returns:
- None: If asset, subset or version were not found.
- dict: Last version document for entered.
-
- Deprecated:
- Function will be removed after release version 3.15.*
- """
-
- if not project_name:
- if not dbcon:
- from openpype.pipeline import legacy_io
-
- log.debug("Using `legacy_io` for query.")
- dbcon = legacy_io
- # Make sure is installed
- dbcon.install()
-
- project_name = dbcon.active_project()
-
- return get_last_version_by_subset_name(
- project_name, subset_name, asset_name=asset_name
- )
-
-
@deprecated(
"openpype.pipeline.workfile.get_workfile_template_key_from_context")
def get_workfile_template_key_from_context(
@@ -361,142 +231,6 @@ def get_workfile_template_key(
)
-@deprecated("openpype.pipeline.template_data.get_template_data")
-def get_workdir_data(project_doc, asset_doc, task_name, host_name):
- """Prepare data for workdir template filling from entered information.
-
- Args:
- project_doc (dict): Mongo document of project from MongoDB.
- asset_doc (dict): Mongo document of asset from MongoDB.
- task_name (str): Task name for which are workdir data preapred.
- host_name (str): Host which is used to workdir. This is required
- because workdir template may contain `{app}` key.
-
- Returns:
- dict: Data prepared for filling workdir template.
-
- Deprecated:
- Function will be removed after release version 3.15.*
- """
-
- from openpype.pipeline.template_data import get_template_data
-
- return get_template_data(
- project_doc, asset_doc, task_name, host_name
- )
-
-
-@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data")
-def get_workdir_with_workdir_data(
- workdir_data, anatomy=None, project_name=None, template_key=None
-):
- """Fill workdir path from entered data and project's anatomy.
-
- It is possible to pass only project's name instead of project's anatomy but
- one of them **must** be entered. It is preferred to enter anatomy if is
- available as initialization of a new Anatomy object may be time consuming.
-
- Args:
- workdir_data (dict): Data to fill workdir template.
- anatomy (Anatomy): Anatomy object for specific project. Optional if
- `project_name` is entered.
- project_name (str): Project's name. Optional if `anatomy` is entered
- otherwise Anatomy object is created with using the project name.
- template_key (str): Key of work templates in anatomy templates. If not
- passed `get_workfile_template_key_from_context` is used to get it.
- dbcon(AvalonMongoDB): Mongo connection. Required only if 'template_key'
- and 'project_name' are not passed.
-
- Returns:
- TemplateResult: Workdir path.
-
- Raises:
- ValueError: When both `anatomy` and `project_name` are set to None.
-
- Deprecated:
- Function will be removed after release version 3.15.*
- """
-
- if not anatomy and not project_name:
- raise ValueError((
- "Missing required arguments one of `project_name` or `anatomy`"
- " must be entered."
- ))
-
- if not project_name:
- project_name = anatomy.project_name
-
- from openpype.pipeline.workfile import get_workdir_with_workdir_data
-
- return get_workdir_with_workdir_data(
- workdir_data, project_name, anatomy, template_key
- )
-
-
-@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data")
-def get_workdir(
- project_doc,
- asset_doc,
- task_name,
- host_name,
- anatomy=None,
- template_key=None
-):
- """Fill workdir path from entered data and project's anatomy.
-
- Args:
- project_doc (dict): Mongo document of project from MongoDB.
- asset_doc (dict): Mongo document of asset from MongoDB.
- task_name (str): Task name for which are workdir data preapred.
- host_name (str): Host which is used to workdir. This is required
- because workdir template may contain `{app}` key. In `Session`
- is stored under `AVALON_APP` key.
- anatomy (Anatomy): Optional argument. Anatomy object is created using
- project name from `project_doc`. It is preferred to pass this
- argument as initialization of a new Anatomy object may be time
- consuming.
- template_key (str): Key of work templates in anatomy templates. Default
- value is defined in `get_workdir_with_workdir_data`.
-
- Returns:
- TemplateResult: Workdir path.
-
- Deprecated:
- Function will be removed after release version 3.15.*
- """
-
- from openpype.pipeline.workfile import get_workdir
- # Output is TemplateResult object which contain useful data
- return get_workdir(
- project_doc,
- asset_doc,
- task_name,
- host_name,
- anatomy,
- template_key
- )
-
-
-@deprecated("openpype.pipeline.context_tools.get_template_data_from_session")
-def template_data_from_session(session=None):
- """ Return dictionary with template from session keys.
-
- Args:
- session (dict, Optional): The Session to use. If not provided use the
- currently active global Session.
-
- Returns:
- dict: All available data from session.
-
- Deprecated:
- Function will be removed after release version 3.15.*
- """
-
- from openpype.pipeline.context_tools import get_template_data_from_session
-
- return get_template_data_from_session(session)
-
-
@deprecated("openpype.pipeline.context_tools.compute_session_changes")
def compute_session_changes(
session, task=None, asset=None, app=None, template_key=None
@@ -588,133 +322,6 @@ def update_current_task(task=None, asset=None, app=None, template_key=None):
return change_current_context(asset, task, template_key)
-@deprecated("openpype.client.get_workfile_info")
-def get_workfile_doc(asset_id, task_name, filename, dbcon=None):
- """Return workfile document for entered context.
-
- Do not use this method to get more than one document. In that cases use
- custom query as this will return documents from database one by one.
-
- Args:
- asset_id (ObjectId): Mongo ID of an asset under which workfile belongs.
- task_name (str): Name of task under which the workfile belongs.
- filename (str): Name of a workfile.
- dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and
- `legacy_io` is used if not entered.
-
- Returns:
- dict: Workfile document or None.
-
- Deprecated:
- Function will be removed after release version 3.15.*
- """
-
- # Use legacy_io if dbcon is not entered
- if not dbcon:
- from openpype.pipeline import legacy_io
- dbcon = legacy_io
-
- project_name = dbcon.active_project()
- return get_workfile_info(project_name, asset_id, task_name, filename)
-
-
-@deprecated
-def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None):
- """Creates or replace workfile document in mongo.
-
- Do not use this method to update data. This method will remove all
- additional data from existing document.
-
- Args:
- asset_doc (dict): Document of asset under which workfile belongs.
- task_name (str): Name of task for which is workfile related to.
- filename (str): Filename of workfile.
- workdir (str): Path to directory where `filename` is located.
- dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and
- `legacy_io` is used if not entered.
- """
-
- from openpype.pipeline import Anatomy
- from openpype.pipeline.template_data import get_template_data
-
- # Use legacy_io if dbcon is not entered
- if not dbcon:
- from openpype.pipeline import legacy_io
- dbcon = legacy_io
-
- # Filter of workfile document
- doc_filter = {
- "type": "workfile",
- "parent": asset_doc["_id"],
- "task_name": task_name,
- "filename": filename
- }
- # Document data are copy of filter
- doc_data = copy.deepcopy(doc_filter)
-
- # Prepare project for workdir data
- project_name = dbcon.active_project()
- project_doc = get_project(project_name)
- workdir_data = get_template_data(
- project_doc, asset_doc, task_name, dbcon.Session["AVALON_APP"]
- )
- # Prepare anatomy
- anatomy = Anatomy(project_name)
- # Get workdir path (result is anatomy.TemplateResult)
- template_workdir = get_workdir_with_workdir_data(
- workdir_data, anatomy
- )
- template_workdir_path = str(template_workdir).replace("\\", "/")
-
- # Replace slashses in workdir path where workfile is located
- mod_workdir = workdir.replace("\\", "/")
-
- # Replace workdir from templates with rootless workdir
- rootles_workdir = mod_workdir.replace(
- template_workdir_path,
- template_workdir.rootless.replace("\\", "/")
- )
-
- doc_data["schema"] = "pype:workfile-1.0"
- doc_data["files"] = ["/".join([rootles_workdir, filename])]
- doc_data["data"] = {}
-
- dbcon.replace_one(
- doc_filter,
- doc_data,
- upsert=True
- )
-
-
-@deprecated
-def save_workfile_data_to_doc(workfile_doc, data, dbcon=None):
- if not workfile_doc:
- # TODO add log message
- return
-
- if not data:
- return
-
- # Use legacy_io if dbcon is not entered
- if not dbcon:
- from openpype.pipeline import legacy_io
- dbcon = legacy_io
-
- # Convert data to mongo modification keys/values
- # - this is naive implementation which does not expect nested
- # dictionaries
- set_data = {}
- for key, value in data.items():
- new_key = "data.{}".format(key)
- set_data[new_key] = value
-
- # Update workfile document with data
- dbcon.update_one(
- {"_id": workfile_doc["_id"]},
- {"$set": set_data}
- )
-
-
@deprecated("openpype.pipeline.workfile.BuildWorkfile")
def BuildWorkfile():
"""Build workfile class was moved to workfile pipeline.
@@ -747,38 +354,6 @@ def get_creator_by_name(creator_name, case_sensitive=False):
return get_legacy_creator_by_name(creator_name, case_sensitive)
-@deprecated
-def change_timer_to_current_context():
- """Called after context change to change timers.
-
- Deprecated:
- This method is specific for TimersManager module so please use the
- functionality from there. Function will be removed after release
- version 3.15.*
- """
-
- from openpype.pipeline import legacy_io
-
- webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL")
- if not webserver_url:
- log.warning("Couldn't find webserver url")
- return
-
- rest_api_url = "{}/timers_manager/start_timer".format(webserver_url)
- try:
- import requests
- except Exception:
- log.warning("Couldn't start timer")
- return
- data = {
- "project_name": legacy_io.Session["AVALON_PROJECT"],
- "asset_name": legacy_io.Session["AVALON_ASSET"],
- "task_name": legacy_io.Session["AVALON_TASK"]
- }
-
- requests.post(rest_api_url, json=data)
-
-
def _get_task_context_data_for_anatomy(
project_doc, asset_doc, task_name, anatomy=None
):
@@ -800,6 +375,8 @@ def _get_task_context_data_for_anatomy(
dict: With Anatomy context data.
"""
+ from openpype.pipeline.template_data import get_general_template_data
+
if anatomy is None:
from openpype.pipeline import Anatomy
anatomy = Anatomy(project_doc["name"])
@@ -840,7 +417,7 @@ def _get_task_context_data_for_anatomy(
}
}
- system_general_data = get_system_general_anatomy_data()
+ system_general_data = get_general_template_data()
data.update(system_general_data)
return data
diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py
index 1e157dfbfd..10fd3940b8 100644
--- a/openpype/lib/plugin_tools.py
+++ b/openpype/lib/plugin_tools.py
@@ -8,7 +8,6 @@ import warnings
import functools
from openpype.client import get_asset_by_id
-from openpype.settings import get_project_settings
log = logging.getLogger(__name__)
@@ -101,8 +100,6 @@ def get_subset_name_with_asset_doc(
is not passed.
dynamic_data (dict): Dynamic data specific for a creator which creates
instance.
- dbcon (AvalonMongoDB): Mongo connection to be able query asset document
- if 'asset_doc' is not passed.
"""
from openpype.pipeline.create import get_subset_name
@@ -202,122 +199,6 @@ def prepare_template_data(fill_pairs):
return fill_data
-@deprecated("openpype.pipeline.publish.lib.filter_pyblish_plugins")
-def filter_pyblish_plugins(plugins):
- """Filter pyblish plugins by presets.
-
- This servers as plugin filter / modifier for pyblish. It will load plugin
- definitions from presets and filter those needed to be excluded.
-
- Args:
- plugins (dict): Dictionary of plugins produced by :mod:`pyblish-base`
- `discover()` method.
-
- Deprecated:
- Function will be removed after release version 3.15.*
- """
-
- from openpype.pipeline.publish.lib import filter_pyblish_plugins
-
- filter_pyblish_plugins(plugins)
-
-
-@deprecated
-def set_plugin_attributes_from_settings(
- plugins, superclass, host_name=None, project_name=None
-):
- """Change attribute values on Avalon plugins by project settings.
-
- This function should be used only in host context. Modify
- behavior of plugins.
-
- Args:
- plugins (list): Plugins discovered by origin avalon discover method.
- superclass (object): Superclass of plugin type (e.g. Cretor, Loader).
- host_name (str): Name of host for which plugins are loaded and from.
- Value from environment `AVALON_APP` is used if not entered.
- project_name (str): Name of project for which settings will be loaded.
- Value from environment `AVALON_PROJECT` is used if not entered.
-
- Deprecated:
- Function will be removed after release version 3.15.*
- """
-
- # Function is not used anymore
- from openpype.pipeline import LegacyCreator, LoaderPlugin
-
- # determine host application to use for finding presets
- if host_name is None:
- host_name = os.environ.get("AVALON_APP")
-
- if project_name is None:
- project_name = os.environ.get("AVALON_PROJECT")
-
- # map plugin superclass to preset json. Currently supported is load and
- # create (LoaderPlugin and LegacyCreator)
- plugin_type = None
- if superclass is LoaderPlugin or issubclass(superclass, LoaderPlugin):
- plugin_type = "load"
- elif superclass is LegacyCreator or issubclass(superclass, LegacyCreator):
- plugin_type = "create"
-
- if not host_name or not project_name or plugin_type is None:
- msg = "Skipped attributes override from settings."
- if not host_name:
- msg += " Host name is not defined."
-
- if not project_name:
- msg += " Project name is not defined."
-
- if plugin_type is None:
- msg += " Plugin type is unsupported for class {}.".format(
- superclass.__name__
- )
-
- print(msg)
- return
-
- print(">>> Finding presets for {}:{} ...".format(host_name, plugin_type))
-
- project_settings = get_project_settings(project_name)
- plugin_type_settings = (
- project_settings
- .get(host_name, {})
- .get(plugin_type, {})
- )
- global_type_settings = (
- project_settings
- .get("global", {})
- .get(plugin_type, {})
- )
- if not global_type_settings and not plugin_type_settings:
- return
-
- for plugin in plugins:
- plugin_name = plugin.__name__
-
- plugin_settings = None
- # Look for plugin settings in host specific settings
- if plugin_name in plugin_type_settings:
- plugin_settings = plugin_type_settings[plugin_name]
-
- # Look for plugin settings in global settings
- elif plugin_name in global_type_settings:
- plugin_settings = global_type_settings[plugin_name]
-
- if not plugin_settings:
- continue
-
- print(">>> We have preset for {}".format(plugin_name))
- for option, value in plugin_settings.items():
- if option == "enabled" and value is False:
- setattr(plugin, "active", False)
- print(" - is disabled by preset")
- else:
- setattr(plugin, option, value)
- print(" - setting `{}`: `{}`".format(option, value))
-
-
def source_hash(filepath, *args):
"""Generate simple identifier for a source file.
This is used to identify whether a source file has previously been
diff --git a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py
index b0560ce1e8..e4fc64269a 100644
--- a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py
+++ b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py
@@ -201,19 +201,21 @@ def get_openpype_versions(dir_list):
print(">>> Getting OpenPype executable ...")
openpype_versions = []
- install_dir = DirectoryUtils.SearchDirectoryList(dir_list)
- if install_dir:
- print("--- Looking for OpenPype at: {}".format(install_dir))
- sub_dirs = [
- f.path for f in os.scandir(install_dir)
- if f.is_dir()
- ]
- for subdir in sub_dirs:
- version = get_openpype_version_from_path(subdir)
- if not version:
- continue
- print(" - found: {} - {}".format(version, subdir))
- openpype_versions.append((version, subdir))
+ # Support multiple install dirs separated by commas.
+ for single_dir_list in dir_list.split(","):
+ install_dir = DirectoryUtils.SearchDirectoryList(single_dir_list)
+ if install_dir:
+ print("--- Looking for OpenPype at: {}".format(install_dir))
+ sub_dirs = [
+ f.path for f in os.scandir(install_dir)
+ if f.is_dir()
+ ]
+ for subdir in sub_dirs:
+ version = get_openpype_version_from_path(subdir)
+ if not version:
+ continue
+ print(" - found: {} - {}".format(version, subdir))
+ openpype_versions.append((version, subdir))
return openpype_versions
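
The config entry may now contain several search lists separated by commas; each one is handed to Deadline's DirectoryUtils.SearchDirectoryList separately. A pure-Python sketch of the loop with the Deadline call stubbed out (the ';'-separated behavior of the stub is an assumption based on its usage here):

    import os

    def search_directory_list_stub(dir_list):
        # Stand-in for DirectoryUtils.SearchDirectoryList: return the
        # first existing directory from a ';'-separated list.
        for candidate in dir_list.split(";"):
            if os.path.isdir(candidate):
                return candidate
        return None

    # Hypothetical config entry with two comma-separated search lists.
    config_entry = "/mnt/pipeline/openpype;/opt/openpype,/studio/openpype"
    version_dirs = []
    for single_dir_list in config_entry.split(","):
        install_dir = search_directory_list_stub(single_dir_list)
        if not install_dir:
            continue
        version_dirs.extend(
            entry.path for entry in os.scandir(install_dir) if entry.is_dir()
        )
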
diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py
index ab4a3d5e9b..6e1b973fb9 100644
--- a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py
+++ b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py
@@ -107,20 +107,23 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin):
"Scanning for compatible requested "
f"version {requested_version}"))
dir_list = self.GetConfigEntry("OpenPypeInstallationDirs")
+
# clean '\ ' for MacOS pasting
if platform.system().lower() == "darwin":
dir_list = dir_list.replace("\\ ", " ")
- install_dir = DirectoryUtils.SearchDirectoryList(dir_list)
- if install_dir:
- sub_dirs = [
- f.path for f in os.scandir(install_dir)
- if f.is_dir()
- ]
- for subdir in sub_dirs:
- version = self.get_openpype_version_from_path(subdir)
- if not version:
- continue
- openpype_versions.append((version, subdir))
+
+ for single_dir_list in dir_list.split(","):
+ install_dir = DirectoryUtils.SearchDirectoryList(single_dir_list)
+ if install_dir:
+ sub_dirs = [
+ f.path for f in os.scandir(install_dir)
+ if f.is_dir()
+ ]
+ for subdir in sub_dirs:
+ version = self.get_openpype_version_from_path(subdir)
+ if not version:
+ continue
+ openpype_versions.append((version, subdir))
exe_list = self.GetConfigEntry("OpenPypeExecutable")
# clean '\ ' for MacOS pasting
diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py
index 2d06e2ab02..75f43cb22f 100644
--- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py
+++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py
@@ -3,6 +3,7 @@ import json
import copy
import pyblish.api
+from openpype.pipeline.publish import get_publish_repre_path
from openpype.lib.openpype_version import get_openpype_version
from openpype.lib.transcoding import (
get_ffprobe_streams,
@@ -55,6 +56,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
"reference": "reference"
}
keep_first_subset_name_for_review = True
+ upload_reviewable_with_origin_name = False
asset_versions_status_profiles = []
additional_metadata_keys = []
@@ -153,7 +155,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
if not review_representations or has_movie_review:
for repre in thumbnail_representations:
- repre_path = self._get_repre_path(instance, repre, False)
+ repre_path = get_publish_repre_path(instance, repre, False)
if not repre_path:
self.log.warning(
"Published path is not set and source was removed."
@@ -210,7 +212,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
"from {}".format(repre))
continue
- repre_path = self._get_repre_path(instance, repre, False)
+ repre_path = get_publish_repre_path(instance, repre, False)
if not repre_path:
self.log.warning(
"Published path is not set and source was removed."
@@ -293,6 +295,13 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
)
# Add item to component list
component_list.append(review_item)
+ if self.upload_reviewable_with_origin_name:
+ origin_name_component = copy.deepcopy(review_item)
+ filename = os.path.basename(repre_path)
+ origin_name_component["component_data"]["name"] = (
+ os.path.splitext(filename)[0]
+ )
+ component_list.append(origin_name_component)
# Duplicate thumbnail component for all not first reviews
if first_thumbnail_component is not None:
@@ -324,7 +333,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
# Add others representations as component
for repre in other_representations:
- published_path = self._get_repre_path(instance, repre, True)
+ published_path = get_publish_repre_path(instance, repre, True)
if not published_path:
continue
# Create copy of base comp item and append it
@@ -364,51 +373,6 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
def _collect_additional_metadata(self, streams):
pass
- def _get_repre_path(self, instance, repre, only_published):
- """Get representation path that can be used for integration.
-
- When 'only_published' is set to true the validation of path is not
- relevant. In that case we just need what is set in 'published_path'
- as "reference". The reference is not used to get or upload the file but
- for reference where the file was published.
-
- Args:
- instance (pyblish.Instance): Processed instance object. Used
- for source of staging dir if representation does not have
- filled it.
- repre (dict): Representation on instance which could be and
- could not be integrated with main integrator.
- only_published (bool): Care only about published paths and
- ignore if filepath is not existing anymore.
-
- Returns:
- str: Path to representation file.
- None: Path is not filled or does not exists.
- """
-
- published_path = repre.get("published_path")
- if published_path:
- published_path = os.path.normpath(published_path)
- if os.path.exists(published_path):
- return published_path
-
- if only_published:
- return published_path
-
- comp_files = repre["files"]
- if isinstance(comp_files, (tuple, list, set)):
- filename = comp_files[0]
- else:
- filename = comp_files
-
- staging_dir = repre.get("stagingDir")
- if not staging_dir:
- staging_dir = instance.data["stagingDir"]
- src_path = os.path.normpath(os.path.join(staging_dir, filename))
- if os.path.exists(src_path):
- return src_path
- return None
-
def _get_asset_version_status_name(self, instance):
if not self.asset_versions_status_profiles:
return None
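
The new upload_reviewable_with_origin_name option simply duplicates the review component and names the copy after the source file. A minimal sketch of that duplication with a hypothetical component dict and path:

    import copy
    import os

    review_item = {"component_data": {"name": "ftrackreview-mp4"}}  # hypothetical
    repre_path = "/publish/sh010/render_v003.mp4"  # hypothetical

    origin_name_component = copy.deepcopy(review_item)
    filename = os.path.basename(repre_path)
    origin_name_component["component_data"]["name"] = os.path.splitext(filename)[0]
    print(origin_name_component)  # {'component_data': {'name': 'render_v003'}}
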
diff --git a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py
index cfd2d10fd9..fc15d5515f 100644
--- a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py
+++ b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py
@@ -1,6 +1,8 @@
import os
import pyblish.api
+from openpype.pipeline.publish import get_publish_repre_path
+
class IntegrateShotgridPublish(pyblish.api.InstancePlugin):
"""
@@ -22,7 +24,9 @@ class IntegrateShotgridPublish(pyblish.api.InstancePlugin):
for representation in instance.data.get("representations", []):
- local_path = representation.get("published_path")
+ local_path = get_publish_repre_path(
+ instance, representation, False
+ )
code = os.path.basename(local_path)
if representation.get("tags", []):
diff --git a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py
index a1b7140e22..adfdca718c 100644
--- a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py
+++ b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py
@@ -1,6 +1,7 @@
-import os
import pyblish.api
+from openpype.pipeline.publish import get_publish_repre_path
+
class IntegrateShotgridVersion(pyblish.api.InstancePlugin):
"""Integrate Shotgrid Version"""
@@ -41,8 +42,9 @@ class IntegrateShotgridVersion(pyblish.api.InstancePlugin):
data_to_update["sg_status_list"] = status
for representation in instance.data.get("representations", []):
- local_path = representation.get("published_path")
- code = os.path.basename(local_path)
+ local_path = get_publish_repre_path(
+ instance, representation, False
+ )
if "shotgridreview" in representation.get("tags", []):
diff --git a/openpype/modules/slack/plugins/publish/integrate_slack_api.py b/openpype/modules/slack/plugins/publish/integrate_slack_api.py
index 612031efac..4e2557ccc7 100644
--- a/openpype/modules/slack/plugins/publish/integrate_slack_api.py
+++ b/openpype/modules/slack/plugins/publish/integrate_slack_api.py
@@ -8,6 +8,7 @@ from abc import ABCMeta, abstractmethod
import time
from openpype.client import OpenPypeMongoConnection
+from openpype.pipeline.publish import get_publish_repre_path
from openpype.lib.plugin_tools import prepare_template_data
@@ -167,9 +168,8 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin):
thumbnail_path = None
for repre in instance.data.get("representations", []):
if repre.get('thumbnail') or "thumbnail" in repre.get('tags', []):
- repre_thumbnail_path = (
- repre.get("published_path") or
- os.path.join(repre["stagingDir"], repre["files"])
+ repre_thumbnail_path = get_publish_repre_path(
+ instance, repre, False
)
if os.path.exists(repre_thumbnail_path):
thumbnail_path = repre_thumbnail_path
@@ -184,9 +184,8 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin):
if (repre.get("review")
or "review" in tags
or "burnin" in tags):
- repre_review_path = (
- repre.get("published_path") or
- os.path.join(repre["stagingDir"], repre["files"])
+ repre_review_path = get_publish_repre_path(
+ instance, repre, False
)
if os.path.exists(repre_review_path):
review_path = repre_review_path
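
All four integrators above now resolve representation files through the same helper. A hedged usage sketch wrapping it in a small utility (only the import path comes from this PR):

    from openpype.pipeline.publish import get_publish_repre_path

    def iter_repre_paths(instance, only_published=False):
        # Yield resolved file paths for all representations on an instance,
        # falling back to the staging dir when the published file is gone.
        for repre in instance.data.get("representations", []):
            path = get_publish_repre_path(instance, repre, only_published)
            if path:
                yield path
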
diff --git a/openpype/pipeline/create/context.py b/openpype/pipeline/create/context.py
index bce304ab55..7672c49eb3 100644
--- a/openpype/pipeline/create/context.py
+++ b/openpype/pipeline/create/context.py
@@ -8,7 +8,10 @@ import inspect
from uuid import uuid4
from contextlib import contextmanager
-from openpype.client import get_assets
+import pyblish.logic
+import pyblish.api
+
+from openpype.client import get_assets, get_asset_by_name
from openpype.settings import (
get_system_settings,
get_project_settings
@@ -17,13 +20,11 @@ from openpype.lib.attribute_definitions import (
UnknownDef,
serialize_attr_defs,
deserialize_attr_defs,
+ get_default_values,
)
from openpype.host import IPublishHost
from openpype.pipeline import legacy_io
-from openpype.pipeline.mongodb import (
- AvalonMongoDB,
- session_data_from_environment,
-)
+from openpype.pipeline.plugin_discover import DiscoverResult
from .creator_plugins import (
Creator,
@@ -1338,8 +1339,6 @@ class CreateContext:
Args:
host(ModuleType): Host implementation which handles implementation and
global metadata.
- dbcon(AvalonMongoDB): Connection to mongo with context (at least
- project).
headless(bool): Context is created out of UI (Current not used).
reset(bool): Reset context on initialization.
discover_publish_plugins(bool): Discover publish plugins during reset
@@ -1347,16 +1346,8 @@ class CreateContext:
"""
def __init__(
- self, host, dbcon=None, headless=False, reset=True,
- discover_publish_plugins=True
+ self, host, headless=False, reset=True, discover_publish_plugins=True
):
- # Create conncetion if is not passed
- if dbcon is None:
- session = session_data_from_environment(True)
- dbcon = AvalonMongoDB(session)
- dbcon.install()
-
- self.dbcon = dbcon
self.host = host
# Prepare attribute for logger (Created on demand in `log` property)
@@ -1380,6 +1371,10 @@ class CreateContext:
" Missing methods: {}"
).format(joined_methods))
+ self._current_project_name = None
+ self._current_asset_name = None
+ self._current_task_name = None
+
self._host_is_valid = host_is_valid
# Currently unused variable
self.headless = headless
@@ -1387,6 +1382,8 @@ class CreateContext:
# Instances by their ID
self._instances_by_id = {}
+ self.creator_discover_result = None
+ self.convertor_discover_result = None
# Discovered creators
self.creators = {}
# Prepare categories of creators
@@ -1499,11 +1496,20 @@ class CreateContext:
@property
def host_name(self):
+ if hasattr(self.host, "name"):
+ return self.host.name
return os.environ["AVALON_APP"]
- @property
- def project_name(self):
- return self.dbcon.active_project()
+ def get_current_project_name(self):
+ return self._current_project_name
+
+ def get_current_asset_name(self):
+ return self._current_asset_name
+
+ def get_current_task_name(self):
+ return self._current_task_name
+
+ project_name = property(get_current_project_name)
@property
def log(self):
@@ -1520,7 +1526,7 @@ class CreateContext:
self.reset_preparation()
- self.reset_avalon_context()
+ self.reset_current_context()
self.reset_plugins(discover_publish_plugins)
self.reset_context_data()
@@ -1567,14 +1573,22 @@ class CreateContext:
self._collection_shared_data = None
self.refresh_thumbnails()
- def reset_avalon_context(self):
- """Give ability to reset avalon context.
+ def reset_current_context(self):
+ """Refresh current context.
Reset is based on optional host implementation of `get_current_context`
function or using `legacy_io.Session`.
Some hosts have ability to change context file without using workfiles
- tool but that change is not propagated to
+ tool but that change is not propagated to 'legacy_io.Session'
+ nor 'os.environ'.
+
+ Todos:
+ UI: Current context should also be checked on save - compare
+ initial values vs. current values.
+ Related to UI checks: The current workfile can also be considered
+ current context information, as that is where the metadata
+ is stored. We should store the workfile (if available) too.
"""
project_name = asset_name = task_name = None
@@ -1592,12 +1606,9 @@ class CreateContext:
if not task_name:
task_name = legacy_io.Session.get("AVALON_TASK")
- if project_name:
- self.dbcon.Session["AVALON_PROJECT"] = project_name
- if asset_name:
- self.dbcon.Session["AVALON_ASSET"] = asset_name
- if task_name:
- self.dbcon.Session["AVALON_TASK"] = task_name
+ self._current_project_name = project_name
+ self._current_asset_name = asset_name
+ self._current_task_name = task_name
def reset_plugins(self, discover_publish_plugins=True):
"""Reload plugins.
@@ -1611,18 +1622,15 @@ class CreateContext:
self._reset_convertor_plugins()
def _reset_publish_plugins(self, discover_publish_plugins):
- import pyblish.logic
-
from openpype.pipeline import OpenPypePyblishPluginMixin
from openpype.pipeline.publish import (
- publish_plugins_discover,
- DiscoverResult
+ publish_plugins_discover
)
# Reset publish plugins
self._attr_plugins_by_family = {}
- discover_result = DiscoverResult()
+ discover_result = DiscoverResult(pyblish.api.Plugin)
plugins_with_defs = []
plugins_by_targets = []
plugins_mismatch_targets = []
@@ -1661,7 +1669,9 @@ class CreateContext:
creators = {}
autocreators = {}
manual_creators = {}
- for creator_class in discover_creator_plugins():
+ report = discover_creator_plugins(return_report=True)
+ self.creator_discover_result = report
+ for creator_class in report.plugins:
if inspect.isabstract(creator_class):
self.log.info(
"Skipping abstract Creator {}".format(str(creator_class))
@@ -1706,7 +1716,9 @@ class CreateContext:
def _reset_convertor_plugins(self):
convertors_plugins = {}
- for convertor_class in discover_convertor_plugins():
+ report = discover_convertor_plugins(return_report=True)
+ self.convertor_discover_result = report
+ for convertor_class in report.plugins:
if inspect.isabstract(convertor_class):
self.log.info(
"Skipping abstract Creator {}".format(str(convertor_class))
@@ -1792,40 +1804,128 @@ class CreateContext:
with self.bulk_instances_collection():
self._bulk_instances_to_process.append(instance)
- def create(self, identifier, *args, **kwargs):
- """Wrapper for creators to trigger created.
+ def _get_creator_in_create(self, identifier):
+ """Creator by identifier with unified error.
- Different types of creators may expect different arguments thus the
- hints for args are blind.
+ Helper method to get a creator by identifier, raising the same error
+ whenever the creator is not available.
Args:
- identifier (str): Creator's identifier.
- *args (Tuple[Any]): Arguments for create method.
- **kwargs (Dict[Any, Any]): Keyword argument for create method.
+ identifier (str): Identifier of creator plugin.
+
+ Returns:
+ BaseCreator: Creator found by identifier.
+
+ Raises:
+ CreatorError: When identifier is not known.
"""
- error_message = "Failed to run Creator with identifier \"{}\". {}"
creator = self.creators.get(identifier)
- label = getattr(creator, "label", None)
- failed = False
- add_traceback = False
- exc_info = None
- try:
- # Fake CreatorError (Could be maybe specific exception?)
- if creator is None:
+ # Fake CreatorError (could maybe be a more specific exception?)
+ if creator is None:
+ raise CreatorError(
+ "Creator {} was not found".format(identifier)
+ )
+ return creator
+
+ def create(
+ self,
+ creator_identifier,
+ variant,
+ asset_doc=None,
+ task_name=None,
+ pre_create_data=None
+ ):
+ """Trigger create of plugins with standartized arguments.
+
+ Arguments 'asset_doc' and 'task_name' default to the current context.
+ If only 'task_name' is provided it will be overridden by the task name
+ from the current context. If 'asset_doc' is provided without
+ 'task_name', the task name is considered unspecified, which can lead
+ to an error if the subset name template requires a task name.
+
+ Args:
+ creator_identifier (str): Identifier of creator plugin.
+ variant (str): Variant used for subset name.
+ asset_doc (Dict[str, Any]): Asset document which defines the context
+ of creation (the possible context of created instances).
+ task_name (str): Name of the task to which the context is related.
+ pre_create_data (Dict[str, Any]): Pre-create attribute values.
+
+ Returns:
+ Any: Output of triggered creator's 'create' method.
+
+ Raises:
+ CreatorError: If the creator was not found or the asset could not
+ be resolved.
+ """
+
+ creator = self._get_creator_in_create(creator_identifier)
+
+ project_name = self.project_name
+ if asset_doc is None:
+ asset_name = self.get_current_asset_name()
+ asset_doc = get_asset_by_name(project_name, asset_name)
+ task_name = self.get_current_task_name()
+ if asset_doc is None:
raise CreatorError(
- "Creator {} was not found".format(identifier)
+ "Asset with name {} was not found".format(asset_name)
)
- creator.create(*args, **kwargs)
+ if pre_create_data is None:
+ pre_create_data = {}
+
+ precreate_attr_defs = creator.get_pre_create_attr_defs() or []
+ # Create default values of precreate data
+ _pre_create_data = get_default_values(precreate_attr_defs)
+ # Override the defaults with the passed pre-create data
+ # TODO validate types
+ _pre_create_data.update(pre_create_data)
+
+ subset_name = creator.get_subset_name(
+ variant,
+ task_name,
+ asset_doc,
+ project_name,
+ self.host_name
+ )
+ instance_data = {
+ "asset": asset_doc["name"],
+ "task": task_name,
+ "family": creator.family,
+ "variant": variant
+ }
+ return creator.create(
+ subset_name,
+ instance_data,
+ _pre_create_data
+ )
+
+ def _create_with_unified_error(
+ self, identifier, creator, *args, **kwargs
+ ):
+ error_message = "Failed to run Creator with identifier \"{}\". {}"
+
+ label = None
+ add_traceback = False
+ result = None
+ fail_info = None
+ success = False
+
+ try:
+ # Try to get the creator and its label
+ if creator is None:
+ creator = self._get_creator_in_create(identifier)
+ label = getattr(creator, "label", label)
+
+ # Run create
+ result = creator.create(*args, **kwargs)
+ success = True
except CreatorError:
- failed = True
exc_info = sys.exc_info()
self.log.warning(error_message.format(identifier, exc_info[1]))
except:
- failed = True
add_traceback = True
exc_info = sys.exc_info()
self.log.warning(
@@ -1833,12 +1933,35 @@ class CreateContext:
exc_info=True
)
- if failed:
- raise CreatorsCreateFailed([
- prepare_failed_creator_operation_info(
- identifier, label, exc_info, add_traceback
- )
- ])
+ if not success:
+ fail_info = prepare_failed_creator_operation_info(
+ identifier, label, exc_info, add_traceback
+ )
+ return result, fail_info
+
+ def create_with_unified_error(self, identifier, *args, **kwargs):
+ """Trigger create but raise only one error if anything fails.
+
+ Added to raise a unified exception. Captures any possible issue and
+ reraises it with unified information.
+
+ Args:
+ identifier (str): Identifier of creator.
+ *args (Tuple[Any]): Arguments for create method.
+ **kwargs (Dict[Any, Any]): Keyword argument for create method.
+
+ Raises:
+ CreatorsCreateFailed: When creation fails for any reason. If
+ anything goes wrong, this is the only exception the method
+ should raise.
+ """
+
+ result, fail_info = self._create_with_unified_error(
+ identifier, None, *args, **kwargs
+ )
+ if fail_info is not None:
+ raise CreatorsCreateFailed([fail_info])
+ return result
def _remove_instance(self, instance):
self._instances_by_id.pop(instance.id, None)
@@ -1968,38 +2091,12 @@ class CreateContext:
Reset instances if any autocreator executed properly.
"""
- error_message = "Failed to run AutoCreator with identifier \"{}\". {}"
failed_info = []
for creator in self.sorted_autocreators:
identifier = creator.identifier
- label = creator.label
- failed = False
- add_traceback = False
- try:
- creator.create()
-
- except CreatorError:
- failed = True
- exc_info = sys.exc_info()
- self.log.warning(error_message.format(identifier, exc_info[1]))
-
- # Use bare except because some hosts raise their exceptions that
- # do not inherit from python's `BaseException`
- except:
- failed = True
- add_traceback = True
- exc_info = sys.exc_info()
- self.log.warning(
- error_message.format(identifier, ""),
- exc_info=True
- )
-
- if failed:
- failed_info.append(
- prepare_failed_creator_operation_info(
- identifier, label, exc_info, add_traceback
- )
- )
+ _, fail_info = self._create_with_unified_error(identifier, creator)
+ if fail_info is not None:
+ failed_info.append(fail_info)
if failed_info:
raise CreatorsCreateFailed(failed_info)
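
With the standardized signature, triggering a creator from code reduces to a single call where the current context fills in the defaults. A hedged usage sketch; the identifier, variant, and pre-create key are hypothetical:

    from openpype.pipeline.create import CreateContext

    def create_main_instance(host):
        # 'host' must be a valid host integration object.
        context = CreateContext(host)
        return context.create(
            creator_identifier="io.openpype.creators.maya.pointcache",  # hypothetical
            variant="Main",
            # asset_doc/task_name omitted: current context is used
            pre_create_data={"use_selection": True},  # hypothetical key
        )
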
diff --git a/openpype/pipeline/create/creator_plugins.py b/openpype/pipeline/create/creator_plugins.py
index 53acb618ed..628245faf2 100644
--- a/openpype/pipeline/create/creator_plugins.py
+++ b/openpype/pipeline/create/creator_plugins.py
@@ -153,6 +153,12 @@ class BaseCreator:
Single object should be used for multiple instances instead of single
instance per one creator object. Do not store temp data or mid-process data
to `self` if it's not Plugin specific.
+
+ Args:
+ project_settings (Dict[str, Any]): Project settings.
+ system_settings (Dict[str, Any]): System settings.
+ create_context (CreateContext): Context which initialized creator.
+ headless (bool): Running in headless mode.
"""
# Label shown in UI
@@ -605,12 +611,12 @@ class AutoCreator(BaseCreator):
pass
-def discover_creator_plugins():
- return discover(BaseCreator)
+def discover_creator_plugins(*args, **kwargs):
+ return discover(BaseCreator, *args, **kwargs)
-def discover_convertor_plugins():
- return discover(SubsetConvertorPlugin)
+def discover_convertor_plugins(*args, **kwargs):
+ return discover(SubsetConvertorPlugin, *args, **kwargs)
def discover_legacy_creator_plugins():
diff --git a/openpype/pipeline/load/utils.py b/openpype/pipeline/load/utils.py
index e30923f922..fefdb8537b 100644
--- a/openpype/pipeline/load/utils.py
+++ b/openpype/pipeline/load/utils.py
@@ -28,7 +28,6 @@ from openpype.lib import (
TemplateUnsolved,
)
from openpype.pipeline import (
- schema,
legacy_io,
Anatomy,
)
@@ -643,7 +642,10 @@ def get_representation_path(representation, root=None, dbcon=None):
def path_from_config():
try:
- version_, subset, asset, project = dbcon.parenthood(representation)
+ project_name = dbcon.active_project()
+ version_, subset, asset, project = get_representation_parents(
+ project_name, representation
+ )
except ValueError:
log.debug(
"Representation %s wasn't found in database, "
diff --git a/openpype/pipeline/plugin_discover.py b/openpype/pipeline/plugin_discover.py
index 7edd9ac290..e5257b801a 100644
--- a/openpype/pipeline/plugin_discover.py
+++ b/openpype/pipeline/plugin_discover.py
@@ -135,11 +135,12 @@ class PluginDiscoverContext(object):
allow_duplicates (bool): Validate class name duplications.
ignore_classes (list): List of classes that will be ignored
and not added to result.
+ return_report (bool): Return the full report instead of only the
+ plugin list when set to 'True'.
Returns:
- DiscoverResult: Object holding succesfully discovered plugins,
- ignored plugins, plugins with missing abstract implementation
- and duplicated plugin.
+ Union[DiscoverResult, list[Any]]: Report object holding successfully
+ discovered plugins, ignored plugins, plugins with missing abstract
+ implementation and duplicated plugins, or only the list of
+ discovered plugins when 'return_report' is 'False'.
"""
if not ignore_classes:
@@ -268,9 +269,34 @@ class _GlobalDiscover:
return cls._context
-def discover(superclass, allow_duplicates=True):
+def discover(
+ superclass,
+ allow_duplicates=True,
+ ignore_classes=None,
+ return_report=False
+):
+ """Find and return subclasses of `superclass`
+
+ Args:
+ superclass (type): Class which determines discovered subclasses.
+ allow_duplicates (bool): Validate class name duplications.
+ ignore_classes (list): List of classes that will be ignored
+ and not added to result.
+ return_report (bool): Return the full report instead of only the
+ plugin list when set to 'True'.
+
+ Returns:
+ Union[DiscoverResult, list[Any]]: Report object holding successfully
+ discovered plugins, ignored plugins, plugins with missing abstract
+ implementation and duplicated plugins, or only the list of
+ discovered plugins when 'return_report' is 'False'.
+ """
+
context = _GlobalDiscover.get_context()
- return context.discover(superclass, allow_duplicates)
+ return context.discover(
+ superclass,
+ allow_duplicates,
+ ignore_classes,
+ return_report
+ )
def get_last_discovered_plugins(superclass):
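
Callers that need discovery failures can now ask for the full report, while the default stays a plain plugin list. A hedged sketch against the creator discovery wrappers (attribute names follow the DiscoverResult class replaced below):

    from openpype.pipeline.create.creator_plugins import discover_creator_plugins

    plugins = discover_creator_plugins()                   # plain list of classes
    report = discover_creator_plugins(return_report=True)  # DiscoverResult
    for plugin in report.plugins:
        print(plugin.__name__)
    print("Crashed files:", report.crashed_file_paths)
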
diff --git a/openpype/pipeline/publish/__init__.py b/openpype/pipeline/publish/__init__.py
index dc6fc0f97a..05ba1c9c33 100644
--- a/openpype/pipeline/publish/__init__.py
+++ b/openpype/pipeline/publish/__init__.py
@@ -25,7 +25,6 @@ from .publish_plugins import (
from .lib import (
get_publish_template_name,
- DiscoverResult,
publish_plugins_discover,
load_help_content_from_plugin,
load_help_content_from_filepath,
@@ -36,6 +35,7 @@ from .lib import (
filter_instances_for_context_plugin,
context_plugin_should_run,
get_instance_staging_dir,
+ get_publish_repre_path,
)
from .abstract_expected_files import ExpectedFiles
@@ -68,7 +68,6 @@ __all__ = (
"get_publish_template_name",
- "DiscoverResult",
"publish_plugins_discover",
"load_help_content_from_plugin",
"load_help_content_from_filepath",
@@ -79,6 +78,7 @@ __all__ = (
"filter_instances_for_context_plugin",
"context_plugin_should_run",
"get_instance_staging_dir",
+ "get_publish_repre_path",
"ExpectedFiles",
diff --git a/openpype/pipeline/publish/lib.py b/openpype/pipeline/publish/lib.py
index c76671fa39..bbc511fc5a 100644
--- a/openpype/pipeline/publish/lib.py
+++ b/openpype/pipeline/publish/lib.py
@@ -10,11 +10,18 @@ import six
import pyblish.plugin
import pyblish.api
-from openpype.lib import Logger, filter_profiles
+from openpype.lib import (
+ Logger,
+ filter_profiles
+)
from openpype.settings import (
get_project_settings,
get_system_settings,
)
+from openpype.pipeline import (
+ tempdir
+)
+from openpype.pipeline.plugin_discover import DiscoverResult
from .contants import (
DEFAULT_PUBLISH_TEMPLATE,
@@ -196,28 +203,6 @@ def get_publish_template_name(
return template or default_template
-class DiscoverResult:
- """Hold result of publish plugins discovery.
-
- Stores discovered plugins duplicated plugins and file paths which
- crashed on execution of file.
- """
- def __init__(self):
- self.plugins = []
- self.crashed_file_paths = {}
- self.duplicated_plugins = []
-
- def __iter__(self):
- for plugin in self.plugins:
- yield plugin
-
- def __getitem__(self, item):
- return self.plugins[item]
-
- def __setitem__(self, item, value):
- self.plugins[item] = value
-
-
class HelpContent:
def __init__(self, title, description, detail=None):
self.title = title
@@ -285,7 +270,7 @@ def publish_plugins_discover(paths=None):
"""
# The only difference with `pyblish.api.discover`
- result = DiscoverResult()
+ result = DiscoverResult(pyblish.api.Plugin)
plugins = dict()
plugin_names = []
@@ -595,7 +580,7 @@ def context_plugin_should_run(plugin, context):
Args:
plugin (pyblish.api.Plugin): Plugin with filters.
- context (pyblish.api.Context): Pyblish context with insances.
+ context (pyblish.api.Context): Pyblish context with instances.
Returns:
bool: Context plugin should run based on valid instances.
@@ -609,12 +594,21 @@ def context_plugin_should_run(plugin, context):
def get_instance_staging_dir(instance):
"""Unified way how staging dir is stored and created on instances.
- First check if 'stagingDir' is already set in instance data. If there is
- not create new in tempdir.
+ First check if 'stagingDir' is already set in instance data.
+ If it is, no new tempdir is created.
+
+ It also supports `OPENPYPE_TMPDIR`, so a studio can define its own
+ shared temp directory per project, or for an even more granular
+ context. Template formatting with optional keys is supported as
+ well. The folder is created if it does not exist.
+
+ Available anatomy formatting keys:
+ - root[work | ]
+ - project[name | code]
Note:
- Staging dir does not have to be necessarily in tempdir so be carefull
- about it's usage.
+ Staging dir does not necessarily have to be in tempdir, so be
+ careful about its usage.
Args:
instance (pyblish.lib.Instance): Instance for which we want to get
@@ -623,12 +617,73 @@ def get_instance_staging_dir(instance):
Returns:
str: Path to staging dir of instance.
"""
+ staging_dir = instance.data.get("stagingDir")
+ if staging_dir:
+ return staging_dir
- staging_dir = instance.data.get("stagingDir")
- if not staging_dir:
+ anatomy = instance.context.data.get("anatomy")
+
+ # get customized tempdir path from `OPENPYPE_TMPDIR` env var
+ custom_temp_dir = tempdir.create_custom_tempdir(
+ anatomy.project_name, anatomy)
+
+ if custom_temp_dir:
+ staging_dir = os.path.normpath(
+ tempfile.mkdtemp(
+ prefix="pyblish_tmp_",
+ dir=custom_temp_dir
+ )
+ )
+ else:
staging_dir = os.path.normpath(
tempfile.mkdtemp(prefix="pyblish_tmp_")
)
- instance.data["stagingDir"] = staging_dir
+ instance.data["stagingDir"] = staging_dir
return staging_dir
+
+
+def get_publish_repre_path(instance, repre, only_published=False):
+ """Get representation path that can be used for integration.
+
+ When 'only_published' is set to true, the validation of the path is
+ skipped. In that case we just need what is set in 'published_path'
+ as a "reference". The reference is not used to get or upload the
+ file, only to record where the file was published.
+
+ Args:
+ instance (pyblish.Instance): Processed instance object. Used
+ as a source of staging dir if the representation does not
+ have it filled.
+ repre (dict): Representation on the instance which may or may
+ not have been integrated by the main integrator.
+ only_published (bool): Care only about published paths and
+ ignore whether the filepath still exists.
+
+ Returns:
+ str: Path to representation file.
+ None: Path is not filled or does not exist.
+ """
+
+ published_path = repre.get("published_path")
+ if published_path:
+ published_path = os.path.normpath(published_path)
+ if os.path.exists(published_path):
+ return published_path
+
+ if only_published:
+ return published_path
+
+ comp_files = repre["files"]
+ if isinstance(comp_files, (tuple, list, set)):
+ # 'next(iter(...))' also handles an unordered 'set'
+ filename = next(iter(comp_files))
+ else:
+ filename = comp_files
+
+ staging_dir = repre.get("stagingDir")
+ if not staging_dir:
+ staging_dir = get_instance_staging_dir(instance)
+ src_path = os.path.normpath(os.path.join(staging_dir, filename))
+ if os.path.exists(src_path):
+ return src_path
+ return None
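
A hedged sketch tying the two helpers above together; the instance comes from pyblish's real Context API, and all paths and names are illustrative:

    import pyblish.api
    from openpype.pipeline.publish import (
        get_instance_staging_dir,
        get_publish_repre_path,
    )

    context = pyblish.api.Context()
    instance = context.create_instance("exampleMain")

    # A pre-filled 'stagingDir' short-circuits any tempdir creation.
    instance.data["stagingDir"] = "/tmp/manual_staging"
    assert get_instance_staging_dir(instance) == "/tmp/manual_staging"

    # Illustrative representation; 'published_path' is set by the integrator.
    repre = {
        "files": "exampleMain_v001.mov",
        "stagingDir": "/tmp/manual_staging",
        "published_path": "/projects/demo/publish/exampleMain_v001.mov",
    }

    # With 'only_published=True' the normalized published path is returned
    # even when the file no longer exists on disk.
    path = get_publish_repre_path(instance, repre, only_published=True)
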
diff --git a/openpype/pipeline/tempdir.py b/openpype/pipeline/tempdir.py
new file mode 100644
index 0000000000..55a1346b08
--- /dev/null
+++ b/openpype/pipeline/tempdir.py
@@ -0,0 +1,59 @@
+"""
+Temporary folder operations
+"""
+
+import os
+from openpype.lib import StringTemplate
+from openpype.pipeline import Anatomy
+
+
+def create_custom_tempdir(project_name, anatomy=None):
+ """ Create custom tempdir
+
+ Template path formatting supports:
+ - optional key formatting
+ - available keys:
+ - root[work | ]
+ - project[name | code]
+
+ Args:
+ project_name (str): project name
+ anatomy (Optional[openpype.pipeline.Anatomy]): Anatomy object
+
+ Returns:
+ str | None: formatted path or None
+ """
+ openpype_tempdir = os.getenv("OPENPYPE_TMPDIR")
+ if not openpype_tempdir:
+ return
+
+ custom_tempdir = None
+ if "{" in openpype_tempdir:
+ if anatomy is None:
+ anatomy = Anatomy(project_name)
+ # create base format data
+ data = {
+ "root": anatomy.roots,
+ "project": {
+ "name": anatomy.project_name,
+ "code": anatomy.project_code,
+ }
+ }
+ # path is anatomy template
+ custom_tempdir = StringTemplate.format_template(
+ openpype_tempdir, data).normalized()
+
+ else:
+ # path is absolute
+ custom_tempdir = openpype_tempdir
+
+ # create the dir path if it doesn't exist yet
+ if not os.path.exists(custom_tempdir):
+ try:
+ os.makedirs(custom_tempdir)
+ except IOError as error:
+ raise IOError(
+ "Path couldn't be created: {}".format(error))
+
+ return custom_tempdir
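
A sketch of how the template form of `OPENPYPE_TMPDIR` might resolve; the project name "demo" and the resulting path are illustrative assumptions:

    import os
    from openpype.pipeline.tempdir import create_custom_tempdir

    # Template form; a plain absolute path is also accepted.
    os.environ["OPENPYPE_TMPDIR"] = "{root[work]}/{project[name]}/temp"

    # Anatomy is loaded from the project when not passed in. With a work
    # root of e.g. "/mnt/work" this resolves to "/mnt/work/demo/temp" and
    # the folder is created if it does not exist yet.
    temp_dir = create_custom_tempdir("demo")
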
diff --git a/openpype/plugins/publish/collect_from_create_context.py b/openpype/plugins/publish/collect_from_create_context.py
index d3398c885e..5fcf8feb56 100644
--- a/openpype/plugins/publish/collect_from_create_context.py
+++ b/openpype/plugins/publish/collect_from_create_context.py
@@ -32,7 +32,7 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin):
thumbnail_paths_by_instance_id.get(None)
)
- project_name = create_context.project_name
+ project_name = create_context.get_current_project_name()
if project_name:
context.data["projectName"] = project_name
@@ -53,11 +53,15 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin):
context.data.update(create_context.context_data_to_store())
context.data["newPublishing"] = True
# Update context data
- for key in ("AVALON_PROJECT", "AVALON_ASSET", "AVALON_TASK"):
- value = create_context.dbcon.Session.get(key)
- if value is not None:
- legacy_io.Session[key] = value
- os.environ[key] = value
+ asset_name = create_context.get_current_asset_name()
+ task_name = create_context.get_current_task_name()
+ for key, value in (
+ ("AVALON_PROJECT", project_name),
+ ("AVALON_ASSET", asset_name),
+ ("AVALON_TASK", task_name)
+ ):
+ # skip 'None' values which 'os.environ' cannot store
+ if value is None:
+ continue
+ legacy_io.Session[key] = value
+ os.environ[key] = value
def create_instance(
self,
diff --git a/openpype/settings/defaults/project_settings/ftrack.json b/openpype/settings/defaults/project_settings/ftrack.json
index cdf861df4a..f3f2345a0f 100644
--- a/openpype/settings/defaults/project_settings/ftrack.json
+++ b/openpype/settings/defaults/project_settings/ftrack.json
@@ -488,7 +488,8 @@
},
"keep_first_subset_name_for_review": true,
"asset_versions_status_profiles": [],
- "additional_metadata_keys": []
+ "additional_metadata_keys": [],
+ "upload_reviewable_with_origin_name": false
},
"IntegrateFtrackFarmStatus": {
"farm_status_profiles": []
diff --git a/openpype/settings/defaults/project_settings/nuke.json b/openpype/settings/defaults/project_settings/nuke.json
index cd8ea02272..2999d1427d 100644
--- a/openpype/settings/defaults/project_settings/nuke.json
+++ b/openpype/settings/defaults/project_settings/nuke.json
@@ -246,6 +246,7 @@
"sourcetype": "python",
"title": "Gizmo Note",
"command": "nuke.nodes.StickyNote(label='You can create your own toolbar menu in the Nuke GizmoMenu of OpenPype')",
+ "icon": "",
"shortcut": ""
}
]
diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json
index da414cc961..7050721742 100644
--- a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json
+++ b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json
@@ -1037,6 +1037,21 @@
{"fps": "FPS"},
{"code": "Codec"}
]
+ },
+ {
+ "type": "separator"
+ },
+ {
+ "type": "boolean",
+ "key": "upload_reviewable_with_origin_name",
+ "label": "Upload reviewable with origin name"
+ },
+ {
+ "type": "label",
+ "label": "Note: Reviewable will be uploaded twice into ftrack when enabled. One with original name and second with required 'ftrackreview-mp4'. That may cause dramatic increase of ftrack storage usage."
+ },
+ {
+ "type": "separator"
}
]
},
diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json b/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json
index b1a8cc1812..26c64e6219 100644
--- a/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json
+++ b/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json
@@ -17,6 +17,11 @@
"key": "menu",
"label": "OpenPype Menu shortcuts",
"children": [
+ {
+ "type": "text",
+ "key": "create",
+ "label": "Create..."
+ },
{
"type": "text",
"key": "publish",
@@ -288,4 +293,4 @@
"name": "schema_publish_gui_filter"
}
]
-}
\ No newline at end of file
+}
diff --git a/openpype/tests/test_lib_restructuralization.py b/openpype/tests/test_lib_restructuralization.py
index c8952e5a1c..669706d470 100644
--- a/openpype/tests/test_lib_restructuralization.py
+++ b/openpype/tests/test_lib_restructuralization.py
@@ -5,11 +5,9 @@
def test_backward_compatibility(printer):
printer("Test if imports still work")
try:
- from openpype.lib import filter_pyblish_plugins
from openpype.lib import execute_hook
from openpype.lib import PypeHook
- from openpype.lib import get_latest_version
from openpype.lib import ApplicationLaunchFailed
from openpype.lib import get_ffmpeg_tool_path
@@ -18,10 +16,6 @@ def test_backward_compatibility(printer):
from openpype.lib import get_version_from_path
from openpype.lib import version_up
- from openpype.lib import is_latest
- from openpype.lib import any_outdated
- from openpype.lib import get_asset
- from openpype.lib import get_linked_assets
from openpype.lib import get_ffprobe_streams
from openpype.hosts.fusion.lib import switch_item
diff --git a/openpype/tests/test_pyblish_filter.py b/openpype/tests/test_pyblish_filter.py
index ea23da26e4..b74784145f 100644
--- a/openpype/tests/test_pyblish_filter.py
+++ b/openpype/tests/test_pyblish_filter.py
@@ -1,9 +1,9 @@
-from . import lib
+import os
import pyblish.api
import pyblish.util
import pyblish.plugin
-from openpype.lib import filter_pyblish_plugins
-import os
+from openpype.pipeline.publish.lib import filter_pyblish_plugins
+from . import lib
def test_pyblish_plugin_filter_modifier(printer, monkeypatch):
diff --git a/openpype/tools/publisher/control.py b/openpype/tools/publisher/control.py
index 435db5fcb3..023a20ca5e 100644
--- a/openpype/tools/publisher/control.py
+++ b/openpype/tools/publisher/control.py
@@ -169,6 +169,8 @@ class PublishReport:
def __init__(self, controller):
self.controller = controller
+ self._create_discover_result = None
+ self._convert_discover_result = None
self._publish_discover_result = None
self._plugin_data = []
self._plugin_data_with_plugin = []
@@ -181,6 +183,10 @@ class PublishReport:
def reset(self, context, create_context):
"""Reset report and clear all data."""
+ self._create_discover_result = create_context.creator_discover_result
+ self._convert_discover_result = (
+ create_context.convertor_discover_result
+ )
self._publish_discover_result = create_context.publish_discover_result
self._plugin_data = []
self._plugin_data_with_plugin = []
@@ -293,9 +299,19 @@ class PublishReport:
if plugin not in self._stored_plugins:
plugins_data.append(self._create_plugin_data_item(plugin))
- crashed_file_paths = {}
+ reports = []
+ if self._create_discover_result is not None:
+ reports.append(self._create_discover_result)
+
+ if self._convert_discover_result is not None:
+ reports.append(self._convert_discover_result)
+
if self._publish_discover_result is not None:
- items = self._publish_discover_result.crashed_file_paths.items()
+ reports.append(self._publish_discover_result)
+
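+ # Merge crashed file paths from create, convert and publish discovery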
+ crashed_file_paths = {}
+ for report in reports:
+ items = report.crashed_file_paths.items()
for filepath, exc_info in items:
crashed_file_paths[filepath] = "".join(
traceback.format_exception(*exc_info)
@@ -1573,20 +1589,19 @@ class PublisherController(BasePublisherController):
Handle both creation and publishing parts.
Args:
- dbcon (AvalonMongoDB): Connection to mongo with context.
headless (bool): Headless publishing. ATM not implemented or used.
"""
_log = None
- def __init__(self, dbcon=None, headless=False):
+ def __init__(self, headless=False):
super(PublisherController, self).__init__()
self._host = registered_host()
self._headless = headless
self._create_context = CreateContext(
- self._host, dbcon, headless=headless, reset=False
+ self._host, headless=headless, reset=False
)
self._publish_plugins_proxy = None
@@ -1740,7 +1755,7 @@ class PublisherController(BasePublisherController):
self._create_context.reset_preparation()
# Reset avalon context
- self._create_context.reset_avalon_context()
+ self._create_context.reset_current_context()
self._asset_docs_cache.reset()
@@ -2004,9 +2019,10 @@ class PublisherController(BasePublisherController):
success = True
try:
- self._create_context.create(
+ self._create_context.create_with_unified_error(
creator_identifier, subset_name, instance_data, options
)
except CreatorsOperationFailed as exc:
success = False
self._emit_event(
diff --git a/openpype/tools/publisher/widgets/create_widget.py b/openpype/tools/publisher/widgets/create_widget.py
index dbf075c216..ef9c5b98fe 100644
--- a/openpype/tools/publisher/widgets/create_widget.py
+++ b/openpype/tools/publisher/widgets/create_widget.py
@@ -457,13 +457,14 @@ class CreateWidget(QtWidgets.QWidget):
# TODO add details about creator
new_creators.add(identifier)
if identifier in existing_items:
+ is_new = False
item = existing_items[identifier]
else:
+ is_new = True
item = QtGui.QStandardItem()
item.setFlags(
QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
)
- self._creators_model.appendRow(item)
item.setData(creator_item.label, QtCore.Qt.DisplayRole)
item.setData(creator_item.show_order, CREATOR_SORT_ROLE)
@@ -473,6 +474,8 @@ class CreateWidget(QtWidgets.QWidget):
CREATOR_THUMBNAIL_ENABLED_ROLE
)
item.setData(creator_item.family, FAMILY_ROLE)
+ if is_new:
+ self._creators_model.appendRow(item)
# Remove families that are no more available
for identifier in (old_creators - new_creators):
diff --git a/openpype/tools/publisher/widgets/widgets.py b/openpype/tools/publisher/widgets/widgets.py
index 587bcb059d..8da3886419 100644
--- a/openpype/tools/publisher/widgets/widgets.py
+++ b/openpype/tools/publisher/widgets/widgets.py
@@ -250,21 +250,25 @@ class PublishReportBtn(PublishIconBtn):
self._actions = []
def add_action(self, label, identifier):
- action = QtWidgets.QAction(label)
- action.setData(identifier)
- action.triggered.connect(
- functools.partial(self._on_action_trigger, action)
+ self._actions.append(
+ (label, identifier)
)
- self._actions.append(action)
- def _on_action_trigger(self, action):
- identifier = action.data()
+ def _on_action_trigger(self, identifier):
self.triggered.emit(identifier)
def mouseReleaseEvent(self, event):
super(PublishReportBtn, self).mouseReleaseEvent(event)
menu = QtWidgets.QMenu(self)
- menu.addActions(self._actions)
+ actions = []
+ for item in self._actions:
+ label, identifier = item
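+ # Parent the action to the menu so it is not garbage collected while shown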
+ action = QtWidgets.QAction(label, menu)
+ action.triggered.connect(
+ functools.partial(self._on_action_trigger, identifier)
+ )
+ actions.append(action)
+ menu.addActions(actions)
menu.exec_(event.globalPos())
diff --git a/openpype/tools/publisher/window.py b/openpype/tools/publisher/window.py
index 097e289f32..6f7ffdb8ea 100644
--- a/openpype/tools/publisher/window.py
+++ b/openpype/tools/publisher/window.py
@@ -366,7 +366,7 @@ class PublisherWindow(QtWidgets.QDialog):
def make_sure_is_visible(self):
if self._window_is_visible:
- self.setWindowState(QtCore.Qt.ActiveWindow)
+ self.setWindowState(QtCore.Qt.WindowActive)
else:
self.show()
@@ -566,24 +566,24 @@ class PublisherWindow(QtWidgets.QDialog):
def _go_to_publish_tab(self):
self._set_current_tab("publish")
- def _go_to_details_tab(self):
- self._set_current_tab("details")
-
def _go_to_report_tab(self):
self._set_current_tab("report")
+ def _go_to_details_tab(self):
+ self._set_current_tab("details")
+
def _is_on_create_tab(self):
return self._is_current_tab("create")
def _is_on_publish_tab(self):
return self._is_current_tab("publish")
- def _is_on_details_tab(self):
- return self._is_current_tab("details")
-
def _is_on_report_tab(self):
return self._is_current_tab("report")
+ def _is_on_details_tab(self):
+ return self._is_current_tab("details")
+
def _set_publish_overlay_visibility(self, visible):
if visible:
widget = self._publish_overlay
@@ -647,16 +647,10 @@ class PublisherWindow(QtWidgets.QDialog):
# otherwise 'create' is used
# - this happens only on first show
if first_reset:
- if self._overview_widget.has_items():
- self._go_to_publish_tab()
- else:
- self._go_to_create_tab()
+ self._go_to_create_tab()
- elif (
- not self._is_on_create_tab()
- and not self._is_on_publish_tab()
- ):
- # If current tab is not 'Create' or 'Publish' go to 'Publish'
+ elif self._is_on_report_tab():
+ # Go to 'Publish' tab if currently on 'Report' tab
# - this can happen when publishing started and was reset
# at that moment it doesn't make sense to stay at publish
# specific tabs.
diff --git a/openpype/tools/workfiles/files_widget.py b/openpype/tools/workfiles/files_widget.py
index 765d32b3d5..18be746d49 100644
--- a/openpype/tools/workfiles/files_widget.py
+++ b/openpype/tools/workfiles/files_widget.py
@@ -621,7 +621,7 @@ class FilesWidget(QtWidgets.QWidget):
"caption": "Work Files",
"filter": ext_filter
}
- if qtpy.API in ("pyside", "pyside2"):
+ if qtpy.API in ("pyside", "pyside2", "pyside6"):
kwargs["dir"] = self._workfiles_root
else:
kwargs["directory"] = self._workfiles_root
diff --git a/openpype/version.py b/openpype/version.py
index 8dfd638414..72d6b64c60 100644
--- a/openpype/version.py
+++ b/openpype/version.py
@@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
-__version__ = "3.15.1-nightly.5"
+__version__ = "3.15.1"
diff --git a/pyproject.toml b/pyproject.toml
index a872ed3609..d1d5c8e2d3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "OpenPype"
-version = "3.15.0" # OpenPype
+version = "3.15.1" # OpenPype
description = "Open VFX and Animation pipeline with support."
authors = ["OpenPype Team "]
license = "MIT License"
@@ -114,15 +114,15 @@ build-backend = "poetry.core.masonry.api"
# https://pip.pypa.io/en/stable/cli/pip_install/#requirement-specifiers
[openpype.qtbinding.windows]
package = "PySide2"
-version = "5.15.2"
+version = "3.15.1"
[openpype.qtbinding.darwin]
package = "PySide6"
-version = "6.4.1"
+version = "3.15.1"
[openpype.qtbinding.linux]
package = "PySide2"
-version = "5.15.2"
+version = "3.15.1"
# TODO: we will need to handle different linux flavours here and
# also different macos versions too.
diff --git a/website/docs/admin_environment.md b/website/docs/admin_environment.md
new file mode 100644
index 0000000000..1eb755b90b
--- /dev/null
+++ b/website/docs/admin_environment.md
@@ -0,0 +1,30 @@
+---
+id: admin_environment
+title: Environment
+sidebar_label: Environment
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+## OPENPYPE_TMPDIR
+ - Custom staging directory
+ - Supports anatomy key formatting, e.g. `{root[work]}/{project[name]}/temp`
+ - Supported formatting keys:
+ - root[work]
+ - project[name | code]
+
+## OPENPYPE_DEBUG
+ - Sets the logger to debug mode
+ - Example value: "1" (to activate)
+
+## OPENPYPE_LOG_LEVEL
+ - Stringified numeric value of the log level. [Here for more info](https://docs.python.org/3/library/logging.html#logging-levels)
+ - Example value: "10"
+
+## OPENPYPE_MONGO
+- If set, it takes precedence over the connection string stored in the keyring
+- For more details on how to use it, go [here](admin_use#check-for-mongodb-database-connection)
+
+## OPENPYPE_USERNAME
+- If set, it overrides the system-created username
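
A hedged sketch of exporting these variables from a launcher script before starting OpenPype (all values illustrative):

    import os

    os.environ["OPENPYPE_TMPDIR"] = "{root[work]}/{project[name]}/temp"
    os.environ["OPENPYPE_DEBUG"] = "1"        # activate debug logging
    os.environ["OPENPYPE_LOG_LEVEL"] = "10"   # logging.DEBUG
    os.environ["OPENPYPE_USERNAME"] = "render_user"
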
diff --git a/website/docs/admin_settings_system.md b/website/docs/admin_settings_system.md
index 8aeb281109..d61713ccd5 100644
--- a/website/docs/admin_settings_system.md
+++ b/website/docs/admin_settings_system.md
@@ -13,18 +13,23 @@ Settings applicable to the full studio.

-**`Studio Name`** - Full name of the studio (can be used as variable on some places)
+### Studio Name
+Full name of the studio (can be used as variable on some places)
-**`Studio Code`** - Studio acronym or a short code (can be used as variable on some places)
+### Studio Code
+Studio acronym or a short code (can be used as variable on some places)
-**`Admin Password`** - After setting admin password, normal user won't have access to OpenPype settings
+### Admin Password
+After setting admin password, normal user won't have access to OpenPype settings
and Project Manager GUI. Please keep in mind that this is a studio wide password and it is meant purely
as a simple barrier to prevent artists from accidental setting changes.
-**`Environment`** - Globally applied environment variables that will be appended to any OpenPype process in the studio.
+### Environment
+Globally applied environment variables that will be appended to any OpenPype process in the studio.
-**`Disk mapping`** - Platform dependent configuration for mapping of virtual disk(s) on an artist's OpenPype machines before OP starts up.
-Uses `subst` command, if configured volume character in `Destination` field already exists, no re-mapping is done for that character(volume).
+### Disk mapping
+- Platform-dependent configuration for mapping virtual disk(s) on an artist's OpenPype machine before OP starts up.
+- Uses the `subst` command; if the volume character configured in the `Destination` field already exists, no re-mapping is done for that character (volume).
### FFmpeg and OpenImageIO tools
We bundle FFmpeg tools for all platforms and OpenImageIO tools for Windows and Linux. By default, bundled tools are used, but it is possible to set environment variables `OPENPYPE_FFMPEG_PATHS` and `OPENPYPE_OIIO_PATHS` in system settings environments to look for them in different directory.
@@ -171,4 +176,4 @@ In the image before you can see that we set most of the environment variables in
In this example MTOA will automatically will the `MAYA_VERSION`(which is set by Maya Application environment) and `MTOA_VERSION` into the `MTOA` variable. We then use the `MTOA` to set all the other variables needed for it to function within Maya.

-All of the tools defined in here can then be assigned to projects. You can also change the tools versions on any project level all the way down to individual asset or shot overrides. So if you just need to upgrade you render plugin for a single shot, while not risking the incompatibilities on the rest of the project, it is possible.
\ No newline at end of file
+All the tools defined here can then be assigned to projects. You can also change the tool versions on any project level, all the way down to individual asset or shot overrides. So if you just need to upgrade your render plugin for a single shot, without risking incompatibilities on the rest of the project, it is possible.
diff --git a/website/docs/artist_hosts_maya.md b/website/docs/artist_hosts_maya.md
index 14619e52a1..9fab845e62 100644
--- a/website/docs/artist_hosts_maya.md
+++ b/website/docs/artist_hosts_maya.md
@@ -308,6 +308,8 @@ Select its root and Go **OpenPype → Create...** and select **Point Cache**.
After that, publishing will create corresponding **abc** files.
+When creating the instance, an objectset child `proxy` will be created. Meshes in the `proxy` objectset will be the viewport representation where loading supports proxies. Proxy representations are stored as `resources` of the subset.
+
Example setup:

@@ -315,6 +317,7 @@ Example setup:
:::note Publish on farm
If your studio has Deadline configured, artists could choose to offload potentially long running export of pointache and publish it to the farm.
Only thing that is necessary is to toggle `Farm` property in created pointcache instance to True.
+:::
### Loading Point Caches
diff --git a/website/docs/artist_hosts_maya_arnold.md b/website/docs/artist_hosts_maya_arnold.md
new file mode 100644
index 0000000000..b3c02a0894
--- /dev/null
+++ b/website/docs/artist_hosts_maya_arnold.md
@@ -0,0 +1,30 @@
+---
+id: artist_hosts_maya_arnold
+title: Arnold for Maya
+sidebar_label: Arnold
+---
+## Arnold Scene Source (.ass)
+Arnold Scene Source can be published as a single file or a sequence of files, determined by the frame range.
+
+When creating the instance, two objectsets are created; `content` and `proxy`. Meshes in the `proxy` objectset will be the viewport representation when loading as `standin`. Proxy representations are stored as `resources` of the subset.
+
+### Arnold Scene Source Proxy Workflow
+In order to utilize operators and proxies, the content and proxy nodes need to share the same names (including the shape names). This is done by parenting the content and proxy nodes into separate groups. For example:
+
+
+
+## Standin
+Arnold Scene Source `ass` and Alembic `abc` are supported to load as standins.
+
+### Standin Proxy Workflow
+If a subset has a proxy representation, it will be used as the display in the viewport. At render time, the standin path will be replaced using the recommended string-replacement workflow:
+
+https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_maya_operators_am_Updating_procedural_file_paths_with_string_replace_html
+
+Since the content and proxy nodes share the same names and hierarchy, any manual shader assignments will be shared.
+
+
+:::note for advanced users
+You can stop the proxy swapping by disabling the string replacement operator found in the container.
+
+:::
diff --git a/website/docs/assets/maya-arnold_scene_source.png b/website/docs/assets/maya-arnold_scene_source.png
new file mode 100644
index 0000000000..4150b78aac
Binary files /dev/null and b/website/docs/assets/maya-arnold_scene_source.png differ
diff --git a/website/docs/assets/maya-arnold_standin.png b/website/docs/assets/maya-arnold_standin.png
new file mode 100644
index 0000000000..74571a86fa
Binary files /dev/null and b/website/docs/assets/maya-arnold_standin.png differ
diff --git a/website/docs/assets/maya-pointcache_setup.png b/website/docs/assets/maya-pointcache_setup.png
index 8904baa239..b2dc126901 100644
Binary files a/website/docs/assets/maya-pointcache_setup.png and b/website/docs/assets/maya-pointcache_setup.png differ
diff --git a/website/sidebars.js b/website/sidebars.js
index dfc3d827e0..93887e00f6 100644
--- a/website/sidebars.js
+++ b/website/sidebars.js
@@ -44,6 +44,7 @@ module.exports = {
"artist_hosts_maya_multiverse",
"artist_hosts_maya_yeti",
"artist_hosts_maya_xgen",
+ "artist_hosts_maya_arnold",
"artist_hosts_maya_vray",
"artist_hosts_maya_redshift",
],
@@ -86,6 +87,7 @@ module.exports = {
type: "category",
label: "Configuration",
items: [
+ "admin_environment",
"admin_settings",
"admin_settings_system",
"admin_settings_project_anatomy",
diff --git a/website/yarn.lock b/website/yarn.lock
index 0a56928cd9..559c58f931 100644
--- a/website/yarn.lock
+++ b/website/yarn.lock
@@ -7180,9 +7180,9 @@ typedarray-to-buffer@^3.1.5:
is-typedarray "^1.0.0"
ua-parser-js@^0.7.30:
- version "0.7.31"
- resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.31.tgz#649a656b191dffab4f21d5e053e27ca17cbff5c6"
- integrity sha512-qLK/Xe9E2uzmYI3qLeOmI0tEOt+TBBQyUIAh4aAgU05FVYzeZrKUdkAZfBNVGRaHVgV0TDkdEngJSw/SyQchkQ==
+ version "0.7.33"
+ resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.33.tgz#1d04acb4ccef9293df6f70f2c3d22f3030d8b532"
+ integrity sha512-s8ax/CeZdK9R/56Sui0WM6y9OFREJarMRHqLB2EwkovemBxNQ+Bqu8GAsUnVcXKgphb++ghr/B2BZx4mahujPw==
unherit@^1.0.4:
version "1.1.3"