diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 78bea3d838..fd3455ac76 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -35,6 +35,35 @@ body:
label: Version
description: What version are you running? Look to OpenPype Tray
options:
+ - 3.18.2-nightly.2
+ - 3.18.2-nightly.1
+ - 3.18.1
+ - 3.18.1-nightly.1
+ - 3.18.0
+ - 3.17.7
+ - 3.17.7-nightly.7
+ - 3.17.7-nightly.6
+ - 3.17.7-nightly.5
+ - 3.17.7-nightly.4
+ - 3.17.7-nightly.3
+ - 3.17.7-nightly.2
+ - 3.17.7-nightly.1
+ - 3.17.6
+ - 3.17.6-nightly.3
+ - 3.17.6-nightly.2
+ - 3.17.6-nightly.1
+ - 3.17.5
+ - 3.17.5-nightly.3
+ - 3.17.5-nightly.2
+ - 3.17.5-nightly.1
+ - 3.17.4
+ - 3.17.4-nightly.2
+ - 3.17.4-nightly.1
+ - 3.17.3
+ - 3.17.3-nightly.2
+ - 3.17.3-nightly.1
+ - 3.17.2
+ - 3.17.2-nightly.4
- 3.17.2-nightly.3
- 3.17.2-nightly.2
- 3.17.2-nightly.1
@@ -106,35 +135,6 @@ body:
- 3.15.4
- 3.15.4-nightly.3
- 3.15.4-nightly.2
- - 3.15.4-nightly.1
- - 3.15.3
- - 3.15.3-nightly.4
- - 3.15.3-nightly.3
- - 3.15.3-nightly.2
- - 3.15.3-nightly.1
- - 3.15.2
- - 3.15.2-nightly.6
- - 3.15.2-nightly.5
- - 3.15.2-nightly.4
- - 3.15.2-nightly.3
- - 3.15.2-nightly.2
- - 3.15.2-nightly.1
- - 3.15.1
- - 3.15.1-nightly.6
- - 3.15.1-nightly.5
- - 3.15.1-nightly.4
- - 3.15.1-nightly.3
- - 3.15.1-nightly.2
- - 3.15.1-nightly.1
- - 3.15.0
- - 3.15.0-nightly.1
- - 3.14.11-nightly.4
- - 3.14.11-nightly.3
- - 3.14.11-nightly.2
- - 3.14.11-nightly.1
- - 3.14.10
- - 3.14.10-nightly.9
- - 3.14.10-nightly.8
validations:
required: true
- type: dropdown
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8f14340348..f309d904eb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,3095 @@
# Changelog
+## [3.18.1](https://github.com/ynput/OpenPype/tree/3.18.1)
+
+
+[Full Changelog](https://github.com/ynput/OpenPype/compare/3.18.0...3.18.1)
+
+### **🚀 Enhancements**
+
+
+
+AYON: Update ayon api to 1.0.0-rc.3 #6052
+
+Updated ayon python api to 1.0.0-rc.3.
+
+
+___
+
+
+
+
+
+
+## [3.18.0](https://github.com/ynput/OpenPype/tree/3.18.0)
+
+
+[Full Changelog](https://github.com/ynput/OpenPype/compare/3.17.7...3.18.0)
+
+### **🐛 Bug fixes**
+
+
+
+Chore: Fix subst paths handling #5702
+
+Make sure that source disk ends with `\` instead of destination disk.
+
+
+___
+
+
+
+
+
+
+## [3.17.7](https://github.com/ynput/OpenPype/tree/3.17.7)
+
+
+[Full Changelog](https://github.com/ynput/OpenPype/compare/3.17.6...3.17.7)
+
+### **🆕 New features**
+
+
+
+AYON: Use folder path as unique identifier #5817
+
+Use folder path instead of asset name as unique identifier, with OpenPype compatibility.
+
+
+___
+
+
+
+
+
+Houdini: Farm caching submission to Deadline #4903
+
+Implements functionality to offload instances of the specific families to be processed on Deadline instead of locally. This increases productivity, as the artist's local machine can be used for other tasks. Implemented for families:
+- [x] ass
+- [x] redshift proxy
+- [x] ifd
+- [x] abc
+- [x] bgeo
+- [x] vdb
+
+
+___
+
+
+
+
+
+Houdini: Add support to split Deadline render tasks in export + render #5420
+
+This adds initial support in Houdini so when submitting render jobs to Deadline it's not running as a single Houdini task but rather it gets split in two different tasks: Export + Render. This way it's more efficient as we only need a Houdini license during the export step and the render tasks can run exclusively with a render license. Moreover, we aren't wasting all the overhead time of opening the render scene in Houdini for every frame. I have also added the corresponding settings json files so we can set some of the default values for the Houdini deadline submitter.
+
+
+___
+
+
+
+
+
+Wrap: new integration #5823
+
+These modifications are necessary for adding Wrap integration (DCC handling scans and textures) .
+
+
+___
+
+
+
+
+
+AYON: Prepare for 'data' via graphql #5923
+
+AYON server does support to query 'data' field for hierarchy entities (project > ... > representation) using GraphQl since version 0.5.5. Because of this PR in ayon-python-api it is required to modify custom graphql function in `openpype.client` to support that option.
+
+
+___
+
+
+
+
+
+Chore AYON: AYON addon class #5937
+
+Introduced base class for AYON addon in openpype modules discovery logic.
+
+
+___
+
+
+
+
+
+Asset Usage Reporter Tool #5946
+
+This adds a simple tool for OpenPype mode that will go over all published workfiles and print linked assets and their versions. This is created per project and can be exported to a csv file or copied to the clipboard in _"ASCII Human readable form"_.
+
+
+___
+
+
+
+
+
+Testing: dump_databases flag #5955
+
+This introduces a `dump_databases` flag which makes it convenient to output the resulting database of a successful test run. The flag supports two formats: `bson` and `json`. Due to outputting to the test data folder, when dumping the databases, the test data folder will persist. Split from https://github.com/ynput/OpenPype/pull/5644
+
+
+___
+
+
+
+
+
+SiteSync: implemented in Ayon Loader #5962
+
+Implemented `Availability` column in Ayon loader and redo of loaders to `ActionItems` in representation window there.
+
+
+___
+
+
+
+
+
+AYON: Workfile template build works #5975
+
+Modified workfile template builder to work, to some degree, in AYON mode.
+
+
+___
+
+
+
+### **🚀 Enhancements**
+
+
+
+Maya: Small Tweaks on Validator for Look Default Shader Connection for Maya 2024 #5957
+
+Resolve https://github.com/ynput/OpenPype/issues/5269
+
+
+___
+
+
+
+
+
+Settings: Changes in default settings #5983
+
+We've made some changes in the default settings as several application versions were obsolete (Maya 18, Nuke 11, PS 2020, etc). Also added tools and changed settings for Blender and Maya.
+
+All should work as usual.
+___
+
+
+
+
+
+Testing: Do not persist data by default in Maya/Deadline. #5987
+
+This is similar to the Maya publishing test.
+
+
+___
+
+
+
+
+
+Max: Validate loaded plugins tweaks #5820
+
+In the current development of 3dsMax, users need to use separate validators to validate whether certain plugins are loaded before the extraction. For example, the usd extractor in the model family, or the prt/tycache extractor in the pointcloud/tycache family. But with this PR, which implements an optional loaded-plugins validation, users just need to put the plugins they want to validate in the settings. They no longer need to go through all the separate plugin validators when publishing; only one validator does all the checks on the loaded plugins before extraction.
+
+
+___
+
+
+
+
+
+Nuke: Change context label enhancement #5887
+
+Use QAction to change label of context label in Nuke pipeline menu.
+
+
+___
+
+
+
+
+
+Chore: Do not use template data as source for context #5918
+
+Use available information on context to receive context data instead of using `"anatomyData"` during publishing.
+
+
+___
+
+
+
+
+
+Houdini: Add python3.10 libs for Houdini 20 startup #5932
+
+Add python3.10 libs for Houdini 20 startup
+
+
+___
+
+
+
+
+
+General: Use colorspace data when creating thumbnail #5938
+
+Thumbnails with applied colormanagement.
+
+
+___
+
+
+
+
+
+Ftrack: rewriting component creation to support multiple thumbnails #5939
+
+The creation of Ftrack components needs to allow for multiple thumbnails. This is important in situations where there could be several reviewable streams, like in the case of a nuke intermediate files preset. Customers have asked for unique thumbnails for each data stream.For instance, one stream might contain a baked LUT file along with Display and View. Another stream might only include the baked Display and View. These variations can change the overall look. Thus, we found it necessary to depict these differences via thumbnails.
+
+
+___
+
+
+
+
+
+Chore: PySide6 tree view style #5940
+
+Define solid color for background of branch in QTreeView.
+
+
+___
+
+
+
+
+
+Nuke: Explicit Thumbnail workflow #5941
+
+Nuke made a shift from using its own plugin to a global one for thumbnail creation. This was because it had to handle several thumbnail workflows for baking intermediate data streams. To manage this, the global plugin had to be upgraded. Now, each baking stream can set a unique tag 'need_thumbnail'. This tag is used to mark representations that need a thumbnail.
+
+
+___
+
+
+
+
+
+Global: extract thumbnail with new settings #5944
+
+Settings are now configurable for the following:
+- target size of thumbnail - source or constrained to specific
+- where should be frame taken from in sequence or video file
+- if thumbnail should be integrated or not
+- background color for letter boxes
+- added AYON settings
+
+
+___
+
+
+
+
+
+RoyalRender: inject submitter environment to the royal render job #5958
+
+This is an attempt to solve runtime environment injection for render jobs in RoyalRender as there is no easy way to implement something like `GlobalJobPreload` logic in Deadline. Idea is to inject OpenPype environments directly to the job itself.
+
+
+___
+
+
+
+
+
+General: Use manual thumbnail if present when publishing #5969
+
+Use manual thumbnail added to the publisher instead of using it from published representation.
+
+
+___
+
+
+
+
+
+AYON: Change of server url should work as expected #5971
+
+Using login action in tray menu to change server url should correctly start new process without issues of missing bundle or previous url.
+
+
+___
+
+
+
+
+
+AYON: make sure the AYON menu bar in 3dsMax is named AYON when AYON launches #5972
+
+Renaming the menu bar in 3dsMax for AYON and some cosmetic fix in the docstring
+
+
+___
+
+
+
+
+
+Resolve: renaming menu to AYON #5974
+
+Resolve in Ayon now has an aligned menu name.
+
+
+___
+
+
+
+
+
+Hiero: custom tools menu rename #5976
+
+- OpenPype Tools are now Custom Tools menu
+- fixing order of tools. Create should be first.
+
+
+___
+
+
+
+
+
+nuke: updating name for custom tools menu item #5977
+
+- Ayon variant of settings renamed `Custom Tools` menu item
+
+
+___
+
+
+
+
+
+fusion: AYON renaming menu #5978
+
+Fusion now has an Ayon menu.
+
+
+___
+
+
+
+
+
+Blender: Changed the labels for Layout JSON Extractor #5981
+
+Changed the labels for Blender's Layout JSON Extractor.
+
+
+___
+
+
+
+
+
+Testing: Skip Arnold license for test rendering. #5984
+
+Skip license check when rendering for testing.
+
+
+___
+
+
+
+
+
+Testing: Validate errors and failed status from Deadline jobs. #5986
+
+While waiting for the Deadline jobs to finish, we query the errors on the job and its dependent jobs to fail as early as possible. Plus the failed status.
+
+
+___
+
+
+
+
+
+AYON: rename Openpype Tools as Custom Tools in Maya Host #5991
+
+Rename Openpype Tools as Custom Tools in Maya Host in
+
+
+___
+
+
+
+
+
+AYON: Use AYON label in ayon mode #5995
+
+Replaced OpenPype with AYON in AYON mode and added the bundle name to information.
+
+
+___
+
+
+
+
+
+AYON: Update ayon python api #6002
+
+Updated ayon-python-api to '1.0.0-rc.1'.
+
+
+___
+
+
+
+
+
+Max: Add missing repair action in validate resolution setting #6014
+
+Add missing repair action for validate resolution setting
+
+
+___
+
+
+
+
+
+Add the AYON/OP settings to enable extractor for model family in 3dsmax #6027
+
+Add the AYON/OP settings to enable extractor for model family in 3dsmax
+
+
+___
+
+
+
+
+
+Bugfix: Fix error message formatting if ayon executable can't be found by deadline #6028
+
+Without this fix the error message would report the executables string with `;` between EACH character, similar to this PR: https://github.com/ynput/OpenPype/pull/5815. However, that PR apparently missed also fixing it in `GlobalJobPreLoad` and only fixed it in the `Ayon.py` plugin.
+
+
+___
+
+
+
+
+
+Show slightly different info in AYON mode #6031
+
+This PR changes what is shown in the Tray menu in AYON mode. Previously, it showed the version of OpenPype, which is very confusing in AYON mode. Now it shows the AYON version instead. When clicked, it will open the AYON info window, where the OpenPype version is now added for debugging purposes.
+
+
+___
+
+
+
+
+
+AYON Editorial: Hierarchy context have names as keys #6041
+
+Use folder names as keys in `hierarchyContext` and modify hierarchy extraction accordingly.
+
+
+___
+
+
+
+
+
+AYON: Convert the createAt value to local timezone #6043
+
+Show correct create time in UIs.
+
+
+___
+
+
+
+### **🐛 Bug fixes**
+
+
+
+Maya: Render creation - fix broken imports #5893
+
+Maya specific imports were moved to specific methods but not in all cases by #5775. This is just quickly restoring functionality without questioning that decision.
+
+
+___
+
+
+
+
+
+Maya: fix crashing model renderset collector #5929
+
+This fix is handling case where model is in some type of render sets but no other connections are made there. Publishing this model would fail with `RuntimeError: Found no items to list the history for.`
+
+
+___
+
+
+
+
+
+Maya: Remove duplicated attributes of MTOA verbosity level #5945
+
+Remove duplicated attributes implementation mentioned in https://github.com/ynput/OpenPype/pull/5931#discussion_r1402175289
+
+
+___
+
+
+
+
+
+Maya: Bug fix Redshift Proxy not being successfully published #5956
+
+Bug fix redshift proxy family not being successfully published due to the error found in integrate.py
+
+
+___
+
+
+
+
+
+Maya: Bug fix load image for texturesetMain #6011
+
+Bug fix load image with file node for texturesetMain
+
+
+___
+
+
+
+
+
+Maya: bug fix the repair function in validate_rendersettings #6021
+
+The following error has been encountered below:
+```
+// pyblish.pyblish.plugin.Action : Finding failed instances..
+// pyblish.pyblish.plugin.Action : Attempting repair for instance: renderLookdevMain ...
+// Error: pyblish.plugin : Traceback (most recent call last):
+// File "C:\Users\lbate\AppData\Local\Ynput\AYON\dependency_packages\ayon_2310271602_windows.zip\dependencies\pyblish\plugin.py", line 527, in __explicit_process
+// runner(*args)
+// File "C:\Users\lbate\AppData\Local\Ynput\AYON\addons\openpype_3.17.7-nightly.6\openpype\pipeline\publish\publish_plugins.py", line 241, in process
+// plugin.repair(instance)
+// File "C:\Users\lbate\AppData\Local\Ynput\AYON\addons\openpype_3.17.7-nightly.6\openpype\hosts\maya\plugins\publish\validate_rendersettings.py", line 395, in repair
+// cmds.setAttr("{}.{}".format(node, prefix_attr),
+// UnboundLocalError: local variable 'node' referenced before assignment
+// Traceback (most recent call last):
+// File "C:\Users\lbate\AppData\Local\Ynput\AYON\dependency_packages\ayon_2310271602_windows.zip\dependencies\pyblish\plugin.py", line 527, in __explicit_process
+// runner(*args)
+// File "C:\Users\lbate\AppData\Local\Ynput\AYON\addons\openpype_3.17.7-nightly.6\openpype\pipeline\publish\publish_plugins.py", line 241, in process
+// plugin.repair(instance)
+// File "C:\Users\lbate\AppData\Local\Ynput\AYON\addons\openpype_3.17.7-nightly.6\openpype\hosts\maya\plugins\publish\validate_rendersettings.py", line 395, in repair
+// cmds.setAttr("{}.{}".format(node, prefix_attr),
+// UnboundLocalError: local variable 'node' referenced before assignment
+```
+This PR is a fix for that
+
+
+___
+
+
+
+
+
+Fusion: Render avoid unhashable type `BlackmagicFusion.PyRemoteObject` error #5672
+
+Fix Fusion 18.6+ support: Avoid issues with Fusion's `BlackmagicFusion.PyRemoteObject` instances being unhashable.
+```python
+Traceback (most recent call last):
+ File "E:\openpype\OpenPype\.venv\lib\site-packages\pyblish\plugin.py", line 527, in __explicit_process
+ runner(*args)
+ File "E:\openpype\OpenPype\openpype\hosts\fusion\plugins\publish\extract_render_local.py", line 61, in process
+ result = self.render(instance)
+ File "E:\openpype\OpenPype\openpype\hosts\fusion\plugins\publish\extract_render_local.py", line 118, in render
+ with enabled_savers(current_comp, savers_to_render):
+ File "C:\Users\User\AppData\Local\Programs\Python\Python39\lib\contextlib.py", line 119, in __enter__
+ return next(self.gen)
+ File "E:\openpype\OpenPype\openpype\hosts\fusion\plugins\publish\extract_render_local.py", line 33, in enabled_savers
+ original_states[saver] = original_state
+TypeError: unhashable type: 'BlackmagicFusion.PyRemoteObject'
+```
+
+
+
+___
+
+
+
+
+
+Nuke: Validate Nuke Write Nodes refactor to use variable `node_value` instead of `value` #5764
+
+Nuke: Validate Nuke Write Nodes refactor to use variable `node_value` instead of `value`The variable `value` only exists as the last variable value in the `for value in values` loop and might not be declared if `values` is an empty iterable.
+
+
+___
+
+
+
+
+
+resolve: fixing loader handles calculation #5863
+
+Resolve was not correctly calculating the clip duration from database-related data.
+
+
+___
+
+
+
+
+
+Chore: Staging mode determination #5895
+
+Resources use `is_staging_enabled` function instead of `is_running_staging` to determine if should use staging icon. And fixed comparison bug in `is_running_staging`.
+
+
+___
+
+
+
+
+
+AYON: Handle staging templates category #5905
+
+Staging anatomy templates category is handled during project templates conversion. The keys are stored into `others` with `"staging_"` prefix.
+
+
+___
+
+
+
+
+
+Max: fix the subset name not changing accordingly after the variant name changes #5911
+
+Resolve #5902
+
+
+___
+
+
+
+
+
+AYON: Loader tool bugs hunt #5915
+
+Fix issues with invalid representation ids in loaded containers and handle missing product type in server database.
+
+
+___
+
+
+
+
+
+Publisher: Bugfixes and enhancements #5924
+
+Small fixes/enhancements in publisher UI.
+
+
+___
+
+
+
+
+
+Maya: Supports for additional Job Info and Plugin Info in deadline submission #5931
+
+This PR is to resolve some of the attributes such as MTOA's `ArnoldVerbose` are not preserved on farm and users can use the project settings to add the attributes back to either job or plugin Info.
+
+
+___
+
+
+
+
+
+Bugfix: Houdini license validator missing families #5934
+
+Adding missing families to Houdini license validator.
+
+
+___
+
+
+
+
+
+TrayPublisher: adding back `asset_doc` variable #5943
+
+Returning variable which had been removed accidentally in previous PR.
+
+
+___
+
+
+
+
+
+Settings: Fix ModulesManager init args #5947
+
+Remove usage of kwargs to create ModulesManager.
+
+
+___
+
+
+
+
+
+Blender: Fix Deadline Frames per task #5949
+
+Fixed a problem with Frames per task setting not being applied when publishing a render.
+
+
+___
+
+
+
+
+
+Testing: Fix is_test_failed #5951
+
+`is_test_failed` is used (exclusively) on module fixtures to determine whether the tests have failed or not. This determines whether to run tear down code like cleaning up the database and temporary files.But in the module scope `request.node.rep_call` is not available, which results in `is_test_failed` always returning `True`, and no tear down code get executed.The solution was taken from; https://github.com/pytest-dev/pytest/issues/5090
+
+
+___
+
+
+
+
+
+Harmony: Fix local rendering #5953
+
+Local rendering was throwing warning about license, but didn't fail per se. It just didn't produce anything.
+
+
+___
+
+
+
+
+
+Testing: hou module should be within class code. #5954
+
+`hou` module should be within the class code else we'll get pyblish errors from needing to skip the plugin.
+
+
+___
+
+
+
+
+
+Maya: Add Label to MayaUSDReferenceLoader #5964
+
+As the create placeholder dialog displays the two distinct loaders with the same name, this PR is to distinguish Maya USD Reference Loaders from the loaders of which inherited from. See the screenshot below:
+
+
+___
+
+
+
+
+
+Max: Bug fix the resolution not being shown correctly in review burnin #5965
+
+The resolution is not being shown correctly in review burnin
+
+
+___
+
+
+
+
+
+AYON: Fix thumbnail integration #5970
+
+Thumbnail integration could cause crash of server if thumbnail id was changed for the same entity id multiple times. Modified the code to avoid that issue.
+
+
+___
+
+
+
+
+
+Photoshop: Updated label in Settings #5980
+
+Replaced wrong label from different plugin.
+
+
+___
+
+
+
+
+
+Photoshop: Fix removed unsupported Path #5996
+
+Path is not json serializable by default, it is not necessary, better model reused.
+
+
+___
+
+
+
+
+
+AYON: Prepare functions for newer ayon-python-api #5997
+
+Newer ayon python api will add new filtering options or change order of existing. Kwargs are used in client code to prevent issues on update.
+
+
+___
+
+
+
+
+
+AYON: Conversion of the new playblast settings in Maya #6000
+
+Conversion of the new playblast settings in Maya
+
+
+___
+
+
+
+
+
+AYON: Bug fix for loading Mesh in Substance Painter as new project not working #6004
+
+Substance Painter in AYON can't load mesh for creating a new project
+
+
+___
+
+
+
+
+
+Deadline: correct webservice couldn't be selected in Ayon #6007
+
+Changed the Setting model to mimic more OP approach as it needs to live together for time being.
+
+
+___
+
+
+
+
+
+AYON tools: Fix refresh thread #6008
+
+Trigger 'refresh_finished' signal out of 'run' method.
+
+
+___
+
+
+
+
+
+Ftrack: multiple reviewable components missing variable #6013
+
+Missing variable in code for editorial publishing in traypublisher.
+
+
+___
+
+
+
+
+
+TVPaint: Expect legacy instances in metadata #6015
+
+Do not expect `"workfileInstances"` contains only new type instance data with `creator_identifier`.
+
+
+___
+
+
+
+
+
+Bugfix: handle missing key in Deadline #6019
+
+This quickly fixes bug introduced by #5420
+
+
+___
+
+
+
+
+
+Revert `extractenvironments` behaviour #6020
+
+This is returning original behaviour of `extractenvironments` command from before #5958 so we restore functionality.
+
+
+___
+
+
+
+
+
+OP-7535 - Fix renaming composition in AE #6025
+
+Removing of the `render` instance caused renaming of the composition to `dummyComp`, which caused an issue in publishing on the next attempt. This PR stores the original composition name (cleaned up for product name creation) and uses it if the instance needs to be removed.
+
+
+___
+
+
+
+
+
+Refactor code to skip instance creation for new assets #6029
+
+Publishing effects from hiero during editorial publish is working as expected again.
+
+
+___
+
+
+
+
+
+Refactor code to handle missing "representations" key in instance data #6032
+
+Minor code change for optimisation of thumbnail workflow.
+
+
+___
+
+
+
+
+
+Traypublisher: editorial preserve clip case sensitivity #6036
+
+Keep EDL clip name inheritance with case sensitivity.
+
+
+___
+
+
+
+
+
+Bugfix/add missing houdini settings #6039
+
+Add missing Houdini settings. A side-by-side comparison of the Ayon and OpenPype settings is shown in the PR description.
+
+
+___
+
+
+
+### **🔀 Refactored code**
+
+
+
+Maya: Remove RenderSetup layer observers #5836
+
+Remove RenderSetup layer observers that are not needed since new publisher since Renderlayer Creators manage these themselves on Collect and Save/Update of instances.
+
+
+___
+
+
+
+### **Merged pull requests**
+
+
+
+Tests: Removed render instance #6026
+
+This test was created as simple model and workfile publish, without Deadline rendering. Cleaned up render elements.
+
+
+___
+
+
+
+
+
+Tests: update after thumbnail default change #6040
+
+https://github.com/ynput/OpenPype/pull/5944 changed default state of integration of Thumbnails to NOT integrate. This PR updates automatic tests to follow that.
+
+
+___
+
+
+
+
+
+Houdini: Remove legacy LOPs USD output processors #5861
+
+Remove unused/broken legacy code for Houdini Solaris USD LOPs output processors. The code was originally written in Avalon, against early Houdini 18 betas which had a different API for output processors and thus the current state doesn't even work in recent versions of Houdini.
+
+
+___
+
+
+
+
+
+Chore: Substance Painter Addons for Ayon #5914
+
+Substance Painter Addons for Ayon
+
+
+___
+
+
+
+
+
+Ayon: Updated name of Adobe extension to Ayon #5992
+
+This changes name in menu in Adobe extensions to Ayon.
+
+
+___
+
+
+
+
+
+Chore/houdini update startup log #6003
+
+Print `Installing AYON ...` on startup when launching Houdini from the launcher in AYON mode. Also update the submenu to `ayon_menu` instead of `openpype_menu`.
+
+
+___
+
+
+
+
+
+Revert "Ayon: Updated name of Adobe extension to Ayon" #6010
+
+Reverts ynput/OpenPype#5992
+
+That PR is only applicable to Ayon.
+___
+
+
+
+
+
+Standalone/Tray Publisher: Remove simple Unreal texture publishing #6012
+
+We are removing _simple Unreal Texture publishing_ that was just renaming texture files to fit Unreal naming conventions but without any additional functionality. We might bring this functionality back with a better texture publishing system. Related to #5983
+
+
+___
+
+
+
+
+
+Deadline: Bump version because of Settings changes for Deadline #6023
+
+
+___
+
+
+
+
+
+Change ASCII art in the Console based on the server mode #6030
+
+This changes ASCII art in the console based on the AYON/OpenPype mode
+
+
+___
+
+
+
+
+
+
+## [3.17.6](https://github.com/ynput/OpenPype/tree/3.17.6)
+
+
+[Full Changelog](https://github.com/ynput/OpenPype/compare/3.17.5...3.17.6)
+
+### **🚀 Enhancements**
+
+
+
+Testing: Validate Maya Logs #5775
+
+This PR adds testing of the logs within Maya, such as Python and Pyblish errors. The reason why we need to touch so many files outside of Maya is because of the pyblish errors below:
+```
+pyblish (ERROR) (line: 1371) pyblish.plugin:
+Skipped: "collect_otio_frame_ranges" (No module named 'opentimelineio')
+# Error: pyblish.plugin : Skipped: "collect_otio_frame_ranges" (No module named 'opentimelineio') #
+pyblish (ERROR) (line: 1371) pyblish.plugin:
+Skipped: "collect_otio_review" (No module named 'opentimelineio')
+# Error: pyblish.plugin : Skipped: "collect_otio_review" (No module named 'opentimelineio') #
+pyblish (ERROR) (line: 1371) pyblish.plugin:
+Skipped: "collect_otio_subset_resources" (No module named 'opentimelineio')
+# Error: pyblish.plugin : Skipped: "collect_otio_subset_resources" (No module named 'opentimelineio') #
+pyblish (ERROR) (line: 1371) pyblish.plugin:
+Skipped: "extract_otio_audio_tracks" (No module named 'opentimelineio')
+# Error: pyblish.plugin : Skipped: "extract_otio_audio_tracks" (No module named 'opentimelineio') #
+pyblish (ERROR) (line: 1371) pyblish.plugin:
+Skipped: "extract_otio_file" (No module named 'opentimelineio')
+# Error: pyblish.plugin : Skipped: "extract_otio_file" (No module named 'opentimelineio') #
+pyblish (ERROR) (line: 1371) pyblish.plugin:
+Skipped: "extract_otio_review" (No module named 'opentimelineio')
+# Error: pyblish.plugin : Skipped: "extract_otio_review" (No module named 'opentimelineio') #
+pyblish (ERROR) (line: 1371) pyblish.plugin:
+Skipped: "extract_otio_trimming_video" (No module named 'opentimelineio')
+# Error: pyblish.plugin : Skipped: "extract_otio_trimming_video" (No module named 'opentimelineio') #
+pyblish (ERROR) (line: 1371) pyblish.plugin:
+Skipped: "submit_blender_deadline" (No module named 'bpy')
+# Error: pyblish.plugin : Skipped: "submit_blender_deadline" (No module named 'bpy') #
+pyblish (ERROR) (line: 1371) pyblish.plugin:
+Skipped: "submit_houdini_remote_publish" (No module named 'hou')
+# Error: pyblish.plugin : Skipped: "submit_houdini_remote_publish" (No module named 'hou') #
+pyblish (ERROR) (line: 1371) pyblish.plugin:
+Skipped: "submit_houdini_render_deadline" (No module named 'hou')
+# Error: pyblish.plugin : Skipped: "submit_houdini_render_deadline" (No module named 'hou') #
+pyblish (ERROR) (line: 1371) pyblish.plugin:
+Skipped: "submit_max_deadline" (No module named 'pymxs')
+# Error: pyblish.plugin : Skipped: "submit_max_deadline" (No module named 'pymxs') #
+pyblish (ERROR) (line: 1371) pyblish.plugin:
+Skipped: "submit_nuke_deadline" (No module named 'nuke')
+# Error: pyblish.plugin : Skipped: "submit_nuke_deadline" (No module named 'nuke') #
+```
+We also needed to capture `stdout` and `stderr` from the launched application to capture the output. Split from #5644. Dependent on #5734
+
+
+___
+
+
+
+
+
+Maya: Render Settings cleanup remove global `RENDER_ATTRS` #5801
+
+Remove global `lib.RENDER_ATTRS` and implement a `RenderSettings.get_padding_attr(renderer)` method instead.
+
+
+___
+
+
+
+
+
+Testing: Ingest expected files and input workfile #5840
+
+This ingests the Maya workfile from the Drive storage. Have changed the format to MayaAscii so its easier to see what changes are happening in a PR. This meant changing the expected files and database entries as well.
+
+
+___
+
+
+
+
+
+Chore: Create plugin auto-apply settings #5908
+
+Create plugins can auto-apply settings.
+
+
+___
+
+
+
+
+
+Resolve: Add save current file button + "Save" shortcut when menu is active #5691
+
+Adds a "Save current file" button to the OpenPype menu. Also adds a "Save" shortcut key sequence (CTRL+S on Windows) to the button, so that clicking CTRL+S when the menu is active will save the current workfile. However, this of course does not work if the menu does not receive the key press event (e.g. when the Resolve UI is active instead). Resolves #5684
+
+
+___
+
+
+
+
+
+Reference USD file as maya native geometry #5781
+
+Add MayaUsdReferenceLoader to reference USD as Maya native geometry using `mayaUSDImport` file translator.
+
+
+___
+
+
+
+
+
+Max: Bug fix on wrong aspect ratio and viewport not being maximized during context in review family #5839
+
+This PR will fix the bug of the wrong aspect ratio and the viewport not being maximized when creating a preview animation. Besides, support for the tga image format and options for AA quality are implemented in this PR.
+
+
+___
+
+
+
+
+
+Blender: Incorporate blender "Collections" into Publish/Load #5841
+
+Allow `blendScene` family to include collections.
+
+
+___
+
+
+
+
+
+Max: Allows user preset the setting of preview animation in OP/AYON Setting #5859
+
+Allows user preset the setting of preview animation in OP/AYON Setting for review family.
+- [x] Openpype
+- [x] AYON
+
+
+___
+
+
+
+
+
+Publisher: Center publisher window on first show #5877
+
+Move publisher window to center of a screen on first show.
+
+
+___
+
+
+
+
+
+Publisher: Instance context changes confirm works #5881
+
+Confirmation of context changes in publisher on existing instances does not cause glitches.
+
+
+___
+
+
+
+
+
+AYON workfiles tools: Revisit workfiles tool #5897
+
+Revisited workfiles tool for AYON mode to reuse common models and widgets.
+
+
+___
+
+
+
+
+
+Nuke: updated colorspace settings #5906
+
+Updating nuke colorspace settings into more convenient way with usage of ocio config roles rather then particular colorspace names. This way we should not have troubles to switch between linear Rec709 or ACES configs without any additional settings changes.
+
+
+___
+
+
+
+
+
+Blender: Refactor to new publisher #5910
+
+Refactor Blender integration to use the new publisher
+
+
+___
+
+
+
+
+
+Enhancement: Some publish logs cosmetics #5917
+
+General logging message tweaks:
+- Sort some lists of folder/filenames so they appear sorted in the logs
+- Fix some grammar / typos
+- In some cases provide slightly more information in a log
+
+
+___
+
+
+
+
+
+Blender: Better name of 'asset_name' function #5927
+
+Renamed function `asset_name` to `prepare_scene_name`.
+
+
+___
+
+
+
+### **🐛 Bug fixes**
+
+
+
+Maya: Bug fix the fbx animation export errored out when the skeletonAnim set is empty #5875
+
+Resolves a bug reported on Discord. If the skeletonAnim set is empty during fbx animation collection, the fbx animation extractor skips the fbx extraction.
+
+
+___
+
+
+
+
+
+Bugfix: fix few typos in houdini's and Maya's Ayon settings #5882
+
+Fixing few typos
+- [x] Maya unreal static mesh
+- [x] Houdini static mesh
+- [x] Houdini collect asset handles
+
+
+___
+
+
+
+
+
+Bugfix: Ayon Deadline env vars + error message on no executable found #5815
+
+Fix some Ayon x Deadline issues as came up in this topic:
+- missing Environment Variables issue explained here for `deadlinePlugin.RunProcess` for the AYON _extract environments_ call.
+- wrong error formatting described here with a `;` between each character like this: `Ayon executable was not found in the semicolon separated list "C;:;/;P;r;o;g;r;a;m; ;F;i;l;e;s;/;Y;n;p;u;t;/;A;Y;O;N; ;1;.;0;.;0;-;b;e;t;a;.;5;/;a;y;o;n;_;c;o;n;s;o;l;e;.;e;x;e". The path to the render executable can be configured from the Plugin Configuration in the Deadline Monitor.`
+
+
+___
+
+
+
+
+
+AYON: Fix bundles access in settings #5856
+
+Fixed access to bundles data in settings to define correct develop variant.
+
+
+___
+
+
+
+
+
+AYON 3dsMax settings: 'ValidateAttributes' settings converte only if available #5878
+
+Convert `ValidateAttributes` settings only if they are available in AYON settings.
+
+
+___
+
+
+
+
+
+AYON: Fix TrayPublisher editorial settings #5880
+
+Fixing Traypublisher settings for adding task in simple editorial.
+
+
+___
+
+
+
+
+
+TrayPublisher: editorial frame range check not needed #5884
+
+Validator for frame ranges is not needed during editorial publishing since entity data are not yet in database.
+
+
+___
+
+
+
+
+
+Update houdini license validator #5886
+
+As reported in this community comment: Houdini USD publishing is only restricted in Houdini apprentice.
+
+
+___
+
+
+
+
+
+Blender: Fix blend extraction and packed images #5888
+
+Fixed an issue with the blend extractor and packed images.
+
+
+___
+
+
+
+
+
+AYON: Initialize connection with all information #5890
+
+Create global AYON api connection with all information all the time.
+
+
+___
+
+
+
+
+
+AYON: Scene inventory tool without site sync #5896
+
+Skip 'get_site_icons' if site sync addon is disabled.
+
+
+___
+
+
+
+
+
+Publish report tool: Fix PySide6 #5898
+
+Use constants from classes instead of objects.
+
+
+___
+
+
+
+
+
+fusion: removing hardcoded template name for saver #5907
+
+Fusion is not hardcoded for `render` anatomy template only anymore. This was blocking AYON deployment.
+
+
+___
+
+
+
+
+
+
+## [3.17.5](https://github.com/ynput/OpenPype/tree/3.17.5)
+
+
+[Full Changelog](https://github.com/ynput/OpenPype/compare/3.17.4...3.17.5)
+
+### **🆕 New features**
+
+
+
+Fusion: Add USD loader #4896
+
+Add an OpenPype managed USD loader (`uLoader`) for Fusion.
+
+
+___
+
+
+
+
+
+Fusion: Resolution validator #5325
+
+Added a resolution validator. The code is from my old PR (https://github.com/ynput/OpenPype/pull/4921) that I closed because the PR also contained a frame range validator that is no longer needed.
+
+
+___
+
+
+
+
+
+Context Selection tool: Refactor Context tool (for AYON) #5766
+
+Context selection tool has AYON variant.
+
+
+___
+
+
+
+
+
+AYON: Use AYON username for user in template data #5842
+
+Use ayon username for template data in AYON mode.
+
+
+___
+
+
+
+
+
+Testing: app_group flag #5869
+
+`app_group` command flag. This is for changing which flavour of the host to launch. In the case of Maya, you can launch Maya and MayaPy, but it can be used for the Nuke family as well.Split from #5644
+
+
+___
+
+
+
+### **🚀 Enhancements**
+
+
+
+Enhancement: Fusion fix saver creation + minor Blender/Fusion logging tweaks #5558
+
+- Blender change logs to `debug` level in preparation for new publisher artist facing reports (note that it currently still uses the old publisher)
+- Fusion: Create Saver fix redeclaration of default_variants
+- Fusion: Fix saver being created in incorrect state without saving directly after create
+- Fusion: Allow reset frame range on render family
+- Fusion: Tweak logging level for artist-facing report
+
+
+___
+
+
+
+
+
+Resolve: load clip to timeline at set time #5665
+
+It is possible to load clip to correct place on timeline.
+
+
+___
+
+
+
+
+
+Nuke: Optional Deadline workfile dependency. #5732
+
+Adds option to add the workfile as dependency for the Deadline job.Think it used to have something like this, but it disappeared. Usecase is for remote workflow where the Nuke script needs to be synced before the job can start.
+
+
+___
+
+
+
+
+
+Enhancement/houdini rearrange ayon houdini settings files #5748
+
+Rearranging Houdini Settings to be more readable, easier to edit, update settings (include all families/product types)This PR is mainly for Ayon Settings to have more organized files. For Openpype, I'll make sure that each Houdini setting in Ayon has an equivalent in Openpype.
+- [x] update Ayon settings, fix typos and remove deprecated settings.
+- [x] Sync with Openpype
+- [x] Test in Openpype
+- [x] Test in Ayon
+
+
+___
+
+
+
+
+
+Chore: updating create ayon addon script #5822
+
+Adding developers environment options.
+
+
+___
+
+
+
+
+
+Max: Implement Validator for Properties/Attributes Value Check #5824
+
+Add optional validator which can check if the property attributes are valid in Max
+
+
+___
+
+
+
+
+
+Nuke: Remove unused 'get_render_path' function #5826
+
+Remove unused function `get_render_path` from nuke integration.
+
+
+___
+
+
+
+
+
+Chore: Limit current context template data function #5845
+
+Current implementation of `get_current_context_template_data` does return the same values as base template data function `get_template_data`.
+
+
+___
+
+
+
+
+
+Max: Make sure Collect Render not ignoring instance asset #5847
+
+- Make sure Collect Render is not always using asset from context.
+- Make sure Scene version being collected
+- Clean up unnecessary uses of code in the collector.
+
+
+___
+
+
+
+
+
+Ftrack: Events are not processed if project is not available in OpenPype #5853
+
+Events that happened on project which is not in OpenPype is not processed.
+
+
+___
+
+
+
+
+
+Nuke: Add Nuke 11.0 as default setting #5855
+
+Found I needed Nuke 11.0 in the default settings to help with unit testing.
+
+
+___
+
+
+
+
+
+TVPaint: Code cleanup #5857
+
+Removed unused import. Use `AYON` label in ayon mode. Removed unused data in publish context `"previous_context"`.
+
+
+___
+
+
+
+
+
+AYON settings: Use correct label for follow workfile version #5874
+
+Follow workfile version label was marked as Collect Anatomy Instance Data label.
+
+
+___
+
+
+
+### **🐛 Bug fixes**
+
+
+
+Nuke: Fix workfile template builder so representations get loaded next to each other #5061
+
+Refactor when the cleanup of the placeholder happens for the cases where multiple representations are loaded by a single placeholder.The existing code didn't take into account the case where a template placeholder can load multiple representations so it was trying to do the cleanup of the placeholder node and the re-arrangement of the imported nodes too early. I assume this was designed only for the cases where a single representation can load multiple nodes.
+
+
+___
+
+
+
+
+
+Nuke: Dont update node name on update #5704
+
+When updating `Image` containers the code is trying to set the name of the node. This results in a warning message from Nuke shown below;Suggesting to not change the node name when updating.
+
+
+___
+
+
+
+
+
+UIDefLabel can be unique #5827
+
+`UILabelDef` have implemented comparison and uniqueness.
+
+
+___
+
+
+
+
+
+AYON: Skip kitsu module when creating ayon addons #5828
+
+Create AYON packages is skipping kitsu module in creation of modules/addons and kitsu module is not loaded from modules on start. The addon already has it's repository https://github.com/ynput/ayon-kitsu.
+
+
+___
+
+
+
+
+
+Bugfix: Collect Rendered Files only collecting first instance #5832
+
+Collect all instances from the metadata file - don't return on first instance iteration.
+
+
+___
+
+
+
+
+
+Houdini: set frame range for the created composite ROP #5833
+
+Quick bug fix for created composite ROP, set its frame range to the frame range of the playbar.
+
+
+___
+
+
+
+
+
+Fix registering launcher actions from OpenPypeModules #5843
+
+Fix typo `actions_dir` -> `path` to fix registering launcher actions from OpenPypeModule
+
+
+___
+
+
+
+
+
+Bugfix in houdini shelves manager and beautify settings #5844
+
+This PR fixes the problem in this PR https://github.com/ynput/OpenPype/issues/5457 by using the right function to load a pre-made houdini `.shelf` file. Also, it beautifies houdini shelves settings to provide better guidance for users, which helps with another issue https://github.com/ynput/OpenPype/issues/5458. Rather than adding default shelf and set names, I'll educate users on how to use the tool correctly. Users are now able to select between the two options (OpenPype and Ayon).
+
+
+___
+
+
+
+
+
+Blender: Fix missing Grease Pencils in review #5848
+
+Fix Grease Pencil missing in review when isolating objects.
+
+
+___
+
+
+
+
+
+Blender: Fix Render Settings in Ayon #5849
+
+Fix Render Settings in Ayon for Blender.
+
+
+___
+
+
+
+
+
+Bugfix: houdini tab menu working as expected #5850
+
+This PR: the tab menu name changes to Ayon when using ayon; get_network_categories is checked in all creator plugins. Product / network categories: Alembic camera (rop, obj), Arnold Ass (rop), Arnold ROP (rop), Bgeo (rop, sop), composite sequence (cop2, rop), hda (obj), Karma ROP (rop), Mantra ROP (rop), ABC (rop, sop), RS proxy (rop, sop), RS ROP (rop), Review (rop), Static mesh (rop, obj, sop), USD (lop, rop), USD Render (rop), VDB (rop, obj, sop), V Ray (rop).
+
+
+___
+
+
+
+
+
+Bugfix: Houdini skip frame_range_validator if node has no 'trange' parameter #5851
+
+I faced a bug when publishing HDA instance as it has no `trange` parameter. As this PR title says : skip frame_range_validator if node has no 'trange' parameter
+
+
+___
+
+
+
+
+
+Bugfix: houdini image sequence loading and missing frames #5852
+
+I made this PR to fix issues mentioned here: https://github.com/ynput/OpenPype/pull/5833#issuecomment-1789207727. In short:
+- image load doesn't work
+- publisher only publish one frame
+
+
+___
+
+
+
+
+
+Nuke: loaders' containers updating as nodes #5854
+
+Nuke loaded containers are now updating correctly even when they are duplicates of originally loaded nodes. Previously, duplicated nodes were removed.
+
+
+___
+
+
+
+
+
+deadline: settings are not blocking extension input #5864
+
+Settings are not blocking user input.
+
+
+___
+
+
+
+
+
+Blender: Fix loading of blend layouts #5866
+
+Fix a problem with loading blend layouts.
+
+
+___
+
+
+
+
+
+AYON: Launcher refresh issues #5867
+
+Fixed refresh of projects issue in launcher tool. And renamed Qt models to contain `Qt` in their name (it was really hard to find out where they were used). It is not possible to click on a disabled item in the launcher's projects view.
+
+
+___
+
+
+
+
+
+Fix the Wrong key words for tycache workfile template settings in AYON #5870
+
+Fix the wrong key words for the tycache workfile template settings in AYON(i.e. Instead of families, product_types should be used)
+
+
+___
+
+
+
+
+
+AYON tools: Handle empty icon definition #5876
+
+Ignore if passed icon definition is `None`.
+
+
+___
+
+
+
+### **🔀 Refactored code**
+
+
+
+Houdini: Remove on instance toggled callback #5860
+
+Remove on instance toggled callback which isn't relevant to the new publisher
+
+
+___
+
+
+
+
+
+Chore: Remove unused `instanceToggled` callbacks #5862
+
+The `instanceToggled` callbacks should be irrelevant for new publisher.
+
+
+___
+
+
+
+
+
+
+## [3.17.4](https://github.com/ynput/OpenPype/tree/3.17.4)
+
+
+[Full Changelog](https://github.com/ynput/OpenPype/compare/3.17.3...3.17.4)
+
+### **🆕 New features**
+
+
+
+Add Support for Husk-AYON Integration #5816
+
+This draft pull request introduces support for integrating Husk with AYON within the OpenPype repository.
+
+
+___
+
+
+
+
+
+Push to project tool: Prepare push to project tool for AYON #5770
+
+Cloned Push to project tool for AYON and modified it.
+
+
+___
+
+
+
+### **🚀 Enhancements**
+
+
+
+Max: tycache family support #5624
+
+Tycache family supports for Tyflow Plugin in Max
+
+
+___
+
+
+
+
+
+Unreal: Changed behaviour for updating assets #5670
+
+Changed how assets are updated in Unreal.
+
+
+___
+
+
+
+
+
+Unreal: Improved error reporting for Sequence Frame Validator #5730
+
+Improved error reporting for Sequence Frame Validator.
+
+
+___
+
+
+
+
+
+Max: Setting tweaks on Review Family #5744
+
+- Bug fix of not being able to publish the preferred visual style when creating preview animation
+- Exposes the parameters after creating instance
+- Add the Quality settings and viewport texture settings for preview animation
+- add use selection for create review
+
+
+___
+
+
+
+
+
+Max: Add families with frame range extractions back to the frame range validator #5757
+
+In 3dsMax, there are some instances which export files in a frame range but are not added to the optional frame range validator. In this PR, these instances have the optional frame range validator to allow users to check if the frame range aligns with the context data from the DB. The following families have been added to have an optional frame range validator:
+- maxrender
+- review
+- camera
+- redshift proxy
+- pointcache
+- point cloud(tyFlow PRT)
+
+
+___
+
+
+
+
+
+TimersManager: Use available data to get context info #5804
+
+Get context information from pyblish context data instead of using `legacy_io`.
+
+
+___
+
+
+
+
+
+Chore: Removed unused variable from `AbstractCollectRender` #5805
+
+Removed unused `_asset` variable from `RenderInstance`.
+
+
+___
+
+
+
+### **🐛 Bug fixes**
+
+
+
+Bugfix/houdini: wrong frame calculation with handles #5698
+
+This PR make collect plugins to consider `handleStart` and `handleEnd` when collecting frame range it affects three parts:
+- get frame range in collect plugins
+- expected file in render plugins
+- submit houdini job deadline plugin
+
+
+___
+
+
+
+
+
+Nuke: ayon server settings improvements #5746
+
+Nuke settings were not aligned with OpenPype settings. Also labels needed to be improved.
+
+
+___
+
+
+
+
+
+Blender: Fix pointcache family and fix alembic extractor #5747
+
+Fixed `pointcache` family and fixed behaviour of the alembic extractor.
+
+
+___
+
+
+
+
+
+AYON: Remove 'shotgun_api3' from dependencies #5803
+
+Removed `shotgun_api3` dependency from openpype dependencies for AYON launcher. The dependency is already defined in shotgrid addon and change of version causes clashes.
+
+
+___
+
+
+
+
+
+Chore: Fix typo in filename #5807
+
+Move content of `contants.py` into `constants.py`.
+
+
+___
+
+
+
+
+
+Chore: Create context respects instance changes #5809
+
+Fix issue with unrespected change propagation in `CreateContext`. All successfully saved instances are marked as saved so they have no changes. Origin data of an instance are explicitly not handled directly by the object but by the attribute wrappers.
+
+
+___
+
+
+
+
+
+Blender: Fix tools handling in AYON mode #5811
+
+Skip logic in `before_window_show` in blender when in AYON mode. Most of the stuff called there happens on show automatically.
+
+
+___
+
+
+
+
+
+Blender: Include Grease Pencil in review and thumbnails #5812
+
+Include Grease Pencil in review and thumbnails.
+
+
+___
+
+
+
+
+
+Workfiles tool AYON: Fix double click of workfile #5813
+
+Fix double click on workfiles in workfiles tool to open the file.
+
+
+___
+
+
+
+
+
+Webpublisher: removal of usage of no_of_frames in error message #5819
+
+If it throws an exception, the `no_of_frames` value won't be available, so it doesn't make sense to log it.
+
+
+___
+
+
+
+
+
+Attribute Defs: Hide multivalue widget in Number by default #5821
+
+Fixed default look of `NumberAttrWidget` by hiding its multiselection widget.
+
+
+___
+
+
+
+### **Merged pull requests**
+
+
+
+Corrected a typo in Readme.md (Top -> To) #5800
+
+
+___
+
+
+
+
+
+Photoshop: Removed redundant copy of extension.zxp #5802
+
+`extension.zxp` shouldn't be inside of extension folder.
+
+
+___
+
+
+
+
+
+
+## [3.17.3](https://github.com/ynput/OpenPype/tree/3.17.3)
+
+
+[Full Changelog](https://github.com/ynput/OpenPype/compare/3.17.2...3.17.3)
+
+### **🆕 New features**
+
+
+
+Maya: Multi-shot Layout Creator #5710
+
+New Multi-shot Layout creator is a way of automating creation of the new Layout instances in Maya, associated with correct shots, frame ranges and Camera Sequencer in Maya.
+
+
+___
+
+
+
+
+
+Colorspace: ociolook file product type workflow #5541
+
+Traypublisher support for publishing of colorspace look files (ociolook) which are json files holding any LUT files. This new product is available for loading in Nuke host at the moment.Added colorspace selector to publisher attribute with better labeling. We are supporting also Roles and Alias (only v2 configs).
+
+
+___
+
+
+
+
+
+Scene Inventory tool: Refactor Scene Inventory tool (for AYON) #5758
+
+Modified scene inventory tool for AYON. The main difference is in how project name is defined and replacement of assets combobox with folders dialog.
+
+
+___
+
+
+
+
+
+AYON: Support dev bundles #5783
+
+Modules can be loaded in AYON dev mode from different location.
+
+
+___
+
+
+
+### **🚀 Enhancements**
+
+
+
+Testing: Ingest Maya userSetup #5734
+
+Suggesting to ingest `userSetup.py` startup script for easier collaboration and transparency of testing.
+
+
+___
+
+
+
+
+
+Fusion: Work with pathmaps #5329
+
+Path maps are a big part of our Fusion workflow. We map the project folder to a path map within Fusion so all loaders and savers point to the path map variable. This way any computer on any OS can open any comp no matter where the project folder is located.
+
+
+___
+
+
+
+
+
+Maya: Add Maya 2024 and remove pre 2022. #5674
+
+Adding Maya 2024 as default application variant. Removing Maya 2020 and older, as these are not supported anymore.
+
+
+___
+
+
+
+
+
+Enhancement: Houdini: Allow using template keys in Houdini shelves manager #5727
+
+Allow using Template keys in Houdini shelves manager.
+
+
+___
+
+
+
+
+
+Houdini: Fix Show in usdview loader action #5737
+
+Fix the "Show in USD View" loader to show up in Houdini
+
+
+___
+
+
+
+
+
+Nuke: validator of asset context with repair actions #5749
+
+Instance nodes with different context of asset and task can be now validated and repaired via repair action.
+
+
+___
+
+
+
+
+
+AYON: Tools enhancements #5753
+
+Few enhancements and tweaks of AYON related tools.
+
+
+___
+
+
+
+
+
+Max: Tweaks on ValidateMaxContents #5759
+
+This PR provides enhancements on ValidateMaxContent as follow:
+- Rename `ValidateMaxContents` to `ValidateContainers`
+- Add related families which are required to pass the validation(All families except `Render` as the render instance is the one which only allows empty container)
+
+
+___
+
+
+
+
+
+Enhancement: Nuke refactor `SelectInvalidAction` #5762
+
+Refactor `SelectInvalidAction` to behave like other action for other host, create `SelectInstanceNodeAction` as dedicated action to select the instance node for a failed plugin.
+- Note: Selecting Instance Node will still select the instance node even if the user has currently 'fixed' the problem.
+
+
+___
+
+
+
+
+
+Enhancement: Tweak logging for Nuke for artist facing reports #5763
+
+Tweak logs that are not artist-facing to debug level + in some cases clarify what the logged value is.
+
+
+___
+
+
+
+
+
+AYON Settings: Disk mapping #5786
+
+Added disk mapping settings to core addon settings.
+
+
+___
+
+
+
+### **🐛 Bug fixes**
+
+
+
+Maya: add colorspace argument to redshiftTextureProcessor #5645
+
+In color managed Maya, texture processing during Look Extraction wasn't passing texture colorspaces set on textures to `redshiftTextureProcessor` tool. This in effect caused this tool to produce non-zero exit code (even though the texture was converted into wrong colorspace) and therefor crash of the extractor. This PR is passing colorspace to that tool if color management is enabled.
+
+
+___
+
+
+
+
+
+Maya: don't call `cmds.ogs()` in headless mode #5769
+
+`cmds.ogs()` is a call that will crash if Maya is running in headless mode (mayabatch, mayapy). This is handling that case.
+
+
+___
+
+
+
+
+
+Resolve: inventory management fix #5673
+
+Loaded Timeline item containers are now updating correctly and version management is working as it is supposed to.
+- [x] updating loaded timeline items
+- [x] Removing of loaded timeline items
+
+
+___
+
+
+
+
+
+Blender: Remove 'update_hierarchy' #5756
+
+Remove `update_hierarchy` function which is causing crashes in scene inventory tool.
+
+
+___
+
+
+
+
+
+Max: bug fix on the settings in pointcloud family #5768
+
+Bug fix on the settings being errored out in validate point cloud (see link: https://github.com/ynput/OpenPype/pull/5759#pullrequestreview-1676681705) and possibly in point cloud extractor.
+
+
+___
+
+
+
+
+
+AYON settings: Fix default factory of tools #5773
+
+Fix default factory of application tools.
+
+
+___
+
+
+
+
+
+Fusion: added missing OPENPYPE_VERSION #5776
+
+Fusion submission to Deadline was missing OPENPYPE_VERSION env var when submitting from build (not source code directly). This missing env var might break rendering on DL if path to OP executable (openpype_console.exe) is not set explicitly and might cause an issue when different versions of OP are deployed.This PR adds this environment variable.
+
+
+___
+
+
+
+
+
+Ftrack: Skip tasks when looking for asset equivalent entity #5777
+
+Skip tasks when looking for asset equivalent entity.
+
+
+___
+
+
+
+
+
+Nuke: loading gizmos fixes #5779
+
+Gizmo product is not offered in Loader as plugin. It is also updating as expected.
+
+
+___
+
+
+
+
+
+General: thumbnail extractor as last extractor #5780
+
+Fixing issue with the order of the `ExtractOIIOTranscode` and `ExtractThumbnail` plugins. The problem was that the `ExtractThumbnail` plugin was processed before the `ExtractOIIOTranscode` plugin. As a result, the `ExtractThumbnail` plugin did not inherit the `review` tag into the representation data. This caused the `ExtractThumbnail` plugin to fail in processing and creating thumbnails.
+
+
+___
+
+
+
+
+
+Bug: fix key in application json #5787
+
+In PR #5705 `maya` was wrongly used instead of `mayapy`, breaking AYON defaults in AYON Application Addon.
+
+
+___
+
+
+
+
+
+'NumberAttrWidget' shows 'Multiselection' label on multiselection #5792
+
+Attribute definition widget 'NumberAttrWidget' shows `< Multiselection >` label on multiselection.
+
+
+___
+
+
+
+
+
+Publisher: Selection change by enabled checkbox on instance update attributes #5793
+
+Change of instance by clicking on enabled checkbox will actually update attributes on right side to match the selection.
+
+
+___
+
+
+
+
+
+Houdini: Remove `setParms` call since it's responsibility of `self.imprint` to set the values #5796
+
+Revert a recent change made in #5621 due to this comment. However the change is faulty as can be seen mentioned here
+
+
+___
+
+
+
+
+
+AYON loader: Fix SubsetLoader functionality #5799
+
+Fix SubsetLoader plugin processing in AYON loader tool.
+
+
+___
+
+
+
+### **Merged pull requests**
+
+
+
+Houdini: Add self publish button #5621
+
+This PR allows single publishing by adding a publish button to created ROP nodes in Houdini. Admins are much welcomed to enable it from Houdini general settings. The publish button also includes all input publish instances. In this screenshot the alembic instance is ignored because the switch is turned off.
+
+
+___
+
+
+
+
+
+Nuke: fixing UNC support for OCIO path #5771
+
+UNC paths were broken on windows for custom OCIO path and this is solving the issue with removed double slash at start of path
+
+
+___
+
+
+
+
+
+
+## [3.17.2](https://github.com/ynput/OpenPype/tree/3.17.2)
+
+
+[Full Changelog](https://github.com/ynput/OpenPype/compare/3.17.1...3.17.2)
+
+### **🆕 New features**
+
+
+
+Maya: Add MayaPy application. #5705
+
+This adds mayapy to the application to be launched from a task.
+
+
+___
+
+
+
+
+
+Feature: Copy resources when downloading last workfile #4944
+
+When the last published workfile is downloaded as a prelaunch hook, all resource files referenced in the workfile representation are copied to the `resources` folder, which is inside the local workfile folder.
+
+
+___
+
+
+
+
+
+Blender: Deadline support #5438
+
+Add Deadline support for Blender.
+
+
+___
+
+
+
+
+
+Fusion: implement toggle to use Deadline plugin FusionCmd #5678
+
+Fusion 17 doesn't work in DL 10.3, but FusionCmd does. It might be a better option as a headless variant. The Fusion plugin seems to be closing and reopening the application when the worker is running on an artist machine, not so with FusionCmd. Added configuration to Project Settings for admin to select the appropriate Deadline plugin:
+
+
+___
+
+
+
+
+
+Loader tool: Refactor loader tool (for AYON) #5729
+
+Refactored loader tool to new tool. Separated backend and frontend logic. Refactored logic is AYON-centric and is used only in AYON mode, so it does not affect OpenPype. The tool is also replacing library loader.
+
+
+___
+
+
+
+### **🚀 Enhancements**
+
+
+
+Maya: implement matchmove publishing #5445
+
+Add possibility to export multiple cameras in single `matchmove` family instance, both in `abc` and `ma`.Exposed flag 'Keep image planes' to control export of image planes.
+
+
+___
+
+
+
+
+
+Maya: Add optional Fbx extractors in Rig and Animation family #5589
+
+This PR allows user to export control rigs(optionally with mesh) and animated rig in fbx optionally by attaching the rig objects to the two newly introduced sets.
+
+
+___
+
+
+
+
+
+Maya: Optional Resolution Validator for Render #5693
+
+Adding optional resolution validator for maya in render family, similar to the one in Max.It checks if the resolution in render setting aligns with that in setting from the db.
+
+
+___
+
+
+
+
+
+Use host's node uniqueness for instance id in new publisher #5490
+
+Instead of writing `instance_id` as parm or attributes on the publish instances we can, for some hosts, just rely on a unique name or path within the scene to refer to that particular instance. By doing so we fix #4820 because upon duplicating such a publish instance using the host's (DCC) functionality the uniqueness for the duplicate is then already ensured instead of attributes remaining exact same value as where to were duplicated from, making `instance_id` a non-unique value.
+
+
+___
+
+
+
+
+
+Max: Implementation of OCIO configuration #5499
+
+Resolve #5473 Implementation of OCIO configuration for Max 2024 regarding to the update of Max 2024
+
+
+___
+
+
+
+
+
+Nuke: Multiple format supports for ExtractReviewDataMov #5623
+
+This PR would fix the bug of the plugin `ExtractReviewDataMov` not being able to support extensions other than `mov`. The plugin is also renamed to `ExtractReviewDataBakingStreams` as it provides multiple format support.
+
+
+___
+
+
+
+
+
+Bugfix: houdini switching context doesnt update variables #5651
+
+Allows admins to have a list of vars (e.g. JOB) with (dynamic) values that will be updated on context changes, e.g. when switching to another asset or task. Using template keys is supported but formatting key capitalization variants is not, e.g. {Asset} and {ASSET} won't work. Disabling the Update Houdini vars on context change feature will leave all Houdini vars unmanaged and thus no context update changes will occur. Also, this PR adds a new button in the menu to update vars on demand.
+
+
+___
+
+
+
+
+
+Publisher: Fix report maker memory leak + optimize lookups using set #5667
+
+Fixes a memory leak where resetting the publisher does not clear the stored plugins for the Publish Report Maker. Also changes the stored plugins to a `set` to optimize the lookup speeds.
+
+
+___
+
+
+
+
+
+Add openpype_mongo command flag for testing. #5676
+
+Instead of changing the environment, this command flag allows for changing the database.
+
+
+___
+
+
+
+
+
+Nuke: minor docstring and code tweaks for ExtractReviewMov #5695
+
+Code and docstring tweaks on https://github.com/ynput/OpenPype/pull/5623
+
+
+___
+
+
+
+
+
+AYON: Small settings fixes #5699
+
+Small changes/fixes related to AYON settings. All foundry apps variant `13-0` has label `13.0`. Key `"ExtractReviewIntermediates"` is not mandatory in settings.
+
+
+___
+
+
+
+
+
+Blender: Alembic Animation loader #5711
+
+Implemented loading Alembic Animations in Blender.
+
+
+___
+
+
+
+### **🐛 Bug fixes**
+
+
+
+Maya: Missing "data" field and enabling of audio #5618
+
+When updating audio containers, the field "data" was missing and the audio node was not enabled on the timeline.
+
+
+___
+
+
+
+
+
+Maya: Bug in validate Plug-in Path Attribute #5687
+
+Overwriting list with string is causing `TypeError: string indices must be integers` in subsequent iterations, crashing the validator plugin.
+
+
+___
+
+
+
+
+
+General: Avoid fallback if value is 0 for handle start/end #5652
+
+There's a bug on the `pyblish_functions.get_time_data_from_instance_or_context` where if `handleStart` or `handleEnd` on the instance are set to value 0 it's falling back to grabbing the handles from the instance context. Instead, the logic should be that it only falls back to the `instance.context` if the key doesn't exist.This change was only affecting me on the `handleStart`/`handleEnd` and it's unlikely it could cause issues on `frameStart`, `frameEnd` or `fps` but regardless, the `get` logic is wrong.
+
+
+___
+
+
+
+
+
+Fusion: added missing env vars to Deadline submission #5659
+
+Environment variables discerning the type of job were missing. Without them, injection of environment variables won't start.
+
+
+___
+
+
+
+
+
+Nuke: workfile version synchronization settings fixed #5662
+
+Settings for synchronizing workfile version to published products is fixed.
+
+
+___
+
+
+
+
+
+AYON Workfiles Tool: Open workfile changes context #5671
+
+Change context when workfile is opened.
+
+
+___
+
+
+
+
+
+Blender: Fix remove/update in new layout instance #5679
+
+Fixes an error that occurs when removing or updating an asset in a new layout instance.
+
+
+___
+
+
+
+
+
+AYON Launcher tool: Fix refresh btn #5685
+
+Refresh button does propagate refreshed content properly. Folders and tasks are cached for 60 seconds instead of 10 seconds. Auto-refresh in launcher will refresh only actions and related data which is project and project settings.
+
+
+___
+
+
+
+
+
+Deadline: handle all valid paths in RenderExecutable #5694
+
+This commit enhances the path resolution mechanism in the RenderExecutable function of the Ayon plugin. Previously, the function only considered paths starting with a tilde (~), ignoring other valid paths listed in exe_list. This limitation led to an empty expanded_paths list when none of the paths in exe_list started with a tilde, causing the function to fail in finding the Ayon executable.With this fix, the RenderExecutable function now correctly processes and includes all valid paths from exe_list, improving its reliability and preventing unnecessary errors related to Ayon executable location.
+
+
+___
+
+
+
+
+
+AYON Launcher tool: Fix skip last workfile boolean #5700
+
+Skip last workfile boolean works as expected.
+
+
+___
+
+
+
+
+
+Chore: Explore here action can work without task #5703
+
+Explore here action does not crash when task is not selected, and change error message a little.
+
+
+___
+
+
+
+
+
+Testing: Inject mongo_url argument earlier #5706
+
+Fix for https://github.com/ynput/OpenPype/pull/5676. The Mongo url is used earlier in the execution.
+
+
+___
+
+
+
+
+
+Blender: Add support to auto-install PySide2 in blender 4 #5723
+
+Change version regex to support blender 4 subfolder.
+
+
+___
+
+
+
+
+
+Fix: Hardcoded main site and wrongly copied workfile #5733
+
+Fixing these two issues:
+- Hardcoded main site -> Replaced by `anatomy.fill_root`.
+- Workfiles can sometimes be copied while they shouldn't.
+
+
+___
+
+
+
+
+
+Bugfix: ServerDeleteOperation asset -> folder conversion typo #5735
+
+Fix ServerDeleteOperation asset -> folder conversion typo
+
+
+___
+
+
+
+
+
+Nuke: loaders are filtering correctly #5739
+
+The variable name for filtering by extensions was not correct - it is supposed to be plural. It is fixed now and filtering works as it is supposed to.
+
+
+___
+
+
+
+
+
+Nuke: failing multiple thumbnails integration #5741
+
+This handles the situation when `ExtractReviewIntermediates` (previously `ExtractReviewDataMov`) has multiple outputs, including thumbnails that need to be integrated. Previously, integrating the thumbnail representation was causing an issue in the integration process. However, we have now resolved this issue by no longer integrating thumbnails as loadable representations.NOW default is that thumbnail representation are NOT integrated (eg. they will not show up in DB > couldn't be Loaded in Loader) and no `_thumb.jpg` will be left in `render` (most likely) publish folder.IF there would be need to override this behavior, please use `project_settings/global/publish/PreIntegrateThumbnails`
+
+
+___
+
+
+
+
+
+AYON Settings: Fix global overrides #5745
+
+The `output` dictionary that gets passed into `ayon_settings._convert_global_project_settings` gets replaced when converting the settings for `ExtractOIIOTranscode`. This results in `global` not being in the output dictionary and thus the defaults being used and not the project overrides.
+
+
+___
+
+
+
+
+
+Chore: AYON query functions arguments #5752
+
+Fixed how `archived` argument is handled in get subsets/assets function.
+
+
+___
+
+
+
+### **🔀 Refactored code**
+
+
+
+Publisher: Refactor Report Maker plugin data storage to be a dict by plugin.id #5668
+
+Refactor Report Maker plugin data storage to be a dict by `plugin.id`Also fixes `_current_plugin_data` type on `__init__`
+
+
+___
+
+
+
+
+
+Chore: Refactor Resolve into new style HostBase, IWorkfileHost, ILoadHost #5701
+
+Refactor Resolve into new style HostBase, IWorkfileHost, ILoadHost
+
+
+___
+
+
+
+### **Merged pull requests**
+
+
+
+Chore: Maya reduce get project settings calls #5669
+
+Re-use system settings / project settings where we can instead of requerying.
+
+
+___
+
+
+
+
+
+Extended error message when getting subset name #5649
+
+Each Creator is using `get_subset_name` functions which collect context data and fill the configured template with placeholders. If any key is missing in the template, a non-descriptive error is thrown. This should provide a more verbose message:
+
+
+___
+
+
+
+
+
+Tests: Remove checks for env var #5696
+
+The env var will be filled in the `env_var` fixture; here it is too early to check.
+
+
+___
+
+
+
+
+
+
## [3.17.1](https://github.com/ynput/OpenPype/tree/3.17.1)
diff --git a/README.md b/README.md
index ce98f845e6..a79b9f2582 100644
--- a/README.md
+++ b/README.md
@@ -7,6 +7,10 @@ OpenPype
[](https://github.com/pypeclub/pype/actions/workflows/documentation.yml) 
+## Important Notice!
+
+OpenPype as a standalone product has reached the end of its life and this repository is now used as the pipeline core code for [AYON](https://ynput.io/ayon/). You can read more details about the end-of-life process here: https://community.ynput.io/t/openpype-end-of-life-timeline/877
+
Introduction
------------
@@ -279,7 +283,7 @@ arguments and it will create zip file that OpenPype can use.
Building documentation
----------------------
-Top build API documentation, run `.\tools\make_docs(.ps1|.sh)`. It will create html documentation
+To build API documentation, run `.\tools\make_docs(.ps1|.sh)`. It will create html documentation
from current sources in `.\docs\build`.
**Note that it needs existing virtual environment.**
diff --git a/openpype/cli.py b/openpype/cli.py
index 7422f32f13..8caa139765 100644
--- a/openpype/cli.py
+++ b/openpype/cli.py
@@ -282,6 +282,9 @@ def run(script):
"--app_variant",
help="Provide specific app variant for test, empty for latest",
default=None)
+@click.option("--app_group",
+ help="Provide specific app group for test, empty for default",
+ default=None)
@click.option("-t",
"--timeout",
help="Provide specific timeout value for test case",
@@ -293,12 +296,15 @@ def run(script):
@click.option("--mongo_url",
help="MongoDB for testing.",
default=None)
+@click.option("--dump_databases",
+ help="Dump all databases to data folder.",
+ default=None)
def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant,
- timeout, setup_only, mongo_url):
+ timeout, setup_only, mongo_url, app_group, dump_databases):
"""Run all automatic tests after proper initialization via start.py"""
PypeCommands().run_tests(folder, mark, pyargs, test_data_folder,
persist, app_variant, timeout, setup_only,
- mongo_url)
+ mongo_url, app_group, dump_databases)
@main.command(help="DEPRECATED - run sync server")
diff --git a/openpype/client/__init__.py b/openpype/client/__init__.py
index 7831afd8ad..ba36d940e3 100644
--- a/openpype/client/__init__.py
+++ b/openpype/client/__init__.py
@@ -1,6 +1,7 @@
from .mongo import (
OpenPypeMongoConnection,
)
+from .server.utils import get_ayon_server_api_connection
from .entities import (
get_projects,
@@ -43,6 +44,8 @@ from .entities import (
get_thumbnail_id_from_source,
get_workfile_info,
+
+ get_asset_name_identifier,
)
from .entity_links import (
@@ -59,6 +62,8 @@ from .operations import (
__all__ = (
"OpenPypeMongoConnection",
+ "get_ayon_server_api_connection",
+
"get_projects",
"get_project",
"get_whole_project",
@@ -105,4 +110,6 @@ __all__ = (
"get_linked_representation_id",
"create_project",
+
+ "get_asset_name_identifier",
)
diff --git a/openpype/client/entities.py b/openpype/client/entities.py
index 5d9654c611..cbaa943743 100644
--- a/openpype/client/entities.py
+++ b/openpype/client/entities.py
@@ -4,3 +4,22 @@ if not AYON_SERVER_ENABLED:
from .mongo.entities import *
else:
from .server.entities import *
+
+
+def get_asset_name_identifier(asset_doc):
+ """Get asset name identifier by asset document.
+
+ This function is added because of AYON implementation where name
+ identifier is not just a name but full path.
+
+ Asset document must have "name" key, and "data.parents" when in AYON mode.
+
+ Args:
+ asset_doc (dict[str, Any]): Asset document.
+ """
+
+ if not AYON_SERVER_ENABLED:
+ return asset_doc["name"]
+ parents = list(asset_doc["data"]["parents"])
+ parents.append(asset_doc["name"])
+ return "/" + "/".join(parents)
diff --git a/openpype/client/server/conversion_utils.py b/openpype/client/server/conversion_utils.py
index 8c18cb1c13..e8d3c4cbe4 100644
--- a/openpype/client/server/conversion_utils.py
+++ b/openpype/client/server/conversion_utils.py
@@ -138,16 +138,22 @@ def _template_replacements_to_v3(template):
)
-def _convert_template_item(template):
- # Others won't have 'directory'
- if "directory" not in template:
- return
- folder = _template_replacements_to_v3(template.pop("directory"))
- template["folder"] = folder
- template["file"] = _template_replacements_to_v3(template["file"])
- template["path"] = "/".join(
- (folder, template["file"])
- )
+def _convert_template_item(template_item):
+ for key, value in tuple(template_item.items()):
+ template_item[key] = _template_replacements_to_v3(value)
+
+ # Change 'directory' to 'folder'
+ if "directory" in template_item:
+ template_item["folder"] = template_item.pop("directory")
+
+ if (
+ "path" not in template_item
+ and "file" in template_item
+ and "folder" in template_item
+ ):
+ template_item["path"] = "/".join(
+ (template_item["folder"], template_item["file"])
+ )
def _fill_template_category(templates, cat_templates, cat_key):
@@ -212,10 +218,27 @@ def convert_v4_project_to_v3(project):
_convert_template_item(template)
new_others_templates[name] = template
+ staging_templates = templates.pop("staging", None)
+ # Key 'staging_directories' is legacy key that changed
+ # to 'staging_dir'
+ _legacy_staging_templates = templates.pop("staging_directories", None)
+ if staging_templates is None:
+ staging_templates = _legacy_staging_templates
+
+ if staging_templates is None:
+ staging_templates = {}
+
+ # Prefix all staging template names with 'staging_' prefix
+ # and add them to 'others'
+ for name, template in staging_templates.items():
+ _convert_template_item(template)
+ new_name = "staging_{}".format(name)
+ new_others_templates[new_name] = template
+
for key in (
"work",
"publish",
- "hero"
+ "hero",
):
cat_templates = templates.pop(key)
_fill_template_category(templates, cat_templates, key)
@@ -583,7 +606,7 @@ def convert_v4_version_to_v3(version):
output_data[dst_key] = version[src_key]
if "createdAt" in version:
- created_at = arrow.get(version["createdAt"])
+ created_at = arrow.get(version["createdAt"]).to("local")
output_data["time"] = created_at.strftime("%Y%m%dT%H%M%SZ")
output["data"] = output_data
diff --git a/openpype/client/server/entities.py b/openpype/client/server/entities.py
index 3ee62a3172..75b5dc2cdd 100644
--- a/openpype/client/server/entities.py
+++ b/openpype/client/server/entities.py
@@ -1,9 +1,8 @@
import collections
-from ayon_api import get_server_api_connection
-
from openpype.client.mongo.operations import CURRENT_THUMBNAIL_SCHEMA
+from .utils import get_ayon_server_api_connection
from .openpype_comp import get_folders_with_tasks
from .conversion_utils import (
project_fields_v3_to_v4,
@@ -37,7 +36,7 @@ def get_projects(active=True, inactive=False, library=None, fields=None):
elif inactive:
active = False
- con = get_server_api_connection()
+ con = get_ayon_server_api_connection()
fields = project_fields_v3_to_v4(fields, con)
for project in con.get_projects(active, library, fields=fields):
yield convert_v4_project_to_v3(project)
@@ -45,7 +44,7 @@ def get_projects(active=True, inactive=False, library=None, fields=None):
def get_project(project_name, active=True, inactive=False, fields=None):
# Skip if both are disabled
- con = get_server_api_connection()
+ con = get_ayon_server_api_connection()
fields = project_fields_v3_to_v4(fields, con)
return convert_v4_project_to_v3(
con.get_project(project_name, fields=fields)
@@ -66,7 +65,7 @@ def _get_subsets(
fields=None
):
# Convert fields and add minimum required fields
- con = get_server_api_connection()
+ con = get_ayon_server_api_connection()
fields = subset_fields_v3_to_v4(fields, con)
if fields is not None:
for key in (
@@ -75,14 +74,14 @@ def _get_subsets(
):
fields.add(key)
- active = None
+ active = True
if archived:
- active = False
+ active = None
for subset in con.get_products(
project_name,
- subset_ids,
- subset_names,
+ product_ids=subset_ids,
+ product_names=subset_names,
folder_ids=folder_ids,
names_by_folder_ids=names_by_folder_ids,
active=active,
@@ -102,7 +101,7 @@ def _get_versions(
active=None,
fields=None
):
- con = get_server_api_connection()
+ con = get_ayon_server_api_connection()
fields = version_fields_v3_to_v4(fields, con)
@@ -114,23 +113,23 @@ def _get_versions(
queried_versions = con.get_versions(
project_name,
- version_ids,
- subset_ids,
- versions,
- hero,
- standard,
- latest,
+ version_ids=version_ids,
+ product_ids=subset_ids,
+ versions=versions,
+ hero=hero,
+ standard=standard,
+ latest=latest,
active=active,
fields=fields
)
- versions = []
+ version_entities = []
hero_versions = []
for version in queried_versions:
if version["version"] < 0:
hero_versions.append(version)
else:
- versions.append(convert_v4_version_to_v3(version))
+ version_entities.append(convert_v4_version_to_v3(version))
if hero_versions:
subset_ids = set()
@@ -160,9 +159,9 @@ def _get_versions(
break
conv_hero = convert_v4_version_to_v3(hero_version)
conv_hero["version_id"] = version_id
- versions.append(conv_hero)
+ version_entities.append(conv_hero)
- return versions
+ return version_entities
def get_asset_by_id(project_name, asset_id, fields=None):
@@ -183,6 +182,19 @@ def get_asset_by_name(project_name, asset_name, fields=None):
return None
+def _folders_query(project_name, con, fields, **kwargs):
+ if fields is None or "tasks" in fields:
+ folders = get_folders_with_tasks(
+ con, project_name, fields=fields, **kwargs
+ )
+
+ else:
+ folders = con.get_folders(project_name, fields=fields, **kwargs)
+
+ for folder in folders:
+ yield folder
+
+
def get_assets(
project_name,
asset_ids=None,
@@ -196,26 +208,45 @@ def get_assets(
active = True
if archived:
- active = False
+ active = None
- con = get_server_api_connection()
+ con = get_ayon_server_api_connection()
fields = folder_fields_v3_to_v4(fields, con)
kwargs = dict(
folder_ids=asset_ids,
- folder_names=asset_names,
parent_ids=parent_ids,
active=active,
- fields=fields
)
+ if not asset_names:
+ for folder in _folders_query(project_name, con, fields, **kwargs):
+ yield convert_v4_folder_to_v3(folder, project_name)
+ return
- if fields is None or "tasks" in fields:
- folders = get_folders_with_tasks(con, project_name, **kwargs)
+ new_asset_names = set()
+ folder_paths = set()
+ for name in asset_names:
+ if "/" in name:
+ folder_paths.add(name)
+ else:
+ new_asset_names.add(name)
- else:
- folders = con.get_folders(project_name, **kwargs)
+ yielded_ids = set()
+ if folder_paths:
+ for folder in _folders_query(
+ project_name, con, fields, folder_paths=folder_paths, **kwargs
+ ):
+ yielded_ids.add(folder["id"])
+ yield convert_v4_folder_to_v3(folder, project_name)
- for folder in folders:
- yield convert_v4_folder_to_v3(folder, project_name)
+ if not new_asset_names:
+ return
+
+ for folder in _folders_query(
+ project_name, con, fields, folder_names=new_asset_names, **kwargs
+ ):
+ if folder["id"] not in yielded_ids:
+ yielded_ids.add(folder["id"])
+ yield convert_v4_folder_to_v3(folder, project_name)
def get_archived_assets(
@@ -236,7 +267,7 @@ def get_archived_assets(
def get_asset_ids_with_subsets(project_name, asset_ids=None):
- con = get_server_api_connection()
+ con = get_ayon_server_api_connection()
return con.get_folder_ids_with_products(project_name, asset_ids)
@@ -282,7 +313,7 @@ def get_subsets(
def get_subset_families(project_name, subset_ids=None):
- con = get_server_api_connection()
+ con = get_ayon_server_api_connection()
return con.get_product_type_names(project_name, subset_ids)
@@ -430,7 +461,7 @@ def get_output_link_versions(project_name, version_id, fields=None):
if not version_id:
return []
- con = get_server_api_connection()
+ con = get_ayon_server_api_connection()
version_links = con.get_version_links(
project_name, version_id, link_direction="out")
@@ -446,7 +477,7 @@ def get_output_link_versions(project_name, version_id, fields=None):
def version_is_latest(project_name, version_id):
- con = get_server_api_connection()
+ con = get_ayon_server_api_connection()
return con.version_is_latest(project_name, version_id)
@@ -501,18 +532,18 @@ def get_representations(
else:
active = None
- con = get_server_api_connection()
+ con = get_ayon_server_api_connection()
fields = representation_fields_v3_to_v4(fields, con)
if fields and active is not None:
fields.add("active")
representations = con.get_representations(
project_name,
- representation_ids,
- representation_names,
- version_ids,
- names_by_version_ids,
- active,
+ representation_ids=representation_ids,
+ representation_names=representation_names,
+ version_ids=version_ids,
+ names_by_version_ids=names_by_version_ids,
+ active=active,
fields=fields
)
for representation in representations:
@@ -535,7 +566,7 @@ def get_representations_parents(project_name, representations):
repre["_id"]
for repre in representations
}
- con = get_server_api_connection()
+ con = get_ayon_server_api_connection()
parents_by_repre_id = con.get_representations_parents(project_name,
repre_ids)
folder_ids = set()
@@ -677,7 +708,7 @@ def get_workfile_info(
if not asset_id or not task_name or not filename:
return None
- con = get_server_api_connection()
+ con = get_ayon_server_api_connection()
task = con.get_task_by_name(
project_name, asset_id, task_name, fields=["id", "name", "folderId"]
)
diff --git a/openpype/client/server/entity_links.py b/openpype/client/server/entity_links.py
index d8395aabe7..368dcdcb9d 100644
--- a/openpype/client/server/entity_links.py
+++ b/openpype/client/server/entity_links.py
@@ -1,6 +1,4 @@
-import ayon_api
-from ayon_api import get_folder_links, get_versions_links
-
+from .utils import get_ayon_server_api_connection
from .entities import get_assets, get_representation_by_id
@@ -28,7 +26,8 @@ def get_linked_asset_ids(project_name, asset_doc=None, asset_id=None):
if not asset_id:
asset_id = asset_doc["_id"]
- links = get_folder_links(project_name, asset_id, link_direction="in")
+ con = get_ayon_server_api_connection()
+ links = con.get_folder_links(project_name, asset_id, link_direction="in")
return [
link["entityId"]
for link in links
@@ -115,6 +114,7 @@ def get_linked_representation_id(
if link_type:
link_types = [link_type]
+ con = get_ayon_server_api_connection()
# Store already found version ids to avoid recursion, and also to store
# output -> Don't forget to remove 'version_id' at the end!!!
linked_version_ids = {version_id}
@@ -124,7 +124,7 @@ def get_linked_representation_id(
if not versions_to_check:
break
- links = get_versions_links(
+ links = con.get_versions_links(
project_name,
versions_to_check,
link_types=link_types,
@@ -145,8 +145,8 @@ def get_linked_representation_id(
linked_version_ids.remove(version_id)
if not linked_version_ids:
return []
-
- representations = ayon_api.get_representations(
+ con = get_ayon_server_api_connection()
+ representations = con.get_representations(
project_name,
version_ids=linked_version_ids,
fields=["id"])
diff --git a/openpype/client/server/openpype_comp.py b/openpype/client/server/openpype_comp.py
index a123fe3167..71a141e913 100644
--- a/openpype/client/server/openpype_comp.py
+++ b/openpype/client/server/openpype_comp.py
@@ -1,4 +1,7 @@
import collections
+import json
+
+import six
from ayon_api.graphql import GraphQlQuery, FIELD_VALUE, fields_to_dict
from .constants import DEFAULT_FOLDER_FIELDS
@@ -84,12 +87,12 @@ def get_folders_with_tasks(
for folder. All possible folder fields are returned if 'None'
is passed.
- Returns:
- List[Dict[str, Any]]: Queried folder entities.
+ Yields:
+ Dict[str, Any]: Queried folder entities.
"""
if not project_name:
- return []
+ return
filters = {
"projectName": project_name
@@ -97,25 +100,25 @@ def get_folders_with_tasks(
if folder_ids is not None:
folder_ids = set(folder_ids)
if not folder_ids:
- return []
+ return
filters["folderIds"] = list(folder_ids)
if folder_paths is not None:
folder_paths = set(folder_paths)
if not folder_paths:
- return []
+ return
filters["folderPaths"] = list(folder_paths)
if folder_names is not None:
folder_names = set(folder_names)
if not folder_names:
- return []
+ return
filters["folderNames"] = list(folder_names)
if parent_ids is not None:
parent_ids = set(parent_ids)
if not parent_ids:
- return []
+ return
if None in parent_ids:
# Replace 'None' with '"root"' which is used during GraphQl
# query for parent ids filter for folders without folder
@@ -147,10 +150,10 @@ def get_folders_with_tasks(
parsed_data = query.query(con)
folders = parsed_data["project"]["folders"]
- if active is None:
- return folders
- return [
- folder
- for folder in folders
- if folder["active"] is active
- ]
+ for folder in folders:
+ if active is not None and folder["active"] is not active:
+ continue
+ folder_data = folder.get("data")
+ if isinstance(folder_data, six.string_types):
+ folder["data"] = json.loads(folder_data)
+ yield folder
diff --git a/openpype/client/server/operations.py b/openpype/client/server/operations.py
index eeb55784e1..eddc1eaf60 100644
--- a/openpype/client/server/operations.py
+++ b/openpype/client/server/operations.py
@@ -5,7 +5,6 @@ import uuid
import datetime
from bson.objectid import ObjectId
-from ayon_api import get_server_api_connection
from openpype.client.operations_base import (
REMOVED_VALUE,
@@ -41,7 +40,7 @@ from .conversion_utils import (
convert_update_representation_to_v4,
convert_update_workfile_info_to_v4,
)
-from .utils import create_entity_id
+from .utils import create_entity_id, get_ayon_server_api_connection
def _create_or_convert_to_id(entity_id=None):
@@ -422,7 +421,7 @@ def failed_json_default(value):
class ServerCreateOperation(CreateOperation):
- """Opeartion to create an entity.
+ """Operation to create an entity.
Args:
project_name (str): On which project operation will happen.
@@ -634,7 +633,7 @@ class ServerUpdateOperation(UpdateOperation):
class ServerDeleteOperation(DeleteOperation):
- """Opeartion to delete an entity.
+ """Operation to delete an entity.
Args:
project_name (str): On which project operation will happen.
@@ -647,7 +646,7 @@ class ServerDeleteOperation(DeleteOperation):
self._session = session
if entity_type == "asset":
- entity_type == "folder"
+ entity_type = "folder"
elif entity_type == "hero_version":
entity_type = "version"
@@ -680,7 +679,7 @@ class OperationsSession(BaseOperationsSession):
def __init__(self, con=None, *args, **kwargs):
super(OperationsSession, self).__init__(*args, **kwargs)
if con is None:
- con = get_server_api_connection()
+ con = get_ayon_server_api_connection()
self._con = con
self._project_cache = {}
self._nested_operations = collections.defaultdict(list)
@@ -858,7 +857,7 @@ def create_project(
"""
if con is None:
- con = get_server_api_connection()
+ con = get_ayon_server_api_connection()
return con.create_project(
project_name,
@@ -870,12 +869,12 @@ def create_project(
def delete_project(project_name, con=None):
if con is None:
- con = get_server_api_connection()
+ con = get_ayon_server_api_connection()
return con.delete_project(project_name)
def create_thumbnail(project_name, src_filepath, thumbnail_id=None, con=None):
if con is None:
- con = get_server_api_connection()
+ con = get_ayon_server_api_connection()
return con.create_thumbnail(project_name, src_filepath, thumbnail_id)
diff --git a/openpype/client/server/utils.py b/openpype/client/server/utils.py
index ed128cfad9..a9dcf539bd 100644
--- a/openpype/client/server/utils.py
+++ b/openpype/client/server/utils.py
@@ -1,8 +1,33 @@
+import os
import uuid
+import ayon_api
+
from openpype.client.operations_base import REMOVED_VALUE
+class _GlobalCache:
+ initialized = False
+
+
+def get_ayon_server_api_connection():
+ if _GlobalCache.initialized:
+ con = ayon_api.get_server_api_connection()
+ else:
+ from openpype.lib.local_settings import get_local_site_id
+
+ _GlobalCache.initialized = True
+ site_id = get_local_site_id()
+ version = os.getenv("AYON_VERSION")
+ if ayon_api.is_connection_created():
+ con = ayon_api.get_server_api_connection()
+ con.set_site_id(site_id)
+ con.set_client_version(version)
+ else:
+ con = ayon_api.create_connection(site_id, version)
+ return con
+
+
def create_entity_id():
return uuid.uuid1().hex
diff --git a/openpype/hooks/pre_add_last_workfile_arg.py b/openpype/hooks/pre_add_last_workfile_arg.py
index 1418bc210b..6e255ae82a 100644
--- a/openpype/hooks/pre_add_last_workfile_arg.py
+++ b/openpype/hooks/pre_add_last_workfile_arg.py
@@ -27,6 +27,7 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
"tvpaint",
"substancepainter",
"aftereffects",
+ "wrap"
}
launch_types = {LaunchTypes.local}
diff --git a/openpype/hooks/pre_copy_template_workfile.py b/openpype/hooks/pre_copy_template_workfile.py
index 2203ff4396..4d91d83c95 100644
--- a/openpype/hooks/pre_copy_template_workfile.py
+++ b/openpype/hooks/pre_copy_template_workfile.py
@@ -19,7 +19,8 @@ class CopyTemplateWorkfile(PreLaunchHook):
# Before `AddLastWorkfileToLaunchArgs`
order = 0
- app_groups = {"blender", "photoshop", "tvpaint", "aftereffects"}
+ app_groups = {"blender", "photoshop", "tvpaint", "aftereffects",
+ "wrap"}
launch_types = {LaunchTypes.local}
def execute(self):
diff --git a/openpype/hooks/pre_foundry_apps.py b/openpype/hooks/pre_new_console_apps.py
similarity index 82%
rename from openpype/hooks/pre_foundry_apps.py
rename to openpype/hooks/pre_new_console_apps.py
index 7536df4c16..9727b4fb78 100644
--- a/openpype/hooks/pre_foundry_apps.py
+++ b/openpype/hooks/pre_new_console_apps.py
@@ -2,7 +2,7 @@ import subprocess
from openpype.lib.applications import PreLaunchHook, LaunchTypes
-class LaunchFoundryAppsWindows(PreLaunchHook):
+class LaunchNewConsoleApps(PreLaunchHook):
"""Foundry applications have specific way how to launch them.
Nuke is executed "like" python process so it is required to pass
@@ -13,13 +13,15 @@ class LaunchFoundryAppsWindows(PreLaunchHook):
# Should be as last hook because must change launch arguments to string
order = 1000
- app_groups = {"nuke", "nukeassist", "nukex", "hiero", "nukestudio"}
+ app_groups = {
+ "nuke", "nukeassist", "nukex", "hiero", "nukestudio", "mayapy"
+ }
platforms = {"windows"}
launch_types = {LaunchTypes.local}
def execute(self):
# Change `creationflags` to CREATE_NEW_CONSOLE
- # - on Windows nuke will create new window using its console
+ # - on Windows some apps will create new window using its console
# Set `stdout` and `stderr` to None so new created console does not
# have redirected output to DEVNULL in build
self.launch_context.kwargs.update({
diff --git a/openpype/host/host.py b/openpype/host/host.py
index 630fb873a8..afe06d1f55 100644
--- a/openpype/host/host.py
+++ b/openpype/host/host.py
@@ -170,7 +170,7 @@ class HostBase(object):
if project_name:
items.append(project_name)
if asset_name:
- items.append(asset_name)
+ items.append(asset_name.lstrip("/"))
if task_name:
items.append(task_name)
if items:
diff --git a/openpype/hosts/aftereffects/api/README.md b/openpype/hosts/aftereffects/api/README.md
index 790c9f859a..9c4bad3689 100644
--- a/openpype/hosts/aftereffects/api/README.md
+++ b/openpype/hosts/aftereffects/api/README.md
@@ -1,6 +1,6 @@
# AfterEffects Integration
-Requirements: This extension requires use of Javascript engine, which is
+Requirements: This extension requires use of Javascript engine, which is
available since CC 16.0.
Please check your File>Project Settings>Expressions>Expressions Engine
@@ -13,26 +13,28 @@ The After Effects integration requires two components to work; `extension` and `
To install the extension download [Extension Manager Command Line tool (ExManCmd)](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#option-2---exmancmd).
```
-ExManCmd /install {path to avalon-core}\avalon\photoshop\extension.zxp
+ExManCmd /install {path to addon}/api/extension.zxp
```
OR
download [Anastasiy’s Extension Manager](https://install.anastasiy.com/)
+`{path to addon}` will be most likely in your AppData (on Windows, in your user data folder in Linux and MacOS.)
+
### Server
The easiest way to get the server and After Effects launch is with:
```
-python -c ^"import avalon.photoshop;avalon.aftereffects.launch(""c:\Program Files\Adobe\Adobe After Effects 2020\Support Files\AfterFX.exe"")^"
+python -c ^"import openpype.hosts.aftereffects;openpype.hosts.aftereffects.launch(""c:\Program Files\Adobe\Adobe After Effects 2020\Support Files\AfterFX.exe"")^"
```
`avalon.aftereffects.launch` launches the application and server, and also closes the server when After Effects exists.
## Usage
-The After Effects extension can be found under `Window > Extensions > OpenPype`. Once launched you should be presented with a panel like this:
+The After Effects extension can be found under `Window > Extensions > AYON`. Once launched you should be presented with a panel like this:
-
+
## Developing
@@ -43,8 +45,8 @@ When developing the extension you can load it [unsigned](https://github.com/Adob
When signing the extension you can use this [guide](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#package-distribute-install-guide).
```
-ZXPSignCmd -selfSignedCert NA NA Avalon Avalon-After-Effects avalon extension.p12
-ZXPSignCmd -sign {path to avalon-core}\avalon\aftereffects\extension {path to avalon-core}\avalon\aftereffects\extension.zxp extension.p12 avalon
+ZXPSignCmd -selfSignedCert NA NA Ayon Avalon-After-Effects Ayon extension.p12
+ZXPSignCmd -sign {path to addon}/api/extension {path to addon}/api/extension.zxp extension.p12 Ayon
```
### Plugin Examples
@@ -52,14 +54,14 @@ ZXPSignCmd -sign {path to avalon-core}\avalon\aftereffects\extension {path to av
These plugins were made with the [polly config](https://github.com/mindbender-studio/config). To fully integrate and load, you will have to use this config and add `image` to the [integration plugin](https://github.com/mindbender-studio/config/blob/master/polly/plugins/publish/integrate_asset.py).
Expected deployed extension location on default Windows:
-`c:\Program Files (x86)\Common Files\Adobe\CEP\extensions\com.openpype.AE.panel`
+`c:\Program Files (x86)\Common Files\Adobe\CEP\extensions\io.ynput.AE.panel`
For easier debugging of Javascript:
https://community.adobe.com/t5/download-install/adobe-extension-debuger-problem/td-p/10911704?page=1
Add (optional) --enable-blink-features=ShadowDOMV0,CustomElementsV0 when starting Chrome
then localhost:8092
-Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01
+Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01
## Resources
- https://javascript-tools-guide.readthedocs.io/introduction/index.html
- https://github.com/Adobe-CEP/Getting-Started-guides
diff --git a/openpype/hosts/aftereffects/api/extension.zxp b/openpype/hosts/aftereffects/api/extension.zxp
index 933dc7dc6c..104a5c9e99 100644
Binary files a/openpype/hosts/aftereffects/api/extension.zxp and b/openpype/hosts/aftereffects/api/extension.zxp differ
diff --git a/openpype/hosts/aftereffects/api/extension/.debug b/openpype/hosts/aftereffects/api/extension/.debug
index b06ec515dd..20a6713ab2 100644
--- a/openpype/hosts/aftereffects/api/extension/.debug
+++ b/openpype/hosts/aftereffects/api/extension/.debug
@@ -1,32 +1,31 @@
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
\ No newline at end of file
diff --git a/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml b/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml
index 7329a9e723..cf6ba67f44 100644
--- a/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml
+++ b/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml
@@ -1,8 +1,8 @@
-
+
-
+
@@ -38,7 +38,7 @@
-
+
./index.html
@@ -49,7 +49,7 @@
Panel
-
+
200
@@ -66,7 +66,7 @@
- ./icons/iconNormal.png
+ ./icons/ayon_logo.png
./icons/iconRollover.png
./icons/iconDisabled.png
./icons/iconDarkNormal.png
diff --git a/openpype/hosts/aftereffects/api/extension/icons/ayon_logo.png b/openpype/hosts/aftereffects/api/extension/icons/ayon_logo.png
new file mode 100644
index 0000000000..3a96f8e2b4
Binary files /dev/null and b/openpype/hosts/aftereffects/api/extension/icons/ayon_logo.png differ
diff --git a/openpype/hosts/aftereffects/api/panel.png b/openpype/hosts/aftereffects/api/panel.png
new file mode 100644
index 0000000000..d05ed35428
Binary files /dev/null and b/openpype/hosts/aftereffects/api/panel.png differ
diff --git a/openpype/hosts/aftereffects/api/panel_failure.PNG b/openpype/hosts/aftereffects/api/panel_failure.PNG
deleted file mode 100644
index 67afc4e212..0000000000
Binary files a/openpype/hosts/aftereffects/api/panel_failure.PNG and /dev/null differ
diff --git a/openpype/hosts/aftereffects/api/panel_failure.png b/openpype/hosts/aftereffects/api/panel_failure.png
new file mode 100644
index 0000000000..6e52a77d22
Binary files /dev/null and b/openpype/hosts/aftereffects/api/panel_failure.png differ
diff --git a/openpype/hosts/aftereffects/api/pipeline.py b/openpype/hosts/aftereffects/api/pipeline.py
index 8fc7a70dd8..e059f7c272 100644
--- a/openpype/hosts/aftereffects/api/pipeline.py
+++ b/openpype/hosts/aftereffects/api/pipeline.py
@@ -74,11 +74,6 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
- log.info(PUBLISH_PATH)
-
- pyblish.api.register_callback(
- "instanceToggled", on_pyblish_instance_toggled
- )
register_event_callback("application.launched", application_launch)
@@ -186,11 +181,6 @@ def application_launch():
check_inventory()
-def on_pyblish_instance_toggled(instance, old_value, new_value):
- """Toggle layer visibility on instance toggles."""
- instance[0].Visible = new_value
-
-
def ls():
"""Yields containers from active AfterEffects document.
diff --git a/openpype/hosts/aftereffects/plugins/create/create_render.py b/openpype/hosts/aftereffects/plugins/create/create_render.py
index fbe600ae68..fadfc0c206 100644
--- a/openpype/hosts/aftereffects/plugins/create/create_render.py
+++ b/openpype/hosts/aftereffects/plugins/create/create_render.py
@@ -56,16 +56,15 @@ class RenderCreator(Creator):
use_composition_name = (pre_create_data.get("use_composition_name") or
len(comps) > 1)
for comp in comps:
+ composition_name = re.sub(
+ "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
+ "",
+ comp.name
+ )
if use_composition_name:
if "{composition}" not in subset_name_from_ui.lower():
subset_name_from_ui += "{Composition}"
- composition_name = re.sub(
- "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
- "",
- comp.name
- )
-
dynamic_fill = prepare_template_data({"composition":
composition_name})
subset_name = subset_name_from_ui.format(**dynamic_fill)
@@ -81,6 +80,8 @@ class RenderCreator(Creator):
inst.subset_name))
data["members"] = [comp.id]
+ data["orig_comp_name"] = composition_name
+
new_instance = CreatedInstance(self.family, subset_name, data,
self)
if "farm" in pre_create_data:
@@ -88,7 +89,7 @@ class RenderCreator(Creator):
new_instance.creator_attributes["farm"] = use_farm
review = pre_create_data["mark_for_review"]
- new_instance.creator_attributes["mark_for_review"] = review
+ new_instance.creator_attributes["mark_for_review"] = review
api.get_stub().imprint(new_instance.id,
new_instance.data_to_store())
@@ -150,16 +151,18 @@ class RenderCreator(Creator):
subset_change.new_value)
def remove_instances(self, instances):
+ """Removes metadata and renames to original comp name if available."""
for instance in instances:
self._remove_instance_from_context(instance)
self.host.remove_instance(instance)
- subset = instance.data["subset"]
comp_id = instance.data["members"][0]
comp = api.get_stub().get_item(comp_id)
+ orig_comp_name = instance.data.get("orig_comp_name")
if comp:
- new_comp_name = comp.name.replace(subset, '')
- if not new_comp_name:
+ if orig_comp_name:
+ new_comp_name = orig_comp_name
+ else:
new_comp_name = "dummyCompName"
api.get_stub().rename_item(comp_id,
new_comp_name)
diff --git a/openpype/hosts/aftereffects/plugins/create/workfile_creator.py b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py
index 2e7b9d4a7e..5dc3d6592d 100644
--- a/openpype/hosts/aftereffects/plugins/create/workfile_creator.py
+++ b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py
@@ -1,3 +1,4 @@
+from openpype import AYON_SERVER_ENABLED
import openpype.hosts.aftereffects.api as api
from openpype.client import get_asset_by_name
from openpype.pipeline import (
@@ -43,6 +44,14 @@ class AEWorkfileCreator(AutoCreator):
task_name = context.get_current_task_name()
host_name = context.host_name
+ existing_asset_name = None
+ if existing_instance is not None:
+ if AYON_SERVER_ENABLED:
+ existing_asset_name = existing_instance.get("folderPath")
+
+ if existing_asset_name is None:
+ existing_asset_name = existing_instance["asset"]
+
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
@@ -50,10 +59,13 @@ class AEWorkfileCreator(AutoCreator):
project_name, host_name
)
data = {
- "asset": asset_name,
"task": task_name,
"variant": self.default_variant
}
+ if AYON_SERVER_ENABLED:
+ data["folderPath"] = asset_name
+ else:
+ data["asset"] = asset_name
data.update(self.get_dynamic_data(
self.default_variant, task_name, asset_doc,
project_name, host_name, None
@@ -68,7 +80,7 @@ class AEWorkfileCreator(AutoCreator):
new_instance.data_to_store())
elif (
- existing_instance["asset"] != asset_name
+ existing_asset_name != asset_name
or existing_instance["task"] != task_name
):
asset_doc = get_asset_by_name(project_name, asset_name)
@@ -76,6 +88,10 @@ class AEWorkfileCreator(AutoCreator):
self.default_variant, task_name, asset_doc,
project_name, host_name
)
- existing_instance["asset"] = asset_name
+ if AYON_SERVER_ENABLED:
+ existing_instance["folderPath"] = asset_name
+ else:
+ existing_instance["asset"] = asset_name
+
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name
diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py b/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py
index dc557f67fc..58d2757840 100644
--- a/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py
+++ b/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py
@@ -1,6 +1,8 @@
import os
import pyblish.api
+
+from openpype.client import get_asset_name_identifier
from openpype.pipeline.create import get_subset_name
@@ -48,9 +50,11 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
asset_entity = context.data["assetEntity"]
project_entity = context.data["projectEntity"]
+ asset_name = get_asset_name_identifier(asset_entity)
+
instance_data = {
"active": True,
- "asset": asset_entity["name"],
+ "asset": asset_name,
"task": task,
"frameStart": context.data['frameStart'],
"frameEnd": context.data['frameEnd'],
diff --git a/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py b/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py
index bdb48e11f8..b44e986d83 100644
--- a/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py
+++ b/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py
@@ -60,8 +60,9 @@ class ExtractLocalRender(publish.Extractor):
first_repre = not representations
if instance.data["review"] and first_repre:
repre_data["tags"] = ["review"]
- thumbnail_path = os.path.join(staging_dir, files[0])
- instance.data["thumbnailSource"] = thumbnail_path
+ # TODO return back when Extract from source same as regular
+ # thumbnail_path = os.path.join(staging_dir, files[0])
+ # instance.data["thumbnailSource"] = thumbnail_path
representations.append(repre_data)
diff --git a/openpype/hosts/blender/api/__init__.py b/openpype/hosts/blender/api/__init__.py
index e15f1193a5..ce2b444997 100644
--- a/openpype/hosts/blender/api/__init__.py
+++ b/openpype/hosts/blender/api/__init__.py
@@ -10,6 +10,7 @@ from .pipeline import (
ls,
publish,
containerise,
+ BlenderHost,
)
from .plugin import (
@@ -47,6 +48,7 @@ __all__ = [
"ls",
"publish",
"containerise",
+ "BlenderHost",
"Creator",
"Loader",
diff --git a/openpype/hosts/blender/api/capture.py b/openpype/hosts/blender/api/capture.py
index 849f8ee629..bad6831143 100644
--- a/openpype/hosts/blender/api/capture.py
+++ b/openpype/hosts/blender/api/capture.py
@@ -148,13 +148,14 @@ def applied_view(window, camera, isolate=None, options=None):
area.ui_type = "VIEW_3D"
- meshes = [obj for obj in window.scene.objects if obj.type == "MESH"]
+ types = {"MESH", "GPENCIL"}
+ objects = [obj for obj in window.scene.objects if obj.type in types]
if camera == "AUTO":
space.region_3d.view_perspective = "ORTHO"
- isolate_objects(window, isolate or meshes)
+ isolate_objects(window, isolate or objects)
else:
- isolate_objects(window, isolate or meshes)
+ isolate_objects(window, isolate or objects)
space.camera = window.scene.objects.get(camera)
space.region_3d.view_perspective = "CAMERA"
diff --git a/openpype/hosts/blender/api/lib.py b/openpype/hosts/blender/api/lib.py
index 9bb560c364..e80ed61bc8 100644
--- a/openpype/hosts/blender/api/lib.py
+++ b/openpype/hosts/blender/api/lib.py
@@ -188,7 +188,7 @@ def imprint(node: bpy.types.bpy_struct_meta_idprop, data: Dict):
# Support values evaluated at imprint
value = value()
- if not isinstance(value, (int, float, bool, str, list)):
+ if not isinstance(value, (int, float, bool, str, list, dict)):
raise TypeError(f"Unsupported type: {type(value)}")
imprint_data[key] = value
@@ -266,9 +266,59 @@ def read(node: bpy.types.bpy_struct_meta_idprop):
return data
-def get_selection() -> List[bpy.types.Object]:
- """Return the selected objects from the current scene."""
- return [obj for obj in bpy.context.scene.objects if obj.select_get()]
+def get_selected_collections():
+ """
+ Returns a list of the currently selected collections in the outliner.
+
+ Raises:
+ RuntimeError: If the outliner cannot be found in the main Blender
+ window.
+
+ Returns:
+ list: A list of `bpy.types.Collection` objects that are currently
+ selected in the outliner.
+ """
+ window = bpy.context.window or bpy.context.window_manager.windows[0]
+
+ try:
+ area = next(
+ area for area in window.screen.areas
+ if area.type == 'OUTLINER')
+ region = next(
+ region for region in area.regions
+ if region.type == 'WINDOW')
+ except StopIteration as e:
+ raise RuntimeError("Could not find outliner. An outliner space "
+ "must be in the main Blender window.") from e
+
+ with bpy.context.temp_override(
+ window=window,
+ area=area,
+ region=region,
+ screen=window.screen
+ ):
+ ids = bpy.context.selected_ids
+
+ return [id for id in ids if isinstance(id, bpy.types.Collection)]
+
+
+def get_selection(include_collections: bool = False) -> List[bpy.types.Object]:
+ """
+ Returns a list of selected objects in the current Blender scene.
+
+ Args:
+ include_collections (bool, optional): Whether to include selected
+ collections in the result. Defaults to False.
+
+ Returns:
+ List[bpy.types.Object]: A list of selected objects.
+ """
+ selection = [obj for obj in bpy.context.scene.objects if obj.select_get()]
+
+ if include_collections:
+ selection.extend(get_selected_collections())
+
+ return selection
@contextlib.contextmanager
diff --git a/openpype/hosts/blender/api/ops.py b/openpype/hosts/blender/api/ops.py
index 0eb90eeff9..f4d96e563a 100644
--- a/openpype/hosts/blender/api/ops.py
+++ b/openpype/hosts/blender/api/ops.py
@@ -31,6 +31,14 @@ PREVIEW_COLLECTIONS: Dict = dict()
TIMER_INTERVAL: float = 0.01 if platform.system() == "Windows" else 0.1
+def execute_function_in_main_thread(f):
+ """Decorator to move a function call into main thread items"""
+ def wrapper(*args, **kwargs):
+ mti = MainThreadItem(f, *args, **kwargs)
+ execute_in_main_thread(mti)
+ return wrapper
+
+
class BlenderApplication(QtWidgets.QApplication):
_instance = None
blender_windows = {}
@@ -238,8 +246,24 @@ class LaunchQtApp(bpy.types.Operator):
self.before_window_show()
+ def pull_to_front(window):
+ """Pull window forward to screen.
+
+ If Window is minimized this will un-minimize, then it can be raised
+ and activated to the front.
+ """
+ window.setWindowState(
+ (window.windowState() & ~QtCore.Qt.WindowMinimized) |
+ QtCore.Qt.WindowActive
+ )
+ window.raise_()
+ window.activateWindow()
+
if isinstance(self._window, ModuleType):
self._window.show()
+ pull_to_front(self._window)
+
+ # Pull window to the front
window = None
if hasattr(self._window, "window"):
window = self._window.window
@@ -254,6 +278,7 @@ class LaunchQtApp(bpy.types.Operator):
on_top_flags = origin_flags | QtCore.Qt.WindowStaysOnTopHint
self._window.setWindowFlags(on_top_flags)
self._window.show()
+ pull_to_front(self._window)
# if on_top_flags != origin_flags:
# self._window.setWindowFlags(origin_flags)
@@ -275,6 +300,10 @@ class LaunchCreator(LaunchQtApp):
def before_window_show(self):
self._window.refresh()
+ def execute(self, context):
+ host_tools.show_publisher(tab="create")
+ return {"FINISHED"}
+
class LaunchLoader(LaunchQtApp):
"""Launch Avalon Loader."""
@@ -284,6 +313,8 @@ class LaunchLoader(LaunchQtApp):
_tool_name = "loader"
def before_window_show(self):
+ if AYON_SERVER_ENABLED:
+ return
self._window.set_context(
{"asset": get_current_asset_name()},
refresh=True
@@ -297,7 +328,7 @@ class LaunchPublisher(LaunchQtApp):
bl_label = "Publish..."
def execute(self, context):
- host_tools.show_publish()
+ host_tools.show_publisher(tab="publish")
return {"FINISHED"}
@@ -309,6 +340,8 @@ class LaunchManager(LaunchQtApp):
_tool_name = "sceneinventory"
def before_window_show(self):
+ if AYON_SERVER_ENABLED:
+ return
self._window.refresh()
@@ -320,6 +353,8 @@ class LaunchLibrary(LaunchQtApp):
_tool_name = "libraryloader"
def before_window_show(self):
+ if AYON_SERVER_ENABLED:
+ return
self._window.refresh()
@@ -340,6 +375,8 @@ class LaunchWorkFiles(LaunchQtApp):
return result
def before_window_show(self):
+ if AYON_SERVER_ENABLED:
+ return
self._window.root = str(Path(
os.environ.get("AVALON_WORKDIR", ""),
os.environ.get("AVALON_SCENEDIR", ""),
@@ -408,7 +445,6 @@ class TOPBAR_MT_avalon(bpy.types.Menu):
layout.operator(SetResolution.bl_idname, text="Set Resolution")
layout.separator()
layout.operator(LaunchWorkFiles.bl_idname, text="Work Files...")
- # TODO (jasper): maybe add 'Reload Pipeline'
def draw_avalon_menu(self, context):
diff --git a/openpype/hosts/blender/api/pipeline.py b/openpype/hosts/blender/api/pipeline.py
index 29339a512c..b386dd49d3 100644
--- a/openpype/hosts/blender/api/pipeline.py
+++ b/openpype/hosts/blender/api/pipeline.py
@@ -10,6 +10,12 @@ from . import ops
import pyblish.api
+from openpype.host import (
+ HostBase,
+ IWorkfileHost,
+ IPublishHost,
+ ILoadHost
+)
from openpype.client import get_asset_by_name
from openpype.pipeline import (
schema,
@@ -29,6 +35,14 @@ from openpype.lib import (
)
import openpype.hosts.blender
from openpype.settings import get_project_settings
+from .workio import (
+ open_file,
+ save_file,
+ current_file,
+ has_unsaved_changes,
+ file_extensions,
+ work_root,
+)
HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.blender.__file__))
@@ -47,6 +61,101 @@ IS_HEADLESS = bpy.app.background
log = Logger.get_logger(__name__)
+class BlenderHost(HostBase, IWorkfileHost, IPublishHost, ILoadHost):
+ name = "blender"
+
+ def install(self):
+ """Override install method from HostBase.
+ Install Blender host functionality."""
+ install()
+
+ def get_containers(self) -> Iterator:
+ """List containers from active Blender scene."""
+ return ls()
+
+ def get_workfile_extensions(self) -> List[str]:
+ """Override get_workfile_extensions method from IWorkfileHost.
+ Get workfile possible extensions.
+
+ Returns:
+ List[str]: Workfile extensions.
+ """
+ return file_extensions()
+
+ def save_workfile(self, dst_path: str = None):
+ """Override save_workfile method from IWorkfileHost.
+ Save currently opened workfile.
+
+ Args:
+ dst_path (str): Where the current scene should be saved. Or use
+ current path if `None` is passed.
+ """
+ save_file(dst_path if dst_path else bpy.data.filepath)
+
+ def open_workfile(self, filepath: str):
+ """Override open_workfile method from IWorkfileHost.
+ Open workfile at specified filepath in the host.
+
+ Args:
+ filepath (str): Path to workfile.
+ """
+ open_file(filepath)
+
+ def get_current_workfile(self) -> str:
+ """Override get_current_workfile method from IWorkfileHost.
+ Retrieve currently opened workfile path.
+
+ Returns:
+ str: Path to currently opened workfile.
+ """
+ return current_file()
+
+ def workfile_has_unsaved_changes(self) -> bool:
+ """Override workfile_has_unsaved_changes method from IWorkfileHost.
+ Returns True if opened workfile has unsaved changes.
+
+ Returns:
+ bool: True if the scene has unsaved modifications, False if it
+ is saved.
+ """
+ return has_unsaved_changes()
+
+ def work_root(self, session) -> str:
+ """Override work_root method from IWorkfileHost.
+ Modify workdir per host.
+
+ Args:
+ session (dict): Session context data.
+
+ Returns:
+ str: Path to new workdir.
+ """
+ return work_root(session)
+
+ def get_context_data(self) -> dict:
+ """Override abstract method from IPublishHost.
+ Get global data related to creation-publishing from workfile.
+
+ Returns:
+ dict: Context data stored using 'update_context_data'.
+ """
+ property = bpy.context.scene.get(AVALON_PROPERTY)
+ if property:
+ return property.to_dict()
+ return {}
+
+ def update_context_data(self, data: dict, changes: dict):
+ """Override abstract method from IPublishHost.
+ Store global context data to workfile.
+
+ Args:
+ data (dict): New data as are.
+ changes (dict): Only data that has been changed. Each value has
+ tuple with '(old value, new value)' value.
+ """
+ bpy.context.scene[AVALON_PROPERTY] = data
+
+
def pype_excepthook_handler(*args):
traceback.print_exception(*args)
@@ -460,36 +569,6 @@ def ls() -> Iterator:
yield parse_container(container)
-def update_hierarchy(containers):
- """Hierarchical container support
-
- This is the function to support Scene Inventory to draw hierarchical
- view for containers.
-
- We need both parent and children to visualize the graph.
-
- """
-
- all_containers = set(ls()) # lookup set
-
- for container in containers:
- # Find parent
- # FIXME (jasperge): re-evaluate this. How would it be possible
- # to 'nest' assets? Collections can have several parents, for
- # now assume it has only 1 parent
- parent = [
- coll for coll in bpy.data.collections if container in coll.children
- ]
- for node in parent:
- if node in all_containers:
- container["parent"] = node
- break
-
- log.debug("Container: %s", container)
-
- yield container
-
-
def publish():
"""Shorthand to publish from within host."""
diff --git a/openpype/hosts/blender/api/plugin.py b/openpype/hosts/blender/api/plugin.py
index fb87d08cce..568d8f6695 100644
--- a/openpype/hosts/blender/api/plugin.py
+++ b/openpype/hosts/blender/api/plugin.py
@@ -1,28 +1,34 @@
"""Shared functionality for pipeline plugins for Blender."""
+import itertools
from pathlib import Path
from typing import Dict, List, Optional
import bpy
+from openpype import AYON_SERVER_ENABLED
from openpype.pipeline import (
- LegacyCreator,
+ Creator,
+ CreatedInstance,
LoaderPlugin,
)
-from .pipeline import AVALON_CONTAINERS
+from openpype.lib import BoolDef
+
+from .pipeline import (
+ AVALON_CONTAINERS,
+ AVALON_INSTANCES,
+ AVALON_PROPERTY,
+)
from .ops import (
MainThreadItem,
execute_in_main_thread
)
-from .lib import (
- imprint,
- get_selection
-)
+from .lib import imprint
VALID_EXTENSIONS = [".blend", ".json", ".abc", ".fbx"]
-def asset_name(
+def prepare_scene_name(
asset: str, subset: str, namespace: Optional[str] = None
) -> str:
"""Return a consistent name for an asset."""
@@ -40,9 +46,16 @@ def get_unique_number(
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
if not avalon_container:
return "01"
- asset_groups = avalon_container.all_objects
-
- container_names = [c.name for c in asset_groups if c.type == 'EMPTY']
+ # Check the names of both object and collection containers
+ obj_asset_groups = avalon_container.objects
+ obj_group_names = {
+ c.name for c in obj_asset_groups
+ if c.type == 'EMPTY' and c.get(AVALON_PROPERTY)}
+ coll_asset_groups = avalon_container.children
+ coll_group_names = {
+ c.name for c in coll_asset_groups
+ if c.get(AVALON_PROPERTY)}
+ container_names = obj_group_names.union(coll_group_names)
count = 1
name = f"{asset}_{count:0>2}_{subset}"
while name in container_names:
@@ -134,20 +147,224 @@ def deselect_all():
bpy.context.view_layer.objects.active = active
-class Creator(LegacyCreator):
- """Base class for Creator plug-ins."""
+class BaseCreator(Creator):
+ """Base class for Blender Creator plug-ins."""
defaults = ['Main']
- def process(self):
- collection = bpy.data.collections.new(name=self.data["subset"])
- bpy.context.scene.collection.children.link(collection)
- imprint(collection, self.data)
+ create_as_asset_group = False
- if (self.options or {}).get("useSelection"):
- for obj in get_selection():
- collection.objects.link(obj)
+ @staticmethod
+ def cache_subsets(shared_data):
+ """Cache instances for Creators shared data.
- return collection
+ Create `blender_cached_subsets` key when needed in shared data and
+ fill it with all collected instances from the scene under its
+ respective creator identifiers.
+
+ If legacy instances are detected in the scene, create
+ `blender_cached_legacy_subsets` key and fill it with legacy
+ subsets grouped by family (family as key, list of subsets as value).
+
+ Args:
+ shared_data(Dict[str, Any]): Shared data.
+
+ Return:
+ Dict[str, Any]: Shared data with cached subsets.
+ """
+ if not shared_data.get('blender_cached_subsets'):
+ cache = {}
+ cache_legacy = {}
+
+ avalon_instances = bpy.data.collections.get(AVALON_INSTANCES)
+ avalon_instance_objs = (
+ avalon_instances.objects if avalon_instances else []
+ )
+
+ for obj_or_col in itertools.chain(
+ avalon_instance_objs,
+ bpy.data.collections
+ ):
+ avalon_prop = obj_or_col.get(AVALON_PROPERTY, {})
+ if not avalon_prop:
+ continue
+
+ if avalon_prop.get('id') != 'pyblish.avalon.instance':
+ continue
+
+ creator_id = avalon_prop.get('creator_identifier')
+ if creator_id:
+ # Creator instance
+ cache.setdefault(creator_id, []).append(obj_or_col)
+ else:
+ family = avalon_prop.get('family')
+ if family:
+ # Legacy creator instance
+ cache_legacy.setdefault(family, []).append(obj_or_col)
+
+ shared_data["blender_cached_subsets"] = cache
+ shared_data["blender_cached_legacy_subsets"] = cache_legacy
+
+ return shared_data
+
+ def create(
+ self, subset_name: str, instance_data: dict, pre_create_data: dict
+ ):
+ """Override abstract method from Creator.
+ Create new instance and store it.
+
+ Args:
+ subset_name(str): Subset name of created instance.
+ instance_data(dict): Instance base data.
+ pre_create_data(dict): Data based on pre creation attributes.
+ Those may affect how creator works.
+ """
+ # Get Instance Container or create it if it does not exist
+ instances = bpy.data.collections.get(AVALON_INSTANCES)
+ if not instances:
+ instances = bpy.data.collections.new(name=AVALON_INSTANCES)
+ bpy.context.scene.collection.children.link(instances)
+
+ # Create asset group
+ if AYON_SERVER_ENABLED:
+ asset_name = instance_data["folderPath"]
+ else:
+ asset_name = instance_data["asset"]
+
+ name = prepare_scene_name(asset_name, subset_name)
+ if self.create_as_asset_group:
+ # Create instance as empty
+ instance_node = bpy.data.objects.new(name=name, object_data=None)
+ instance_node.empty_display_type = 'SINGLE_ARROW'
+ instances.objects.link(instance_node)
+ else:
+ # Create instance collection
+ instance_node = bpy.data.collections.new(name=name)
+ instances.children.link(instance_node)
+
+ self.set_instance_data(subset_name, instance_data)
+
+ instance = CreatedInstance(
+ self.family, subset_name, instance_data, self
+ )
+ instance.transient_data["instance_node"] = instance_node
+ self._add_instance_to_context(instance)
+
+ imprint(instance_node, instance_data)
+
+ return instance_node
+
+ def collect_instances(self):
+ """Override abstract method from BaseCreator.
+ Collect existing instances related to this creator plugin."""
+
+ # Cache subsets in shared data
+ self.cache_subsets(self.collection_shared_data)
+
+ # Get cached subsets
+ cached_subsets = self.collection_shared_data.get(
+ "blender_cached_subsets"
+ )
+ if not cached_subsets:
+ return
+
+ # Process only instances that were created by this creator
+ for instance_node in cached_subsets.get(self.identifier, []):
+ property = instance_node.get(AVALON_PROPERTY)
+ # Create instance object from existing data
+ instance = CreatedInstance.from_existing(
+ instance_data=property.to_dict(),
+ creator=self
+ )
+ instance.transient_data["instance_node"] = instance_node
+
+ # Add instance to create context
+ self._add_instance_to_context(instance)
+
+ def update_instances(self, update_list):
+ """Override abstract method from BaseCreator.
+ Store changes of existing instances so they can be recollected.
+
+ Args:
+ update_list(List[UpdateData]): Changed instances
+ and their changes, as a list of tuples.
+ """
+
+ if AYON_SERVER_ENABLED:
+ asset_name_key = "folderPath"
+ else:
+ asset_name_key = "asset"
+
+ for created_instance, changes in update_list:
+ data = created_instance.data_to_store()
+ node = created_instance.transient_data["instance_node"]
+ if not node:
+ # We can't update if we don't know the node
+ self.log.error(
+ f"Unable to update instance {created_instance} "
+ f"without instance node."
+ )
+ return
+
+ # Rename the instance node in the scene if subset or asset changed
+ if (
+ "subset" in changes.changed_keys
+ or asset_name_key in changes.changed_keys
+ ):
+ asset_name = data[asset_name_key]
+ name = prepare_scene_name(
+ asset=asset_name, subset=data["subset"]
+ )
+ node.name = name
+
+ imprint(node, data)
+
+ def remove_instances(self, instances: List[CreatedInstance]):
+
+ for instance in instances:
+ node = instance.transient_data["instance_node"]
+
+ if isinstance(node, bpy.types.Collection):
+ for children in node.children_recursive:
+ if isinstance(children, bpy.types.Collection):
+ bpy.data.collections.remove(children)
+ else:
+ bpy.data.objects.remove(children)
+
+ bpy.data.collections.remove(node)
+ elif isinstance(node, bpy.types.Object):
+ bpy.data.objects.remove(node)
+
+ self._remove_instance_from_context(instance)
+
+ def set_instance_data(
+ self,
+ subset_name: str,
+ instance_data: dict
+ ):
+ """Fill instance data with required items.
+
+ Args:
+ subset_name(str): Subset name of created instance.
+ instance_data(dict): Instance base data.
+ """
+ if not instance_data:
+ instance_data = {}
+
+ instance_data.update(
+ {
+ "id": "pyblish.avalon.instance",
+ "creator_identifier": self.identifier,
+ "subset": subset_name,
+ }
+ )
+
+ def get_pre_create_attr_defs(self):
+ return [
+ BoolDef("use_selection",
+ label="Use selection",
+ default=True)
+ ]
class Loader(LoaderPlugin):
@@ -241,7 +458,7 @@ class AssetLoader(LoaderPlugin):
namespace: Use pre-defined namespace
options: Additional settings dictionary
"""
- # TODO (jasper): make it possible to add the asset several times by
+ # TODO: make it possible to add the asset several times by
# just re-using the collection
filepath = self.filepath_from_context(context)
assert Path(filepath).exists(), f"{filepath} doesn't exist."
@@ -252,7 +469,7 @@ class AssetLoader(LoaderPlugin):
asset, subset
)
namespace = namespace or f"{asset}_{unique_number}"
- name = name or asset_name(
+ name = name or prepare_scene_name(
asset, subset, unique_number
)
@@ -281,7 +498,9 @@ class AssetLoader(LoaderPlugin):
# asset = context["asset"]["name"]
# subset = context["subset"]["name"]
- # instance_name = asset_name(asset, subset, unique_number) + '_CON'
+ # instance_name = prepare_scene_name(
+ # asset, subset, unique_number
+ # ) + '_CON'
# return self._get_instance_collection(instance_name, nodes)
diff --git a/openpype/hosts/blender/api/render_lib.py b/openpype/hosts/blender/api/render_lib.py
index d564b5ebcb..b437078ad8 100644
--- a/openpype/hosts/blender/api/render_lib.py
+++ b/openpype/hosts/blender/api/render_lib.py
@@ -1,4 +1,4 @@
-import os
+from pathlib import Path
import bpy
@@ -59,7 +59,7 @@ def get_render_product(output_path, name, aov_sep):
instance (pyblish.api.Instance): The instance to publish.
ext (str): The image format to render.
"""
- filepath = os.path.join(output_path, name)
+ filepath = output_path / name.lstrip("/")
render_product = f"{filepath}{aov_sep}beauty.####"
render_product = render_product.replace("\\", "/")
@@ -180,7 +180,7 @@ def set_node_tree(output_path, name, aov_sep, ext, multilayer):
return []
output.file_slots.clear()
- output.base_path = output_path
+ output.base_path = str(output_path)
aov_file_products = []
@@ -191,8 +191,9 @@ def set_node_tree(output_path, name, aov_sep, ext, multilayer):
output.file_slots.new(filepath)
- aov_file_products.append(
- (render_pass.name, os.path.join(output_path, filepath)))
+ filename = str(output_path / filepath.lstrip("/"))
+
+ aov_file_products.append((render_pass.name, filename))
node_input = output.inputs[-1]
@@ -214,12 +215,11 @@ def imprint_render_settings(node, data):
def prepare_rendering(asset_group):
name = asset_group.name
- filepath = bpy.data.filepath
+ filepath = Path(bpy.data.filepath)
assert filepath, "Workfile not saved. Please save the file first."
- file_path = os.path.dirname(filepath)
- file_name = os.path.basename(filepath)
- file_name, _ = os.path.splitext(file_name)
+ dirpath = filepath.parent
+ file_name = Path(filepath.name).stem
project = get_current_project_name()
settings = get_project_settings(project)
@@ -232,7 +232,7 @@ def prepare_rendering(asset_group):
set_render_format(ext, multilayer)
aov_list, custom_passes = set_render_passes(settings)
- output_path = os.path.join(file_path, render_folder, file_name)
+ output_path = Path.joinpath(dirpath, render_folder, file_name)
render_product = get_render_product(output_path, name, aov_sep)
aov_file_product = set_node_tree(
diff --git a/openpype/hosts/blender/blender_addon/startup/init.py b/openpype/hosts/blender/blender_addon/startup/init.py
index 8dbff8a91d..603691675d 100644
--- a/openpype/hosts/blender/blender_addon/startup/init.py
+++ b/openpype/hosts/blender/blender_addon/startup/init.py
@@ -1,9 +1,9 @@
from openpype.pipeline import install_host
-from openpype.hosts.blender import api
+from openpype.hosts.blender.api import BlenderHost
def register():
- install_host(api)
+ install_host(BlenderHost())
def unregister():
diff --git a/openpype/hosts/blender/hooks/pre_pyside_install.py b/openpype/hosts/blender/hooks/pre_pyside_install.py
index 777e383215..2aa3a5e49a 100644
--- a/openpype/hosts/blender/hooks/pre_pyside_install.py
+++ b/openpype/hosts/blender/hooks/pre_pyside_install.py
@@ -31,7 +31,7 @@ class InstallPySideToBlender(PreLaunchHook):
def inner_execute(self):
# Get blender's python directory
- version_regex = re.compile(r"^[2-3]\.[0-9]+$")
+ version_regex = re.compile(r"^[2-4]\.[0-9]+$")
platform = system().lower()
executable = self.launch_context.executable.executable_path
diff --git a/openpype/hosts/blender/plugins/create/convert_legacy.py b/openpype/hosts/blender/plugins/create/convert_legacy.py
new file mode 100644
index 0000000000..f05a6b1f5a
--- /dev/null
+++ b/openpype/hosts/blender/plugins/create/convert_legacy.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+"""Converter for legacy Blender subsets."""
+from openpype.pipeline.create.creator_plugins import SubsetConvertorPlugin
+from openpype.hosts.blender.api.lib import imprint
+
+
+class BlenderLegacyConvertor(SubsetConvertorPlugin):
+ """Find and convert any legacy subsets in the scene.
+
+ This Converter will find all legacy subsets in the scene and will
+ transform them to the current system. Since the old subsets doesn't
+ retain any information about their original creators, the only mapping
+ we can do is based on their families.
+
+ Its limitation is that you can have multiple creators creating subset
+ of the same family and there is no way to handle it. This code should
+ nevertheless cover all creators that came with OpenPype.
+
+ """
+ identifier = "io.openpype.creators.blender.legacy"
+ family_to_id = {
+ "action": "io.openpype.creators.blender.action",
+ "camera": "io.openpype.creators.blender.camera",
+ "animation": "io.openpype.creators.blender.animation",
+ "blendScene": "io.openpype.creators.blender.blendscene",
+ "layout": "io.openpype.creators.blender.layout",
+ "model": "io.openpype.creators.blender.model",
+ "pointcache": "io.openpype.creators.blender.pointcache",
+ "render": "io.openpype.creators.blender.render",
+ "review": "io.openpype.creators.blender.review",
+ "rig": "io.openpype.creators.blender.rig",
+ }
+
+ def __init__(self, *args, **kwargs):
+ super(BlenderLegacyConvertor, self).__init__(*args, **kwargs)
+ self.legacy_subsets = {}
+
+ def find_instances(self):
+ """Find legacy subsets in the scene.
+
+ Legacy subsets are the ones that don't have `creator_identifier`
+ parameter on them.
+
+ This is using cached entries done in
+ :py:meth:`~BaseCreator.cache_subsets()`
+
+ """
+ self.legacy_subsets = self.collection_shared_data.get(
+ "blender_cached_legacy_subsets")
+ if not self.legacy_subsets:
+ return
+ self.add_convertor_item(
+ "Found {} incompatible subset{}".format(
+ len(self.legacy_subsets),
+ "s" if len(self.legacy_subsets) > 1 else ""
+ )
+ )
+
+ def convert(self):
+ """Convert all legacy subsets to current.
+
+ It is enough to add `creator_identifier` and `instance_node`.
+
+ """
+ if not self.legacy_subsets:
+ return
+
+ for family, instance_nodes in self.legacy_subsets.items():
+ if family in self.family_to_id:
+ for instance_node in instance_nodes:
+ creator_identifier = self.family_to_id[family]
+ self.log.info(
+ "Converting {} to {}".format(instance_node.name,
+ creator_identifier)
+ )
+ imprint(instance_node, data={
+ "creator_identifier": creator_identifier
+ })
diff --git a/openpype/hosts/blender/plugins/create/create_action.py b/openpype/hosts/blender/plugins/create/create_action.py
index 0203ba74c0..caaa72fe8d 100644
--- a/openpype/hosts/blender/plugins/create/create_action.py
+++ b/openpype/hosts/blender/plugins/create/create_action.py
@@ -2,30 +2,29 @@
import bpy
-from openpype.pipeline import get_current_task_name
-import openpype.hosts.blender.api.plugin
-from openpype.hosts.blender.api import lib
+from openpype.hosts.blender.api import lib, plugin
-class CreateAction(openpype.hosts.blender.api.plugin.Creator):
- """Action output for character rigs"""
+class CreateAction(plugin.BaseCreator):
+ """Action output for character rigs."""
- name = "actionMain"
+ identifier = "io.openpype.creators.blender.action"
label = "Action"
family = "action"
icon = "male"
- def process(self):
+ def create(
+ self, subset_name: str, instance_data: dict, pre_create_data: dict
+ ):
+ # Run parent create method
+ collection = super().create(
+ subset_name, instance_data, pre_create_data
+ )
- asset = self.data["asset"]
- subset = self.data["subset"]
- name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
- collection = bpy.data.collections.new(name=name)
- bpy.context.scene.collection.children.link(collection)
- self.data['task'] = get_current_task_name()
- lib.imprint(collection, self.data)
+ # Get instance name
+ name = plugin.prepare_scene_name(instance_data["asset"], subset_name)
- if (self.options or {}).get("useSelection"):
+ if pre_create_data.get("use_selection"):
for obj in lib.get_selection():
if (obj.animation_data is not None
and obj.animation_data.action is not None):
diff --git a/openpype/hosts/blender/plugins/create/create_animation.py b/openpype/hosts/blender/plugins/create/create_animation.py
index bc2840952b..3a91b2d5ff 100644
--- a/openpype/hosts/blender/plugins/create/create_animation.py
+++ b/openpype/hosts/blender/plugins/create/create_animation.py
@@ -1,51 +1,32 @@
"""Create an animation asset."""
-import bpy
-
-from openpype.pipeline import get_current_task_name
-from openpype.hosts.blender.api import plugin, lib, ops
-from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
+from openpype.hosts.blender.api import plugin, lib
-class CreateAnimation(plugin.Creator):
- """Animation output for character rigs"""
+class CreateAnimation(plugin.BaseCreator):
+ """Animation output for character rigs."""
- name = "animationMain"
+ identifier = "io.openpype.creators.blender.animation"
label = "Animation"
family = "animation"
icon = "male"
- def process(self):
- """ Run the creator on Blender main thread"""
- mti = ops.MainThreadItem(self._process)
- ops.execute_in_main_thread(mti)
+ def create(
+ self, subset_name: str, instance_data: dict, pre_create_data: dict
+ ):
+ # Run parent create method
+ collection = super().create(
+ subset_name, instance_data, pre_create_data
+ )
- def _process(self):
- # Get Instance Container or create it if it does not exist
- instances = bpy.data.collections.get(AVALON_INSTANCES)
- if not instances:
- instances = bpy.data.collections.new(name=AVALON_INSTANCES)
- bpy.context.scene.collection.children.link(instances)
-
- # Create instance object
- # name = self.name
- # if not name:
- asset = self.data["asset"]
- subset = self.data["subset"]
- name = plugin.asset_name(asset, subset)
- # asset_group = bpy.data.objects.new(name=name, object_data=None)
- # asset_group.empty_display_type = 'SINGLE_ARROW'
- asset_group = bpy.data.collections.new(name=name)
- instances.children.link(asset_group)
- self.data['task'] = get_current_task_name()
- lib.imprint(asset_group, self.data)
-
- if (self.options or {}).get("useSelection"):
+ if pre_create_data.get("use_selection"):
selected = lib.get_selection()
for obj in selected:
- asset_group.objects.link(obj)
- elif (self.options or {}).get("asset_group"):
- obj = (self.options or {}).get("asset_group")
- asset_group.objects.link(obj)
+ collection.objects.link(obj)
+ elif pre_create_data.get("asset_group"):
+ # Use for Load Blend automated creation of animation instances
+ # upon loading rig files
+ obj = pre_create_data.get("asset_group")
+ collection.objects.link(obj)
- return asset_group
+ return collection
diff --git a/openpype/hosts/blender/plugins/create/create_blendScene.py b/openpype/hosts/blender/plugins/create/create_blendScene.py
index 63bcf212ff..e1026282c0 100644
--- a/openpype/hosts/blender/plugins/create/create_blendScene.py
+++ b/openpype/hosts/blender/plugins/create/create_blendScene.py
@@ -2,50 +2,33 @@
import bpy
-from openpype.pipeline import get_current_task_name
-from openpype.hosts.blender.api import plugin, lib, ops
-from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
+from openpype.hosts.blender.api import plugin, lib
-class CreateBlendScene(plugin.Creator):
- """Generic group of assets"""
+class CreateBlendScene(plugin.BaseCreator):
+ """Generic group of assets."""
- name = "blendScene"
+ identifier = "io.openpype.creators.blender.blendscene"
label = "Blender Scene"
family = "blendScene"
icon = "cubes"
- def process(self):
- """ Run the creator on Blender main thread"""
- mti = ops.MainThreadItem(self._process)
- ops.execute_in_main_thread(mti)
+ maintain_selection = False
- def _process(self):
- # Get Instance Container or create it if it does not exist
- instances = bpy.data.collections.get(AVALON_INSTANCES)
- if not instances:
- instances = bpy.data.collections.new(name=AVALON_INSTANCES)
- bpy.context.scene.collection.children.link(instances)
+ def create(
+ self, subset_name: str, instance_data: dict, pre_create_data: dict
+ ):
- # Create instance object
- asset = self.data["asset"]
- subset = self.data["subset"]
- name = plugin.asset_name(asset, subset)
- asset_group = bpy.data.objects.new(name=name, object_data=None)
- asset_group.empty_display_type = 'SINGLE_ARROW'
- instances.objects.link(asset_group)
- self.data['task'] = get_current_task_name()
- lib.imprint(asset_group, self.data)
+ instance_node = super().create(subset_name,
+ instance_data,
+ pre_create_data)
- # Add selected objects to instance
- if (self.options or {}).get("useSelection"):
- bpy.context.view_layer.objects.active = asset_group
- selected = lib.get_selection()
- for obj in selected:
- if obj.parent in selected:
- obj.select_set(False)
- continue
- selected.append(asset_group)
- bpy.ops.object.parent_set(keep_transform=True)
+ if pre_create_data.get("use_selection"):
+ selection = lib.get_selection(include_collections=True)
+ for data in selection:
+ if isinstance(data, bpy.types.Collection):
+ instance_node.children.link(data)
+ elif isinstance(data, bpy.types.Object):
+ instance_node.objects.link(data)
- return asset_group
+ return instance_node
diff --git a/openpype/hosts/blender/plugins/create/create_camera.py b/openpype/hosts/blender/plugins/create/create_camera.py
index 7a770a3e77..2e2e6cec22 100644
--- a/openpype/hosts/blender/plugins/create/create_camera.py
+++ b/openpype/hosts/blender/plugins/create/create_camera.py
@@ -2,62 +2,41 @@
import bpy
-from openpype.pipeline import get_current_task_name
-from openpype.hosts.blender.api import plugin, lib, ops
+from openpype.hosts.blender.api import plugin, lib
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
-class CreateCamera(plugin.Creator):
- """Polygonal static geometry"""
+class CreateCamera(plugin.BaseCreator):
+    """Single baked camera."""
- name = "cameraMain"
+ identifier = "io.openpype.creators.blender.camera"
label = "Camera"
family = "camera"
icon = "video-camera"
- def process(self):
- """ Run the creator on Blender main thread"""
- mti = ops.MainThreadItem(self._process)
- ops.execute_in_main_thread(mti)
+ create_as_asset_group = True
- def _process(self):
- # Get Instance Container or create it if it does not exist
- instances = bpy.data.collections.get(AVALON_INSTANCES)
- if not instances:
- instances = bpy.data.collections.new(name=AVALON_INSTANCES)
- bpy.context.scene.collection.children.link(instances)
+ def create(
+ self, subset_name: str, instance_data: dict, pre_create_data: dict
+ ):
- # Create instance object
- asset = self.data["asset"]
- subset = self.data["subset"]
- name = plugin.asset_name(asset, subset)
+ asset_group = super().create(subset_name,
+ instance_data,
+ pre_create_data)
- asset_group = bpy.data.objects.new(name=name, object_data=None)
- asset_group.empty_display_type = 'SINGLE_ARROW'
- instances.objects.link(asset_group)
- self.data['task'] = get_current_task_name()
- print(f"self.data: {self.data}")
- lib.imprint(asset_group, self.data)
-
- if (self.options or {}).get("useSelection"):
- bpy.context.view_layer.objects.active = asset_group
- selected = lib.get_selection()
- for obj in selected:
- if obj.parent in selected:
- obj.select_set(False)
- continue
- selected.append(asset_group)
- bpy.ops.object.parent_set(keep_transform=True)
+ bpy.context.view_layer.objects.active = asset_group
+ if pre_create_data.get("use_selection"):
+ for obj in lib.get_selection():
+ obj.parent = asset_group
else:
plugin.deselect_all()
- camera = bpy.data.cameras.new(subset)
- camera_obj = bpy.data.objects.new(subset, camera)
+ camera = bpy.data.cameras.new(subset_name)
+ camera_obj = bpy.data.objects.new(subset_name, camera)
+ instances = bpy.data.collections.get(AVALON_INSTANCES)
instances.objects.link(camera_obj)
- camera_obj.select_set(True)
- asset_group.select_set(True)
bpy.context.view_layer.objects.active = asset_group
- bpy.ops.object.parent_set(keep_transform=True)
+ camera_obj.parent = asset_group
return asset_group
diff --git a/openpype/hosts/blender/plugins/create/create_layout.py b/openpype/hosts/blender/plugins/create/create_layout.py
index 73ed683256..16d227e50e 100644
--- a/openpype/hosts/blender/plugins/create/create_layout.py
+++ b/openpype/hosts/blender/plugins/create/create_layout.py
@@ -2,50 +2,31 @@
import bpy
-from openpype.pipeline import get_current_task_name
-from openpype.hosts.blender.api import plugin, lib, ops
-from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
+from openpype.hosts.blender.api import plugin, lib
-class CreateLayout(plugin.Creator):
- """Layout output for character rigs"""
+class CreateLayout(plugin.BaseCreator):
+ """Layout output for character rigs."""
- name = "layoutMain"
+ identifier = "io.openpype.creators.blender.layout"
label = "Layout"
family = "layout"
icon = "cubes"
- def process(self):
- """ Run the creator on Blender main thread"""
- mti = ops.MainThreadItem(self._process)
- ops.execute_in_main_thread(mti)
+ create_as_asset_group = True
- def _process(self):
- # Get Instance Container or create it if it does not exist
- instances = bpy.data.collections.get(AVALON_INSTANCES)
- if not instances:
- instances = bpy.data.collections.new(name=AVALON_INSTANCES)
- bpy.context.scene.collection.children.link(instances)
+ def create(
+ self, subset_name: str, instance_data: dict, pre_create_data: dict
+ ):
- # Create instance object
- asset = self.data["asset"]
- subset = self.data["subset"]
- name = plugin.asset_name(asset, subset)
- asset_group = bpy.data.objects.new(name=name, object_data=None)
- asset_group.empty_display_type = 'SINGLE_ARROW'
- instances.objects.link(asset_group)
- self.data['task'] = get_current_task_name()
- lib.imprint(asset_group, self.data)
+ asset_group = super().create(subset_name,
+ instance_data,
+ pre_create_data)
# Add selected objects to instance
- if (self.options or {}).get("useSelection"):
+ if pre_create_data.get("use_selection"):
bpy.context.view_layer.objects.active = asset_group
- selected = lib.get_selection()
- for obj in selected:
- if obj.parent in selected:
- obj.select_set(False)
- continue
- selected.append(asset_group)
- bpy.ops.object.parent_set(keep_transform=True)
+ for obj in lib.get_selection():
+ obj.parent = asset_group
return asset_group
diff --git a/openpype/hosts/blender/plugins/create/create_model.py b/openpype/hosts/blender/plugins/create/create_model.py
index 51fc6683f6..2f3f61728b 100644
--- a/openpype/hosts/blender/plugins/create/create_model.py
+++ b/openpype/hosts/blender/plugins/create/create_model.py
@@ -2,50 +2,30 @@
import bpy
-from openpype.pipeline import get_current_task_name
-from openpype.hosts.blender.api import plugin, lib, ops
-from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
+from openpype.hosts.blender.api import plugin, lib
-class CreateModel(plugin.Creator):
- """Polygonal static geometry"""
+class CreateModel(plugin.BaseCreator):
+ """Polygonal static geometry."""
- name = "modelMain"
+ identifier = "io.openpype.creators.blender.model"
label = "Model"
family = "model"
icon = "cube"
- def process(self):
- """ Run the creator on Blender main thread"""
- mti = ops.MainThreadItem(self._process)
- ops.execute_in_main_thread(mti)
+ create_as_asset_group = True
- def _process(self):
- # Get Instance Container or create it if it does not exist
- instances = bpy.data.collections.get(AVALON_INSTANCES)
- if not instances:
- instances = bpy.data.collections.new(name=AVALON_INSTANCES)
- bpy.context.scene.collection.children.link(instances)
-
- # Create instance object
- asset = self.data["asset"]
- subset = self.data["subset"]
- name = plugin.asset_name(asset, subset)
- asset_group = bpy.data.objects.new(name=name, object_data=None)
- asset_group.empty_display_type = 'SINGLE_ARROW'
- instances.objects.link(asset_group)
- self.data['task'] = get_current_task_name()
- lib.imprint(asset_group, self.data)
+ def create(
+ self, subset_name: str, instance_data: dict, pre_create_data: dict
+ ):
+ asset_group = super().create(subset_name,
+ instance_data,
+ pre_create_data)
# Add selected objects to instance
- if (self.options or {}).get("useSelection"):
+ if pre_create_data.get("use_selection"):
bpy.context.view_layer.objects.active = asset_group
- selected = lib.get_selection()
- for obj in selected:
- if obj.parent in selected:
- obj.select_set(False)
- continue
- selected.append(asset_group)
- bpy.ops.object.parent_set(keep_transform=True)
+ for obj in lib.get_selection():
+ obj.parent = asset_group
return asset_group
diff --git a/openpype/hosts/blender/plugins/create/create_pointcache.py b/openpype/hosts/blender/plugins/create/create_pointcache.py
index 6220f68dc5..b3329bcb3b 100644
--- a/openpype/hosts/blender/plugins/create/create_pointcache.py
+++ b/openpype/hosts/blender/plugins/create/create_pointcache.py
@@ -1,31 +1,25 @@
"""Create a pointcache asset."""
-import bpy
-
-from openpype.pipeline import get_current_task_name
-import openpype.hosts.blender.api.plugin
-from openpype.hosts.blender.api import lib
+from openpype.hosts.blender.api import plugin, lib
-class CreatePointcache(openpype.hosts.blender.api.plugin.Creator):
- """Polygonal static geometry"""
+class CreatePointcache(plugin.BaseCreator):
+    """Point cache of animated geometry."""
- name = "pointcacheMain"
+ identifier = "io.openpype.creators.blender.pointcache"
label = "Point Cache"
family = "pointcache"
icon = "gears"
- def process(self):
+ def create(
+ self, subset_name: str, instance_data: dict, pre_create_data: dict
+ ):
+ # Run parent create method
+ collection = super().create(
+ subset_name, instance_data, pre_create_data
+ )
- asset = self.data["asset"]
- subset = self.data["subset"]
- name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
- collection = bpy.data.collections.new(name=name)
- bpy.context.scene.collection.children.link(collection)
- self.data['task'] = get_current_task_name()
- lib.imprint(collection, self.data)
-
- if (self.options or {}).get("useSelection"):
+ if pre_create_data.get("use_selection"):
objects = lib.get_selection()
for obj in objects:
collection.objects.link(obj)
diff --git a/openpype/hosts/blender/plugins/create/create_render.py b/openpype/hosts/blender/plugins/create/create_render.py
index f938a21808..7fb3e5eb00 100644
--- a/openpype/hosts/blender/plugins/create/create_render.py
+++ b/openpype/hosts/blender/plugins/create/create_render.py
@@ -1,42 +1,31 @@
"""Create render."""
import bpy
-from openpype.pipeline import get_current_task_name
-from openpype.hosts.blender.api import plugin, lib
+from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.render_lib import prepare_rendering
-from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
-class CreateRenderlayer(plugin.Creator):
- """Single baked camera"""
+class CreateRenderlayer(plugin.BaseCreator):
+    """Render layer output of the scene."""
- name = "renderingMain"
+ identifier = "io.openpype.creators.blender.render"
label = "Render"
family = "render"
icon = "eye"
- def process(self):
- # Get Instance Container or create it if it does not exist
- instances = bpy.data.collections.get(AVALON_INSTANCES)
- if not instances:
- instances = bpy.data.collections.new(name=AVALON_INSTANCES)
- bpy.context.scene.collection.children.link(instances)
-
- # Create instance object
- asset = self.data["asset"]
- subset = self.data["subset"]
- name = plugin.asset_name(asset, subset)
- asset_group = bpy.data.collections.new(name=name)
-
+ def create(
+ self, subset_name: str, instance_data: dict, pre_create_data: dict
+ ):
try:
- instances.children.link(asset_group)
- self.data['task'] = get_current_task_name()
- lib.imprint(asset_group, self.data)
+ # Run parent create method
+ collection = super().create(
+ subset_name, instance_data, pre_create_data
+ )
- prepare_rendering(asset_group)
+ prepare_rendering(collection)
except Exception:
# Remove the instance if there was an error
- bpy.data.collections.remove(asset_group)
+ bpy.data.collections.remove(collection)
raise
# TODO: this is undesiderable, but it's the only way to be sure that
@@ -50,4 +39,4 @@ class CreateRenderlayer(plugin.Creator):
# now it is to force the file to be saved.
bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath)
- return asset_group
+ return collection
diff --git a/openpype/hosts/blender/plugins/create/create_review.py b/openpype/hosts/blender/plugins/create/create_review.py
index 914f249891..940bcbea22 100644
--- a/openpype/hosts/blender/plugins/create/create_review.py
+++ b/openpype/hosts/blender/plugins/create/create_review.py
@@ -1,47 +1,27 @@
"""Create review."""
-import bpy
-
-from openpype.pipeline import get_current_task_name
-from openpype.hosts.blender.api import plugin, lib, ops
-from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
+from openpype.hosts.blender.api import plugin, lib
-class CreateReview(plugin.Creator):
- """Single baked camera"""
+class CreateReview(plugin.BaseCreator):
+    """Review output of the scene."""
- name = "reviewDefault"
+ identifier = "io.openpype.creators.blender.review"
label = "Review"
family = "review"
icon = "video-camera"
- def process(self):
- """ Run the creator on Blender main thread"""
- mti = ops.MainThreadItem(self._process)
- ops.execute_in_main_thread(mti)
+ def create(
+ self, subset_name: str, instance_data: dict, pre_create_data: dict
+ ):
+ # Run parent create method
+ collection = super().create(
+ subset_name, instance_data, pre_create_data
+ )
- def _process(self):
- # Get Instance Container or create it if it does not exist
- instances = bpy.data.collections.get(AVALON_INSTANCES)
- if not instances:
- instances = bpy.data.collections.new(name=AVALON_INSTANCES)
- bpy.context.scene.collection.children.link(instances)
-
- # Create instance object
- asset = self.data["asset"]
- subset = self.data["subset"]
- name = plugin.asset_name(asset, subset)
- asset_group = bpy.data.collections.new(name=name)
- instances.children.link(asset_group)
- self.data['task'] = get_current_task_name()
- lib.imprint(asset_group, self.data)
-
- if (self.options or {}).get("useSelection"):
+ if pre_create_data.get("use_selection"):
selected = lib.get_selection()
for obj in selected:
- asset_group.objects.link(obj)
- elif (self.options or {}).get("asset_group"):
- obj = (self.options or {}).get("asset_group")
- asset_group.objects.link(obj)
+ collection.objects.link(obj)
- return asset_group
+ return collection
diff --git a/openpype/hosts/blender/plugins/create/create_rig.py b/openpype/hosts/blender/plugins/create/create_rig.py
index 08cc46ee3e..d63b8d56ff 100644
--- a/openpype/hosts/blender/plugins/create/create_rig.py
+++ b/openpype/hosts/blender/plugins/create/create_rig.py
@@ -2,50 +2,30 @@
import bpy
-from openpype.pipeline import get_current_task_name
-from openpype.hosts.blender.api import plugin, lib, ops
-from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
+from openpype.hosts.blender.api import plugin, lib
-class CreateRig(plugin.Creator):
- """Artist-friendly rig with controls to direct motion"""
+class CreateRig(plugin.BaseCreator):
+ """Artist-friendly rig with controls to direct motion."""
- name = "rigMain"
+ identifier = "io.openpype.creators.blender.rig"
label = "Rig"
family = "rig"
icon = "wheelchair"
- def process(self):
- """ Run the creator on Blender main thread"""
- mti = ops.MainThreadItem(self._process)
- ops.execute_in_main_thread(mti)
+ create_as_asset_group = True
- def _process(self):
- # Get Instance Container or create it if it does not exist
- instances = bpy.data.collections.get(AVALON_INSTANCES)
- if not instances:
- instances = bpy.data.collections.new(name=AVALON_INSTANCES)
- bpy.context.scene.collection.children.link(instances)
-
- # Create instance object
- asset = self.data["asset"]
- subset = self.data["subset"]
- name = plugin.asset_name(asset, subset)
- asset_group = bpy.data.objects.new(name=name, object_data=None)
- asset_group.empty_display_type = 'SINGLE_ARROW'
- instances.objects.link(asset_group)
- self.data['task'] = get_current_task_name()
- lib.imprint(asset_group, self.data)
+ def create(
+ self, subset_name: str, instance_data: dict, pre_create_data: dict
+ ):
+ asset_group = super().create(subset_name,
+ instance_data,
+ pre_create_data)
# Add selected objects to instance
- if (self.options or {}).get("useSelection"):
+ if pre_create_data.get("use_selection"):
bpy.context.view_layer.objects.active = asset_group
- selected = lib.get_selection()
- for obj in selected:
- if obj.parent in selected:
- obj.select_set(False)
- continue
- selected.append(asset_group)
- bpy.ops.object.parent_set(keep_transform=True)
+ for obj in lib.get_selection():
+ obj.parent = asset_group
return asset_group
diff --git a/openpype/hosts/blender/plugins/create/create_workfile.py b/openpype/hosts/blender/plugins/create/create_workfile.py
new file mode 100644
index 0000000000..ceec3e0552
--- /dev/null
+++ b/openpype/hosts/blender/plugins/create/create_workfile.py
@@ -0,0 +1,121 @@
+import bpy
+
+from openpype import AYON_SERVER_ENABLED
+from openpype.pipeline import CreatedInstance, AutoCreator
+from openpype.client import get_asset_by_name
+from openpype.hosts.blender.api.plugin import BaseCreator
+from openpype.hosts.blender.api.pipeline import (
+ AVALON_PROPERTY,
+ AVALON_CONTAINERS
+)
+
+
+class CreateWorkfile(BaseCreator, AutoCreator):
+ """Workfile auto-creator.
+
+ The workfile instance stores its data on the `AVALON_CONTAINERS` collection
+ as custom attributes, because unlike other instances it doesn't have an
+ instance node of its own.
+
+ """
+ identifier = "io.openpype.creators.blender.workfile"
+ label = "Workfile"
+ family = "workfile"
+ icon = "fa5.file"
+
+ def create(self):
+ """Create workfile instances."""
+ existing_instance = next(
+ (
+ instance for instance in self.create_context.instances
+ if instance.creator_identifier == self.identifier
+ ),
+ None,
+ )
+
+ project_name = self.project_name
+ asset_name = self.create_context.get_current_asset_name()
+ task_name = self.create_context.get_current_task_name()
+ host_name = self.create_context.host_name
+
+ existing_asset_name = None
+ if existing_instance is not None:
+ if AYON_SERVER_ENABLED:
+ existing_asset_name = existing_instance.get("folderPath")
+
+ if existing_asset_name is None:
+ existing_asset_name = existing_instance["asset"]
+
+ if not existing_instance:
+ asset_doc = get_asset_by_name(project_name, asset_name)
+ subset_name = self.get_subset_name(
+ task_name, task_name, asset_doc, project_name, host_name
+ )
+ data = {
+ "task": task_name,
+ "variant": task_name,
+ }
+ if AYON_SERVER_ENABLED:
+ data["folderPath"] = asset_name
+ else:
+ data["asset"] = asset_name
+ data.update(
+ self.get_dynamic_data(
+ task_name,
+ task_name,
+ asset_doc,
+ project_name,
+ host_name,
+ existing_instance,
+ )
+ )
+ self.log.info("Auto-creating workfile instance...")
+ current_instance = CreatedInstance(
+ self.family, subset_name, data, self
+ )
+ instance_node = bpy.data.collections.get(AVALON_CONTAINERS, {})
+ current_instance.transient_data["instance_node"] = instance_node
+ self._add_instance_to_context(current_instance)
+ elif (
+ existing_asset_name != asset_name
+ or existing_instance["task"] != task_name
+ ):
+ # Update instance context if it's different
+ asset_doc = get_asset_by_name(project_name, asset_name)
+ subset_name = self.get_subset_name(
+ task_name, task_name, asset_doc, project_name, host_name
+ )
+ if AYON_SERVER_ENABLED:
+ existing_instance["folderPath"] = asset_name
+ else:
+ existing_instance["asset"] = asset_name
+
+ existing_instance["task"] = task_name
+ existing_instance["subset"] = subset_name
+
+ def collect_instances(self):
+
+ instance_node = bpy.data.collections.get(AVALON_CONTAINERS)
+ if not instance_node:
+ return
+
+ property = instance_node.get(AVALON_PROPERTY)
+ if not property:
+ return
+
+ # Create instance object from existing data
+ instance = CreatedInstance.from_existing(
+ instance_data=property.to_dict(),
+ creator=self
+ )
+ instance.transient_data["instance_node"] = instance_node
+
+ # Add instance to create context
+ self._add_instance_to_context(instance)
+
+ def remove_instances(self, instances):
+ for instance in instances:
+ node = instance.transient_data["instance_node"]
+ del node[AVALON_PROPERTY]
+
+ self._remove_instance_from_context(instance)
diff --git a/openpype/hosts/blender/plugins/load/import_workfile.py b/openpype/hosts/blender/plugins/load/import_workfile.py
index 4f5016d422..331f6a8bdb 100644
--- a/openpype/hosts/blender/plugins/load/import_workfile.py
+++ b/openpype/hosts/blender/plugins/load/import_workfile.py
@@ -7,7 +7,7 @@ def append_workfile(context, fname, do_import):
asset = context['asset']['name']
subset = context['subset']['name']
- group_name = plugin.asset_name(asset, subset)
+ group_name = plugin.prepare_scene_name(asset, subset)
# We need to preserve the original names of the scenes, otherwise,
# if there are duplicate names in the current workfile, the imported
diff --git a/openpype/hosts/blender/plugins/load/load_abc.py b/openpype/hosts/blender/plugins/load/load_abc.py
index 292925c833..d7e82d1900 100644
--- a/openpype/hosts/blender/plugins/load/load_abc.py
+++ b/openpype/hosts/blender/plugins/load/load_abc.py
@@ -26,8 +26,7 @@ class CacheModelLoader(plugin.AssetLoader):
Note:
At least for now it only supports Alembic files.
"""
-
- families = ["model", "pointcache"]
+ families = ["model", "pointcache", "animation"]
representations = ["abc"]
label = "Load Alembic"
@@ -53,32 +52,43 @@ class CacheModelLoader(plugin.AssetLoader):
def _process(self, libpath, asset_group, group_name):
plugin.deselect_all()
- collection = bpy.context.view_layer.active_layer_collection.collection
-
relative = bpy.context.preferences.filepaths.use_relative_paths
bpy.ops.wm.alembic_import(
filepath=libpath,
relative_path=relative
)
- parent = bpy.context.scene.collection
-
imported = lib.get_selection()
- # Children must be linked before parents,
- # otherwise the hierarchy will break
+ # Use first EMPTY without parent as container
+ container = next(
+ (obj for obj in imported
+ if obj.type == "EMPTY" and not obj.parent),
+ None
+ )
+
objects = []
+ if container:
+ nodes = list(container.children)
- for obj in imported:
- obj.parent = asset_group
+ for obj in nodes:
+ obj.parent = asset_group
- for obj in imported:
- objects.append(obj)
- imported.extend(list(obj.children))
+ bpy.data.objects.remove(container)
- objects.reverse()
+ objects.extend(nodes)
+ for obj in nodes:
+ objects.extend(obj.children_recursive)
+ else:
+ for obj in imported:
+ obj.parent = asset_group
+ objects = imported
for obj in objects:
+ # Unlink the object from all collections
+ collections = obj.users_collection
+ for collection in collections:
+ collection.objects.unlink(obj)
name = obj.name
obj.name = f"{group_name}:{name}"
if obj.type != 'EMPTY':
@@ -90,7 +100,7 @@ class CacheModelLoader(plugin.AssetLoader):
material_slot.material.name = f"{group_name}:{name_mat}"
if not obj.get(AVALON_PROPERTY):
- obj[AVALON_PROPERTY] = dict()
+ obj[AVALON_PROPERTY] = {}
avalon_info = obj[AVALON_PROPERTY]
avalon_info.update({"container_name": group_name})
@@ -99,6 +109,18 @@ class CacheModelLoader(plugin.AssetLoader):
return objects
+ def _link_objects(self, objects, collection, containers, asset_group):
+ # Link the imported objects to any collection where the asset group is
+ # linked to, except the AVALON_CONTAINERS collection
+ group_collections = [
+ collection
+ for collection in asset_group.users_collection
+ if collection != containers]
+
+ for obj in objects:
+ for collection in group_collections:
+ collection.objects.link(obj)
+
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
@@ -115,23 +137,27 @@ class CacheModelLoader(plugin.AssetLoader):
asset = context["asset"]["name"]
subset = context["subset"]["name"]
- asset_name = plugin.asset_name(asset, subset)
+ asset_name = plugin.prepare_scene_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
- group_name = plugin.asset_name(asset, subset, unique_number)
+ group_name = plugin.prepare_scene_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
- avalon_containers = bpy.data.collections.get(AVALON_CONTAINERS)
- if not avalon_containers:
- avalon_containers = bpy.data.collections.new(
- name=AVALON_CONTAINERS)
- bpy.context.scene.collection.children.link(avalon_containers)
+ containers = bpy.data.collections.get(AVALON_CONTAINERS)
+ if not containers:
+ containers = bpy.data.collections.new(name=AVALON_CONTAINERS)
+ bpy.context.scene.collection.children.link(containers)
asset_group = bpy.data.objects.new(group_name, object_data=None)
- avalon_containers.objects.link(asset_group)
+ asset_group.empty_display_type = 'SINGLE_ARROW'
+ containers.objects.link(asset_group)
objects = self._process(libpath, asset_group, group_name)
- bpy.context.scene.collection.objects.link(asset_group)
+ # Link the asset group to the active collection
+ collection = bpy.context.view_layer.active_layer_collection.collection
+ collection.objects.link(asset_group)
+
+ self._link_objects(objects, asset_group, containers, asset_group)
asset_group[AVALON_PROPERTY] = {
"schema": "openpype:container-2.0",
@@ -207,7 +233,11 @@ class CacheModelLoader(plugin.AssetLoader):
mat = asset_group.matrix_basis.copy()
self._remove(asset_group)
- self._process(str(libpath), asset_group, object_name)
+ objects = self._process(str(libpath), asset_group, object_name)
+
+ containers = bpy.data.collections.get(AVALON_CONTAINERS)
+ self._link_objects(objects, asset_group, containers, asset_group)
+
asset_group.matrix_basis = mat
metadata["libpath"] = str(libpath)
diff --git a/openpype/hosts/blender/plugins/load/load_action.py b/openpype/hosts/blender/plugins/load/load_action.py
index 3447e67ebf..f7d32f92a5 100644
--- a/openpype/hosts/blender/plugins/load/load_action.py
+++ b/openpype/hosts/blender/plugins/load/load_action.py
@@ -7,7 +7,7 @@ from typing import Dict, List, Optional
import bpy
from openpype.pipeline import get_representation_path
-import openpype.hosts.blender.api.plugin
+from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import (
containerise_existing,
AVALON_PROPERTY,
@@ -16,7 +16,7 @@ from openpype.hosts.blender.api.pipeline import (
logger = logging.getLogger("openpype").getChild("blender").getChild("load_action")
-class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader):
+class BlendActionLoader(plugin.AssetLoader):
"""Load action from a .blend file.
Warning:
@@ -46,8 +46,8 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader):
libpath = self.filepath_from_context(context)
asset = context["asset"]["name"]
subset = context["subset"]["name"]
- lib_container = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
- container_name = openpype.hosts.blender.api.plugin.asset_name(
+ lib_container = plugin.prepare_scene_name(asset, subset)
+ container_name = plugin.prepare_scene_name(
asset, subset, namespace
)
@@ -152,7 +152,7 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader):
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
- assert extension in openpype.hosts.blender.api.plugin.VALID_EXTENSIONS, (
+ assert extension in plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
diff --git a/openpype/hosts/blender/plugins/load/load_audio.py b/openpype/hosts/blender/plugins/load/load_audio.py
index ac8f363316..1e5bd39a32 100644
--- a/openpype/hosts/blender/plugins/load/load_audio.py
+++ b/openpype/hosts/blender/plugins/load/load_audio.py
@@ -42,9 +42,9 @@ class AudioLoader(plugin.AssetLoader):
asset = context["asset"]["name"]
subset = context["subset"]["name"]
- asset_name = plugin.asset_name(asset, subset)
+ asset_name = plugin.prepare_scene_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
- group_name = plugin.asset_name(asset, subset, unique_number)
+ group_name = plugin.prepare_scene_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
diff --git a/openpype/hosts/blender/plugins/load/load_blend.py b/openpype/hosts/blender/plugins/load/load_blend.py
index 25d6568889..f437e66795 100644
--- a/openpype/hosts/blender/plugins/load/load_blend.py
+++ b/openpype/hosts/blender/plugins/load/load_blend.py
@@ -4,11 +4,11 @@ from pathlib import Path
import bpy
from openpype.pipeline import (
- legacy_create,
get_representation_path,
AVALON_CONTAINER_ID,
+ registered_host
)
-from openpype.pipeline.create import get_legacy_creator_by_name
+from openpype.pipeline.create import CreateContext
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.lib import imprint
from openpype.hosts.blender.api.pipeline import (
@@ -20,7 +20,7 @@ from openpype.hosts.blender.api.pipeline import (
class BlendLoader(plugin.AssetLoader):
"""Load assets from a .blend file."""
- families = ["model", "rig", "layout", "camera", "blendScene"]
+ families = ["model", "rig", "layout", "camera"]
representations = ["blend"]
label = "Append Blend"
@@ -32,7 +32,7 @@ class BlendLoader(plugin.AssetLoader):
empties = [obj for obj in objects if obj.type == 'EMPTY']
for empty in empties:
- if empty.get(AVALON_PROPERTY):
+ if empty.get(AVALON_PROPERTY) and empty.parent is None:
return empty
return None
@@ -57,19 +57,21 @@ class BlendLoader(plugin.AssetLoader):
obj.get(AVALON_PROPERTY).get('family') == 'rig'
)
]
+ if not rigs:
+ return
+
+ # Create animation instances for each rig
+ creator_identifier = "io.openpype.creators.blender.animation"
+ host = registered_host()
+ create_context = CreateContext(host)
for rig in rigs:
- creator_plugin = get_legacy_creator_by_name("CreateAnimation")
- legacy_create(
- creator_plugin,
- name=rig.name.split(':')[-1] + "_animation",
- asset=asset,
- options={
- "useSelection": False,
+ create_context.create(
+ creator_identifier=creator_identifier,
+ variant=rig.name.split(':')[-1],
+ pre_create_data={
+ "use_selection": False,
"asset_group": rig
- },
- data={
- "dependencies": representation
}
)
@@ -100,6 +102,7 @@ class BlendLoader(plugin.AssetLoader):
# Link all the container children to the collection
for obj in container.children_recursive:
+ print(obj)
bpy.context.scene.collection.objects.link(obj)
# Remove the library from the blend file
@@ -130,9 +133,9 @@ class BlendLoader(plugin.AssetLoader):
representation = str(context["representation"]["_id"])
- asset_name = plugin.asset_name(asset, subset)
+ asset_name = plugin.prepare_scene_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
- group_name = plugin.asset_name(asset, subset, unique_number)
+ group_name = plugin.prepare_scene_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
diff --git a/openpype/hosts/blender/plugins/load/load_blendscene.py b/openpype/hosts/blender/plugins/load/load_blendscene.py
new file mode 100644
index 0000000000..6cc7f39d03
--- /dev/null
+++ b/openpype/hosts/blender/plugins/load/load_blendscene.py
@@ -0,0 +1,221 @@
+from typing import Dict, List, Optional
+from pathlib import Path
+
+import bpy
+
+from openpype.pipeline import (
+ get_representation_path,
+ AVALON_CONTAINER_ID,
+)
+from openpype.hosts.blender.api import plugin
+from openpype.hosts.blender.api.lib import imprint
+from openpype.hosts.blender.api.pipeline import (
+ AVALON_CONTAINERS,
+ AVALON_PROPERTY,
+)
+
+
+class BlendSceneLoader(plugin.AssetLoader):
+ """Load assets from a .blend file."""
+
+ families = ["blendScene"]
+ representations = ["blend"]
+
+ label = "Append Blend"
+ icon = "code-fork"
+ color = "orange"
+
+ @staticmethod
+ def _get_asset_container(collections):
+ for coll in collections:
+ parents = [c for c in collections if c.user_of_id(coll)]
+ if coll.get(AVALON_PROPERTY) and not parents:
+ return coll
+
+ return None
+
+ def _process_data(self, libpath, group_name, family):
+ # Append all the data from the .blend file
+ with bpy.data.libraries.load(
+ libpath, link=False, relative=False
+ ) as (data_from, data_to):
+ for attr in dir(data_to):
+ setattr(data_to, attr, getattr(data_from, attr))
+
+ members = []
+
+ # Rename the object to add the asset name
+ for attr in dir(data_to):
+ for data in getattr(data_to, attr):
+ data.name = f"{group_name}:{data.name}"
+ members.append(data)
+
+ container = self._get_asset_container(
+ data_to.collections)
+ assert container, "No asset group found"
+
+ container.name = group_name
+
+ # Link the group to the scene
+ bpy.context.scene.collection.children.link(container)
+
+ # Remove the library from the blend file
+ library = bpy.data.libraries.get(bpy.path.basename(libpath))
+ bpy.data.libraries.remove(library)
+
+ return container, members
+
+ def process_asset(
+ self, context: dict, name: str, namespace: Optional[str] = None,
+ options: Optional[Dict] = None
+ ) -> Optional[List]:
+ """
+ Arguments:
+ name: Use pre-defined name
+ namespace: Use pre-defined namespace
+ context: Full parenthood of representation to load
+ options: Additional settings dictionary
+ """
+ libpath = self.filepath_from_context(context)
+ asset = context["asset"]["name"]
+ subset = context["subset"]["name"]
+
+ try:
+ family = context["representation"]["context"]["family"]
+ except ValueError:
+ family = "model"
+
+ asset_name = plugin.prepare_scene_name(asset, subset)
+ unique_number = plugin.get_unique_number(asset, subset)
+ group_name = plugin.prepare_scene_name(asset, subset, unique_number)
+ namespace = namespace or f"{asset}_{unique_number}"
+
+ avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
+ if not avalon_container:
+ avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
+ bpy.context.scene.collection.children.link(avalon_container)
+
+ container, members = self._process_data(libpath, group_name, family)
+
+ avalon_container.children.link(container)
+
+ data = {
+ "schema": "openpype:container-2.0",
+ "id": AVALON_CONTAINER_ID,
+ "name": name,
+ "namespace": namespace or '',
+ "loader": str(self.__class__.__name__),
+ "representation": str(context["representation"]["_id"]),
+ "libpath": libpath,
+ "asset_name": asset_name,
+ "parent": str(context["representation"]["parent"]),
+ "family": context["representation"]["context"]["family"],
+ "objectName": group_name,
+ "members": members,
+ }
+
+ container[AVALON_PROPERTY] = data
+
+ objects = [
+ obj for obj in bpy.data.objects
+ if obj.name.startswith(f"{group_name}:")
+ ]
+
+ self[:] = objects
+ return objects
+
+ def exec_update(self, container: Dict, representation: Dict):
+ """
+ Update the loaded asset.
+ """
+ group_name = container["objectName"]
+ asset_group = bpy.data.collections.get(group_name)
+ libpath = Path(get_representation_path(representation)).as_posix()
+
+ assert asset_group, (
+ f"The asset is not loaded: {container['objectName']}"
+ )
+
+ # Get the parents of the members of the asset group, so we can
+ # re-link them after the update.
+ # Also gets the transform for each object to reapply after the update.
+ collection_parents = {}
+ member_transforms = {}
+ members = asset_group.get(AVALON_PROPERTY).get("members", [])
+ loaded_collections = {c for c in bpy.data.collections if c in members}
+ loaded_collections.add(bpy.data.collections.get(AVALON_CONTAINERS))
+ for member in members:
+ if isinstance(member, bpy.types.Object):
+ member_parents = set(member.users_collection)
+ member_transforms[member.name] = member.matrix_basis.copy()
+ elif isinstance(member, bpy.types.Collection):
+ member_parents = {
+ c for c in bpy.data.collections if c.user_of_id(member)}
+ else:
+ continue
+
+ member_parents = member_parents.difference(loaded_collections)
+ if member_parents:
+ collection_parents[member.name] = list(member_parents)
+
+ old_data = dict(asset_group.get(AVALON_PROPERTY))
+
+ self.exec_remove(container)
+
+ family = container["family"]
+ asset_group, members = self._process_data(libpath, group_name, family)
+
+ for member in members:
+ if member.name in collection_parents:
+ for parent in collection_parents[member.name]:
+ if isinstance(member, bpy.types.Object):
+ parent.objects.link(member)
+ elif isinstance(member, bpy.types.Collection):
+ parent.children.link(member)
+ if member.name in member_transforms and isinstance(
+ member, bpy.types.Object
+ ):
+ member.matrix_basis = member_transforms[member.name]
+
+ avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
+ avalon_container.children.link(asset_group)
+
+ # Restore the old data, but reset members, as they don't exist anymore
+ # This avoids a crash, because the memory addresses of those members
+ # are not valid anymore
+ old_data["members"] = []
+ asset_group[AVALON_PROPERTY] = old_data
+
+ new_data = {
+ "libpath": libpath,
+ "representation": str(representation["_id"]),
+ "parent": str(representation["parent"]),
+ "members": members,
+ }
+
+ imprint(asset_group, new_data)
+
+ def exec_remove(self, container: Dict) -> bool:
+ """
+ Remove an existing container from a Blender scene.
+ """
+ group_name = container["objectName"]
+ asset_group = bpy.data.collections.get(group_name)
+
+ members = set(asset_group.get(AVALON_PROPERTY).get("members", []))
+
+ if members:
+ for attr_name in dir(bpy.data):
+ attr = getattr(bpy.data, attr_name)
+ if not isinstance(attr, bpy.types.bpy_prop_collection):
+ continue
+
+ # ensure to make a list copy because we
+ # we remove members as we iterate
+ for data in list(attr):
+ if data not in members or data == asset_group:
+ continue
+
+ attr.remove(data)
+
+ bpy.data.collections.remove(asset_group)
diff --git a/openpype/hosts/blender/plugins/load/load_camera_abc.py b/openpype/hosts/blender/plugins/load/load_camera_abc.py
index 05d3fb764d..ecd6bb98f1 100644
--- a/openpype/hosts/blender/plugins/load/load_camera_abc.py
+++ b/openpype/hosts/blender/plugins/load/load_camera_abc.py
@@ -87,9 +87,9 @@ class AbcCameraLoader(plugin.AssetLoader):
asset = context["asset"]["name"]
subset = context["subset"]["name"]
- asset_name = plugin.asset_name(asset, subset)
+ asset_name = plugin.prepare_scene_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
- group_name = plugin.asset_name(asset, subset, unique_number)
+ group_name = plugin.prepare_scene_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
diff --git a/openpype/hosts/blender/plugins/load/load_camera_fbx.py b/openpype/hosts/blender/plugins/load/load_camera_fbx.py
index 3cca6e7fd3..2d53d3e573 100644
--- a/openpype/hosts/blender/plugins/load/load_camera_fbx.py
+++ b/openpype/hosts/blender/plugins/load/load_camera_fbx.py
@@ -90,9 +90,9 @@ class FbxCameraLoader(plugin.AssetLoader):
asset = context["asset"]["name"]
subset = context["subset"]["name"]
- asset_name = plugin.asset_name(asset, subset)
+ asset_name = plugin.prepare_scene_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
- group_name = plugin.asset_name(asset, subset, unique_number)
+ group_name = plugin.prepare_scene_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
diff --git a/openpype/hosts/blender/plugins/load/load_fbx.py b/openpype/hosts/blender/plugins/load/load_fbx.py
index e129ea6754..8fce53a5d5 100644
--- a/openpype/hosts/blender/plugins/load/load_fbx.py
+++ b/openpype/hosts/blender/plugins/load/load_fbx.py
@@ -134,9 +134,9 @@ class FbxModelLoader(plugin.AssetLoader):
asset = context["asset"]["name"]
subset = context["subset"]["name"]
- asset_name = plugin.asset_name(asset, subset)
+ asset_name = plugin.prepare_scene_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
- group_name = plugin.asset_name(asset, subset, unique_number)
+ group_name = plugin.prepare_scene_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
diff --git a/openpype/hosts/blender/plugins/load/load_layout_json.py b/openpype/hosts/blender/plugins/load/load_layout_json.py
index 81683b8de8..748ac619b6 100644
--- a/openpype/hosts/blender/plugins/load/load_layout_json.py
+++ b/openpype/hosts/blender/plugins/load/load_layout_json.py
@@ -123,6 +123,7 @@ class JsonLayoutLoader(plugin.AssetLoader):
# raise ValueError("Creator plugin \"CreateCamera\" was "
# "not found.")
+ # TODO: Refactor legacy create usage to new style creators
# legacy_create(
# creator_plugin,
# name="camera",
@@ -148,9 +149,9 @@ class JsonLayoutLoader(plugin.AssetLoader):
asset = context["asset"]["name"]
subset = context["subset"]["name"]
- asset_name = plugin.asset_name(asset, subset)
+ asset_name = plugin.prepare_scene_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
- group_name = plugin.asset_name(asset, subset, unique_number)
+ group_name = plugin.prepare_scene_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
diff --git a/openpype/hosts/blender/plugins/load/load_look.py b/openpype/hosts/blender/plugins/load/load_look.py
index c121f55633..8d3118d83b 100644
--- a/openpype/hosts/blender/plugins/load/load_look.py
+++ b/openpype/hosts/blender/plugins/load/load_look.py
@@ -96,14 +96,14 @@ class BlendLookLoader(plugin.AssetLoader):
asset = context["asset"]["name"]
subset = context["subset"]["name"]
- lib_container = plugin.asset_name(
+ lib_container = plugin.prepare_scene_name(
asset, subset
)
unique_number = plugin.get_unique_number(
asset, subset
)
namespace = namespace or f"{asset}_{unique_number}"
- container_name = plugin.asset_name(
+ container_name = plugin.prepare_scene_name(
asset, subset, unique_number
)
diff --git a/openpype/hosts/blender/plugins/publish/collect_current_file.py b/openpype/hosts/blender/plugins/publish/collect_current_file.py
index c2d8a96a18..91c88f2e28 100644
--- a/openpype/hosts/blender/plugins/publish/collect_current_file.py
+++ b/openpype/hosts/blender/plugins/publish/collect_current_file.py
@@ -1,72 +1,15 @@
-import os
-import bpy
-
import pyblish.api
-from openpype.pipeline import get_current_task_name, get_current_asset_name
from openpype.hosts.blender.api import workio
-class SaveWorkfiledAction(pyblish.api.Action):
- """Save Workfile."""
- label = "Save Workfile"
- on = "failed"
- icon = "save"
-
- def process(self, context, plugin):
- bpy.ops.wm.avalon_workfiles()
-
-
class CollectBlenderCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder - 0.5
label = "Blender Current File"
hosts = ["blender"]
- actions = [SaveWorkfiledAction]
def process(self, context):
"""Inject the current working file"""
current_file = workio.current_file()
-
context.data["currentFile"] = current_file
-
- assert current_file, (
- "Current file is empty. Save the file before continuing."
- )
-
- folder, file = os.path.split(current_file)
- filename, ext = os.path.splitext(file)
-
- task = get_current_task_name()
-
- data = {}
-
- # create instance
- instance = context.create_instance(name=filename)
- subset = "workfile" + task.capitalize()
-
- data.update({
- "subset": subset,
- "asset": get_current_asset_name(),
- "label": subset,
- "publish": True,
- "family": "workfile",
- "families": ["workfile"],
- "setMembers": [current_file],
- "frameStart": bpy.context.scene.frame_start,
- "frameEnd": bpy.context.scene.frame_end,
- })
-
- data["representations"] = [{
- "name": ext.lstrip("."),
- "ext": ext.lstrip("."),
- "files": file,
- "stagingDir": folder,
- }]
-
- instance.data.update(data)
-
- self.log.info("Collected instance: {}".format(file))
- self.log.info("Scene path: {}".format(current_file))
- self.log.info("staging Dir: {}".format(folder))
- self.log.info("subset: {}".format(subset))
diff --git a/openpype/hosts/blender/plugins/publish/collect_instance.py b/openpype/hosts/blender/plugins/publish/collect_instance.py
new file mode 100644
index 0000000000..4685472213
--- /dev/null
+++ b/openpype/hosts/blender/plugins/publish/collect_instance.py
@@ -0,0 +1,43 @@
+import bpy
+
+import pyblish.api
+
+from openpype.pipeline.publish import KnownPublishError
+from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
+
+
+class CollectBlenderInstanceData(pyblish.api.InstancePlugin):
+ """Validator to verify that the instance is not empty"""
+
+ order = pyblish.api.CollectorOrder
+ hosts = ["blender"]
+ families = ["model", "pointcache", "animation", "rig", "camera", "layout",
+ "blendScene"]
+ label = "Collect Instance"
+
+ def process(self, instance):
+ instance_node = instance.data["transientData"]["instance_node"]
+
+ # Collect members of the instance
+ members = [instance_node]
+ if isinstance(instance_node, bpy.types.Collection):
+ members.extend(instance_node.objects)
+ members.extend(instance_node.children)
+
+ # Special case for animation instances, include armatures
+ if instance.data["family"] == "animation":
+ for obj in instance_node.objects:
+ if obj.type == 'EMPTY' and obj.get(AVALON_PROPERTY):
+ members.extend(
+ child for child in obj.children
+ if child.type == 'ARMATURE'
+ )
+ elif isinstance(instance_node, bpy.types.Object):
+ members.extend(instance_node.children_recursive)
+ else:
+ raise KnownPublishError(
+ f"Unsupported instance node type '{type(instance_node)}' "
+ f"for instance '{instance}'"
+ )
+
+ instance[:] = members
diff --git a/openpype/hosts/blender/plugins/publish/collect_instances.py b/openpype/hosts/blender/plugins/publish/collect_instances.py
deleted file mode 100644
index bc4b5ab092..0000000000
--- a/openpype/hosts/blender/plugins/publish/collect_instances.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import json
-from typing import Generator
-
-import bpy
-
-import pyblish.api
-from openpype.hosts.blender.api.pipeline import (
- AVALON_INSTANCES,
- AVALON_PROPERTY,
-)
-
-
-class CollectInstances(pyblish.api.ContextPlugin):
- """Collect the data of a model."""
-
- hosts = ["blender"]
- label = "Collect Instances"
- order = pyblish.api.CollectorOrder
-
- @staticmethod
- def get_asset_groups() -> Generator:
- """Return all 'model' collections.
-
- Check if the family is 'model' and if it doesn't have the
- representation set. If the representation is set, it is a loaded model
- and we don't want to publish it.
- """
- instances = bpy.data.collections.get(AVALON_INSTANCES)
- for obj in instances.objects:
- avalon_prop = obj.get(AVALON_PROPERTY) or dict()
- if avalon_prop.get('id') == 'pyblish.avalon.instance':
- yield obj
-
- @staticmethod
- def get_collections() -> Generator:
- """Return all 'model' collections.
-
- Check if the family is 'model' and if it doesn't have the
- representation set. If the representation is set, it is a loaded model
- and we don't want to publish it.
- """
- for collection in bpy.data.collections:
- avalon_prop = collection.get(AVALON_PROPERTY) or dict()
- if avalon_prop.get('id') == 'pyblish.avalon.instance':
- yield collection
-
- def process(self, context):
- """Collect the models from the current Blender scene."""
- asset_groups = self.get_asset_groups()
- collections = self.get_collections()
-
- for group in asset_groups:
- avalon_prop = group[AVALON_PROPERTY]
- asset = avalon_prop['asset']
- family = avalon_prop['family']
- subset = avalon_prop['subset']
- task = avalon_prop['task']
- name = f"{asset}_{subset}"
- instance = context.create_instance(
- name=name,
- family=family,
- families=[family],
- subset=subset,
- asset=asset,
- task=task,
- )
- objects = list(group.children)
- members = set()
- for obj in objects:
- objects.extend(list(obj.children))
- members.add(obj)
- members.add(group)
- instance[:] = list(members)
- self.log.debug(json.dumps(instance.data, indent=4))
- for obj in instance:
- self.log.debug(obj)
-
- for collection in collections:
- avalon_prop = collection[AVALON_PROPERTY]
- asset = avalon_prop['asset']
- family = avalon_prop['family']
- subset = avalon_prop['subset']
- task = avalon_prop['task']
- name = f"{asset}_{subset}"
- instance = context.create_instance(
- name=name,
- family=family,
- families=[family],
- subset=subset,
- asset=asset,
- task=task,
- )
- members = list(collection.objects)
- if family == "animation":
- for obj in collection.objects:
- if obj.type == 'EMPTY' and obj.get(AVALON_PROPERTY):
- for child in obj.children:
- if child.type == 'ARMATURE':
- members.append(child)
- members.append(collection)
- instance[:] = members
- self.log.debug(json.dumps(instance.data, indent=4))
- for obj in instance:
- self.log.debug(obj)
diff --git a/openpype/hosts/blender/plugins/publish/collect_render.py b/openpype/hosts/blender/plugins/publish/collect_render.py
index 92e2473a95..da02f99052 100644
--- a/openpype/hosts/blender/plugins/publish/collect_render.py
+++ b/openpype/hosts/blender/plugins/publish/collect_render.py
@@ -11,12 +11,12 @@ import pyblish.api
class CollectBlenderRender(pyblish.api.InstancePlugin):
- """Gather all publishable render layers from renderSetup."""
+ """Gather all publishable render instances."""
order = pyblish.api.CollectorOrder + 0.01
hosts = ["blender"]
families = ["render"]
- label = "Collect Render Layers"
+ label = "Collect Render"
sync_workfile_version = False
@staticmethod
@@ -73,12 +73,11 @@ class CollectBlenderRender(pyblish.api.InstancePlugin):
def process(self, instance):
context = instance.context
- render_data = bpy.data.collections[str(instance)].get("render_data")
+ instance_node = instance.data["transientData"]["instance_node"]
+ render_data = instance_node.get("render_data")
assert render_data, "No render data found."
- self.log.info(f"render_data: {dict(render_data)}")
-
render_product = render_data.get("render_product")
aov_file_product = render_data.get("aov_file_product")
ext = render_data.get("image_format")
@@ -100,7 +99,7 @@ class CollectBlenderRender(pyblish.api.InstancePlugin):
expected_files = expected_beauty | expected_aovs
instance.data.update({
- "family": "render.farm",
+ "families": ["render", "render.farm"],
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartHandle": frame_handle_start,
@@ -119,5 +118,3 @@ class CollectBlenderRender(pyblish.api.InstancePlugin):
"colorspaceView": "ACES 1.0 SDR-video",
"renderProducts": colorspace.ARenderProduct(),
})
-
- self.log.info(f"data: {instance.data}")
diff --git a/openpype/hosts/blender/plugins/publish/collect_review.py b/openpype/hosts/blender/plugins/publish/collect_review.py
index 3bf2e39e24..2c077398da 100644
--- a/openpype/hosts/blender/plugins/publish/collect_review.py
+++ b/openpype/hosts/blender/plugins/publish/collect_review.py
@@ -16,10 +16,12 @@ class CollectReview(pyblish.api.InstancePlugin):
self.log.debug(f"instance: {instance}")
+ datablock = instance.data["transientData"]["instance_node"]
+
# get cameras
cameras = [
obj
- for obj in instance
+ for obj in datablock.all_objects
if isinstance(obj, bpy.types.Object) and obj.type == "CAMERA"
]
@@ -31,11 +33,12 @@ class CollectReview(pyblish.api.InstancePlugin):
focal_length = cameras[0].data.lens
- # get isolate objects list from meshes instance members .
+ # get isolate objects list from meshes instance members.
+ types = {"MESH", "GPENCIL"}
isolate_objects = [
obj
for obj in instance
- if isinstance(obj, bpy.types.Object) and obj.type == "MESH"
+ if isinstance(obj, bpy.types.Object) and obj.type in types
]
if not instance.data.get("remove"):
diff --git a/openpype/hosts/blender/plugins/publish/collect_workfile.py b/openpype/hosts/blender/plugins/publish/collect_workfile.py
new file mode 100644
index 0000000000..6561c89605
--- /dev/null
+++ b/openpype/hosts/blender/plugins/publish/collect_workfile.py
@@ -0,0 +1,37 @@
+from pathlib import Path
+
+from pyblish.api import InstancePlugin, CollectorOrder
+
+
+class CollectWorkfile(InstancePlugin):
+ """Inject workfile data into its instance."""
+
+ order = CollectorOrder
+ label = "Collect Workfile"
+ hosts = ["blender"]
+ families = ["workfile"]
+
+ def process(self, instance):
+ """Process collector."""
+
+ context = instance.context
+ filepath = Path(context.data["currentFile"])
+ ext = filepath.suffix
+
+ instance.data.update(
+ {
+ "setMembers": [filepath.as_posix()],
+ "frameStart": context.data.get("frameStart", 1),
+ "frameEnd": context.data.get("frameEnd", 1),
+ "handleStart": context.data.get("handleStart", 1),
+                "handleEnd": context.data.get("handleEnd", 1),
+ "representations": [
+ {
+ "name": ext.lstrip("."),
+ "ext": ext.lstrip("."),
+ "files": filepath.name,
+ "stagingDir": filepath.parent,
+ }
+ ],
+ }
+ )
diff --git a/openpype/hosts/blender/plugins/publish/extract_abc.py b/openpype/hosts/blender/plugins/publish/extract_abc.py
index 87159e53f0..0e242e9d53 100644
--- a/openpype/hosts/blender/plugins/publish/extract_abc.py
+++ b/openpype/hosts/blender/plugins/publish/extract_abc.py
@@ -4,40 +4,42 @@ import bpy
from openpype.pipeline import publish
from openpype.hosts.blender.api import plugin
-from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
-class ExtractABC(publish.Extractor):
+class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
"""Extract as ABC."""
label = "Extract ABC"
hosts = ["blender"]
- families = ["model", "pointcache"]
- optional = True
+ families = ["pointcache"]
def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
# Define extract output file path
stagingdir = self.staging_dir(instance)
- filename = f"{instance.name}.abc"
+ asset_name = instance.data["assetEntity"]["name"]
+ subset = instance.data["subset"]
+ instance_name = f"{asset_name}_{subset}"
+ filename = f"{instance_name}.abc"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
- self.log.info("Performing extraction..")
+ self.log.debug("Performing extraction..")
plugin.deselect_all()
- selected = []
- active = None
+ asset_group = instance.data["transientData"]["instance_node"]
+ selected = []
for obj in instance:
- obj.select_set(True)
- selected.append(obj)
- # Set as active the asset group
- if obj.get(AVALON_PROPERTY):
- active = obj
+ if isinstance(obj, bpy.types.Object):
+ obj.select_set(True)
+ selected.append(obj)
context = plugin.create_blender_context(
- active=active, selected=selected)
+ active=asset_group, selected=selected)
with bpy.context.temp_override(**context):
# We export the abc
@@ -60,5 +62,14 @@ class ExtractABC(publish.Extractor):
}
instance.data["representations"].append(representation)
- self.log.info("Extracted instance '%s' to: %s",
- instance.name, representation)
+ self.log.debug("Extracted instance '%s' to: %s",
+ instance.name, representation)
+
+
+class ExtractModelABC(ExtractABC):
+ """Extract model as ABC."""
+
+ label = "Extract Model ABC"
+ hosts = ["blender"]
+ families = ["model"]
+ optional = True
diff --git a/openpype/hosts/blender/plugins/publish/extract_abc_animation.py b/openpype/hosts/blender/plugins/publish/extract_abc_animation.py
index 44b2ba3761..6ef9b29693 100644
--- a/openpype/hosts/blender/plugins/publish/extract_abc_animation.py
+++ b/openpype/hosts/blender/plugins/publish/extract_abc_animation.py
@@ -6,7 +6,10 @@ from openpype.pipeline import publish
from openpype.hosts.blender.api import plugin
-class ExtractAnimationABC(publish.Extractor):
+class ExtractAnimationABC(
+ publish.Extractor,
+ publish.OptionalPyblishPluginMixin,
+):
"""Extract as ABC."""
label = "Extract Animation ABC"
@@ -15,18 +18,25 @@ class ExtractAnimationABC(publish.Extractor):
optional = True
def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
# Define extract output file path
stagingdir = self.staging_dir(instance)
- filename = f"{instance.name}.abc"
+ asset_name = instance.data["assetEntity"]["name"]
+ subset = instance.data["subset"]
+ instance_name = f"{asset_name}_{subset}"
+ filename = f"{instance_name}.abc"
+
filepath = os.path.join(stagingdir, filename)
# Perform extraction
- self.log.info("Performing extraction..")
+ self.log.debug("Performing extraction..")
plugin.deselect_all()
selected = []
- asset_group = None
+ asset_group = instance.data["transientData"]["instance_node"]
objects = []
for obj in instance:
@@ -66,5 +76,5 @@ class ExtractAnimationABC(publish.Extractor):
}
instance.data["representations"].append(representation)
- self.log.info("Extracted instance '%s' to: %s",
- instance.name, representation)
+ self.log.debug("Extracted instance '%s' to: %s",
+ instance.name, representation)
diff --git a/openpype/hosts/blender/plugins/publish/extract_blend.py b/openpype/hosts/blender/plugins/publish/extract_blend.py
index d4f26b4f3c..94e87d537c 100644
--- a/openpype/hosts/blender/plugins/publish/extract_blend.py
+++ b/openpype/hosts/blender/plugins/publish/extract_blend.py
@@ -5,7 +5,7 @@ import bpy
from openpype.pipeline import publish
-class ExtractBlend(publish.Extractor):
+class ExtractBlend(publish.Extractor, publish.OptionalPyblishPluginMixin):
"""Extract a blend file."""
label = "Extract Blend"
@@ -14,30 +14,44 @@ class ExtractBlend(publish.Extractor):
optional = True
def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
# Define extract output file path
stagingdir = self.staging_dir(instance)
- filename = f"{instance.name}.blend"
+ asset_name = instance.data["assetEntity"]["name"]
+ subset = instance.data["subset"]
+ instance_name = f"{asset_name}_{subset}"
+ filename = f"{instance_name}.blend"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
- self.log.info("Performing extraction..")
+ self.log.debug("Performing extraction..")
data_blocks = set()
- for obj in instance:
- data_blocks.add(obj)
+ for data in instance:
+ data_blocks.add(data)
# Pack used images in the blend files.
- if obj.type == 'MESH':
- for material_slot in obj.material_slots:
- mat = material_slot.material
- if mat and mat.use_nodes:
- tree = mat.node_tree
- if tree.type == 'SHADER':
- for node in tree.nodes:
- if node.bl_idname == 'ShaderNodeTexImage':
- if node.image:
- node.image.pack()
+ if not (
+ isinstance(data, bpy.types.Object) and data.type == 'MESH'
+ ):
+ continue
+ for material_slot in data.material_slots:
+ mat = material_slot.material
+ if not (mat and mat.use_nodes):
+ continue
+ tree = mat.node_tree
+ if tree.type != 'SHADER':
+ continue
+ for node in tree.nodes:
+ if node.bl_idname != 'ShaderNodeTexImage':
+ continue
+ # Check if image is not packed already
+ # and pack it if not.
+ if node.image and node.image.packed_file is None:
+ node.image.pack()
bpy.data.libraries.write(filepath, data_blocks)
@@ -52,5 +66,5 @@ class ExtractBlend(publish.Extractor):
}
instance.data["representations"].append(representation)
- self.log.info("Extracted instance '%s' to: %s",
- instance.name, representation)
+ self.log.debug("Extracted instance '%s' to: %s",
+ instance.name, representation)
diff --git a/openpype/hosts/blender/plugins/publish/extract_blend_animation.py b/openpype/hosts/blender/plugins/publish/extract_blend_animation.py
index 477411b73d..11eb268271 100644
--- a/openpype/hosts/blender/plugins/publish/extract_blend_animation.py
+++ b/openpype/hosts/blender/plugins/publish/extract_blend_animation.py
@@ -5,7 +5,10 @@ import bpy
from openpype.pipeline import publish
-class ExtractBlendAnimation(publish.Extractor):
+class ExtractBlendAnimation(
+ publish.Extractor,
+ publish.OptionalPyblishPluginMixin,
+):
"""Extract a blend file."""
label = "Extract Blend"
@@ -14,14 +17,20 @@ class ExtractBlendAnimation(publish.Extractor):
optional = True
def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
# Define extract output file path
stagingdir = self.staging_dir(instance)
- filename = f"{instance.name}.blend"
+ asset_name = instance.data["assetEntity"]["name"]
+ subset = instance.data["subset"]
+ instance_name = f"{asset_name}_{subset}"
+ filename = f"{instance_name}.blend"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
- self.log.info("Performing extraction..")
+ self.log.debug("Performing extraction..")
data_blocks = set()
@@ -50,5 +59,5 @@ class ExtractBlendAnimation(publish.Extractor):
}
instance.data["representations"].append(representation)
- self.log.info("Extracted instance '%s' to: %s",
- instance.name, representation)
+ self.log.debug("Extracted instance '%s' to: %s",
+ instance.name, representation)
diff --git a/openpype/hosts/blender/plugins/publish/extract_camera_abc.py b/openpype/hosts/blender/plugins/publish/extract_camera_abc.py
index 036be7bf3c..df68668eae 100644
--- a/openpype/hosts/blender/plugins/publish/extract_camera_abc.py
+++ b/openpype/hosts/blender/plugins/publish/extract_camera_abc.py
@@ -7,7 +7,7 @@ from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
-class ExtractCameraABC(publish.Extractor):
+class ExtractCameraABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
"""Extract camera as ABC."""
label = "Extract Camera (ABC)"
@@ -16,22 +16,23 @@ class ExtractCameraABC(publish.Extractor):
optional = True
def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
# Define extract output file path
stagingdir = self.staging_dir(instance)
- filename = f"{instance.name}.abc"
+ asset_name = instance.data["assetEntity"]["name"]
+ subset = instance.data["subset"]
+ instance_name = f"{asset_name}_{subset}"
+ filename = f"{instance_name}.abc"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
- self.log.info("Performing extraction..")
+ self.log.debug("Performing extraction..")
plugin.deselect_all()
- asset_group = None
- for obj in instance:
- if obj.get(AVALON_PROPERTY):
- asset_group = obj
- break
- assert asset_group, "No asset group found"
+ asset_group = instance.data["transientData"]["instance_node"]
# Need to cast to list because children is a tuple
selected = list(asset_group.children)
@@ -64,5 +65,5 @@ class ExtractCameraABC(publish.Extractor):
}
instance.data["representations"].append(representation)
- self.log.info("Extracted instance '%s' to: %s",
- instance.name, representation)
+ self.log.debug("Extracted instance '%s' to: %s",
+ instance.name, representation)
diff --git a/openpype/hosts/blender/plugins/publish/extract_camera_fbx.py b/openpype/hosts/blender/plugins/publish/extract_camera_fbx.py
index 315994140e..ee046b7d11 100644
--- a/openpype/hosts/blender/plugins/publish/extract_camera_fbx.py
+++ b/openpype/hosts/blender/plugins/publish/extract_camera_fbx.py
@@ -6,7 +6,7 @@ from openpype.pipeline import publish
from openpype.hosts.blender.api import plugin
-class ExtractCamera(publish.Extractor):
+class ExtractCamera(publish.Extractor, publish.OptionalPyblishPluginMixin):
"""Extract as the camera as FBX."""
label = "Extract Camera (FBX)"
@@ -15,13 +15,19 @@ class ExtractCamera(publish.Extractor):
optional = True
def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
# Define extract output file path
stagingdir = self.staging_dir(instance)
- filename = f"{instance.name}.fbx"
+ asset_name = instance.data["assetEntity"]["name"]
+ subset = instance.data["subset"]
+ instance_name = f"{asset_name}_{subset}"
+ filename = f"{instance_name}.fbx"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
- self.log.info("Performing extraction..")
+ self.log.debug("Performing extraction..")
plugin.deselect_all()
@@ -73,5 +79,5 @@ class ExtractCamera(publish.Extractor):
}
instance.data["representations"].append(representation)
- self.log.info("Extracted instance '%s' to: %s",
- instance.name, representation)
+ self.log.debug("Extracted instance '%s' to: %s",
+ instance.name, representation)
diff --git a/openpype/hosts/blender/plugins/publish/extract_fbx.py b/openpype/hosts/blender/plugins/publish/extract_fbx.py
index 0ad797c226..4ae6501f7d 100644
--- a/openpype/hosts/blender/plugins/publish/extract_fbx.py
+++ b/openpype/hosts/blender/plugins/publish/extract_fbx.py
@@ -7,7 +7,7 @@ from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
-class ExtractFBX(publish.Extractor):
+class ExtractFBX(publish.Extractor, publish.OptionalPyblishPluginMixin):
"""Extract as FBX."""
label = "Extract FBX"
@@ -16,24 +16,28 @@ class ExtractFBX(publish.Extractor):
optional = True
def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
# Define extract output file path
stagingdir = self.staging_dir(instance)
- filename = f"{instance.name}.fbx"
+ asset_name = instance.data["assetEntity"]["name"]
+ subset = instance.data["subset"]
+ instance_name = f"{asset_name}_{subset}"
+ filename = f"{instance_name}.fbx"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
- self.log.info("Performing extraction..")
+ self.log.debug("Performing extraction..")
plugin.deselect_all()
- selected = []
- asset_group = None
+ asset_group = instance.data["transientData"]["instance_node"]
+ selected = []
for obj in instance:
obj.select_set(True)
selected.append(obj)
- if obj.get(AVALON_PROPERTY):
- asset_group = obj
context = plugin.create_blender_context(
active=asset_group, selected=selected)
@@ -84,5 +88,5 @@ class ExtractFBX(publish.Extractor):
}
instance.data["representations"].append(representation)
- self.log.info("Extracted instance '%s' to: %s",
- instance.name, representation)
+ self.log.debug("Extracted instance '%s' to: %s",
+ instance.name, representation)
diff --git a/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py b/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py
index 062b42e99d..4fc8230a1b 100644
--- a/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py
+++ b/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py
@@ -10,7 +10,41 @@ from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
-class ExtractAnimationFBX(publish.Extractor):
+def get_all_parents(obj):
+ """Get all recursive parents of object"""
+ result = []
+ while True:
+ obj = obj.parent
+ if not obj:
+ break
+ result.append(obj)
+ return result
+
+
+def get_highest_root(objects):
+ # Get the highest object that is also in the collection
+ included_objects = {obj.name_full for obj in objects}
+ num_parents_to_obj = {}
+ for obj in objects:
+ if isinstance(obj, bpy.types.Object):
+ parents = get_all_parents(obj)
+ # included parents
+ parents = [parent for parent in parents if
+ parent.name_full in included_objects]
+ if not parents:
+ # A node without parents must be a highest root
+ return obj
+
+ num_parents_to_obj.setdefault(len(parents), obj)
+
+ minimum_parent = min(num_parents_to_obj)
+ return num_parents_to_obj[minimum_parent]
+
+
+class ExtractAnimationFBX(
+ publish.Extractor,
+ publish.OptionalPyblishPluginMixin,
+):
"""Extract as animation."""
label = "Extract FBX"
@@ -19,23 +53,43 @@ class ExtractAnimationFBX(publish.Extractor):
optional = True
def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
# Define extract output file path
stagingdir = self.staging_dir(instance)
# Perform extraction
- self.log.info("Performing extraction..")
+ self.log.debug("Performing extraction..")
- # The first collection object in the instance is taken, as there
- # should be only one that contains the asset group.
- collection = [
- obj for obj in instance if type(obj) is bpy.types.Collection][0]
+ asset_group = instance.data["transientData"]["instance_node"]
- # Again, the first object in the collection is taken , as there
- # should be only the asset group in the collection.
- asset_group = collection.objects[0]
+ # Get objects in this collection (but not in children collections)
+ # and for those objects include the children hierarchy
+ # TODO: Would it make more sense for the Collect Instance collector
+ # to also always retrieve all the children?
+ objects = set(asset_group.objects)
- armature = [
- obj for obj in asset_group.children if obj.type == 'ARMATURE'][0]
+ # From the direct children of the collection find the 'root' node
+ # that we want to export - it is the 'highest' node in a hierarchy
+ root = get_highest_root(objects)
+
+ for obj in list(objects):
+ objects.update(obj.children_recursive)
+
+ # Find all armatures among the objects, assume to find only one
+ armatures = [obj for obj in objects if obj.type == "ARMATURE"]
+ if not armatures:
+ raise RuntimeError(
+ f"Unable to find ARMATURE in collection: "
+ f"{asset_group.name}"
+ )
+ elif len(armatures) > 1:
+ self.log.warning(
+ "Found more than one ARMATURE, using "
+ f"only first of: {armatures}"
+ )
+ armature = armatures[0]
object_action_pairs = []
original_actions = []
@@ -44,9 +98,6 @@ class ExtractAnimationFBX(publish.Extractor):
ending_frames = []
# For each armature, we make a copy of the current action
- curr_action = None
- copy_action = None
-
if armature.animation_data and armature.animation_data.action:
curr_action = armature.animation_data.action
copy_action = curr_action.copy()
@@ -56,12 +107,20 @@ class ExtractAnimationFBX(publish.Extractor):
starting_frames.append(curr_frame_range[0])
ending_frames.append(curr_frame_range[1])
else:
- self.log.info("Object have no animation.")
+ self.log.info(
+ f"Armature '{armature.name}' has no animation, "
+ f"skipping FBX animation extraction for {instance}."
+ )
return
asset_group_name = asset_group.name
- asset_group.name = asset_group.get(AVALON_PROPERTY).get("asset_name")
+ asset_name = asset_group.get(AVALON_PROPERTY).get("asset_name")
+ if asset_name:
+ # Rename for the export; this data is only present when loaded
+ # from a JSON Layout (layout family)
+ asset_group.name = asset_name
+ # Remove : from the armature name for the export
armature_name = armature.name
original_name = armature_name.split(':')[1]
armature.name = original_name
@@ -84,13 +143,16 @@ class ExtractAnimationFBX(publish.Extractor):
for obj in bpy.data.objects:
obj.select_set(False)
- asset_group.select_set(True)
+ root.select_set(True)
armature.select_set(True)
- fbx_filename = f"{instance.name}_{armature.name}.fbx"
+ asset_name = instance.data["assetEntity"]["name"]
+ subset = instance.data["subset"]
+ instance_name = f"{asset_name}_{subset}"
+ fbx_filename = f"{instance_name}_{armature.name}.fbx"
filepath = os.path.join(stagingdir, fbx_filename)
override = plugin.create_blender_context(
- active=asset_group, selected=[asset_group, armature])
+ active=root, selected=[root, armature])
bpy.ops.export_scene.fbx(
override,
filepath=filepath,
@@ -104,7 +166,7 @@ class ExtractAnimationFBX(publish.Extractor):
)
armature.name = armature_name
asset_group.name = asset_group_name
- asset_group.select_set(False)
+ root.select_set(False)
armature.select_set(False)
# We delete the baked action and set the original one back
@@ -119,7 +181,7 @@ class ExtractAnimationFBX(publish.Extractor):
pair[1].user_clear()
bpy.data.actions.remove(pair[1])
- json_filename = f"{instance.name}.json"
+ json_filename = f"{instance_name}.json"
json_path = os.path.join(stagingdir, json_filename)
json_dict = {
@@ -158,5 +220,5 @@ class ExtractAnimationFBX(publish.Extractor):
instance.data["representations"].append(fbx_representation)
instance.data["representations"].append(json_representation)
- self.log.info("Extracted instance '{}' to: {}".format(
- instance.name, fbx_representation))
+ self.log.debug("Extracted instance '{}' to: {}".format(
+ instance.name, fbx_representation))
diff --git a/openpype/hosts/blender/plugins/publish/extract_layout.py b/openpype/hosts/blender/plugins/publish/extract_layout.py
index f2d04f1178..3e8978c8d3 100644
--- a/openpype/hosts/blender/plugins/publish/extract_layout.py
+++ b/openpype/hosts/blender/plugins/publish/extract_layout.py
@@ -11,10 +11,10 @@ from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
-class ExtractLayout(publish.Extractor):
+class ExtractLayout(publish.Extractor, publish.OptionalPyblishPluginMixin):
"""Extract a layout."""
- label = "Extract Layout"
+ label = "Extract Layout (JSON)"
hosts = ["blender"]
families = ["layout"]
optional = True
@@ -45,7 +45,7 @@ class ExtractLayout(publish.Extractor):
starting_frames.append(curr_frame_range[0])
ending_frames.append(curr_frame_range[1])
else:
- self.log.info("Object have no animation.")
+ self.log.info("Object has no animation.")
continue
asset_group_name = asset.name
@@ -113,11 +113,14 @@ class ExtractLayout(publish.Extractor):
return None, n
def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
# Define extract output file path
stagingdir = self.staging_dir(instance)
# Perform extraction
- self.log.info("Performing extraction..")
+ self.log.debug("Performing extraction..")
if "representations" not in instance.data:
instance.data["representations"] = []
@@ -125,13 +128,22 @@ class ExtractLayout(publish.Extractor):
json_data = []
fbx_files = []
- asset_group = bpy.data.objects[str(instance)]
+ asset_group = instance.data["transientData"]["instance_node"]
fbx_count = 0
project_name = instance.context.data["projectEntity"]["name"]
for asset in asset_group.children:
metadata = asset.get(AVALON_PROPERTY)
+ if not metadata:
+ # Avoid raising error directly if there's just invalid data
+ # inside the instance; better to log it to the artist
+ # TODO: This should actually be validated in a validator
+ self.log.warning(
+ f"Found content in layout that is not a loaded "
+ f"asset, skipping: {asset.name_full}"
+ )
+ continue
version_id = metadata["parent"]
family = metadata["family"]
@@ -212,7 +224,11 @@ class ExtractLayout(publish.Extractor):
json_data.append(json_element)
- json_filename = "{}.json".format(instance.name)
+ asset_name = instance.data["assetEntity"]["name"]
+ subset = instance.data["subset"]
+ instance_name = f"{asset_name}_{subset}"
+ json_filename = f"{instance_name}.json"
+
json_path = os.path.join(stagingdir, json_filename)
with open(json_path, "w+") as file:
@@ -245,5 +261,5 @@ class ExtractLayout(publish.Extractor):
}
instance.data["representations"].append(fbx_representation)
- self.log.info("Extracted instance '%s' to: %s",
- instance.name, json_representation)
+ self.log.debug("Extracted instance '%s' to: %s",
+ instance.name, json_representation)
diff --git a/openpype/hosts/blender/plugins/publish/extract_playblast.py b/openpype/hosts/blender/plugins/publish/extract_playblast.py
index 196e75b8cc..a78aa14138 100644
--- a/openpype/hosts/blender/plugins/publish/extract_playblast.py
+++ b/openpype/hosts/blender/plugins/publish/extract_playblast.py
@@ -9,7 +9,7 @@ from openpype.hosts.blender.api import capture
from openpype.hosts.blender.api.lib import maintained_time
-class ExtractPlayblast(publish.Extractor):
+class ExtractPlayblast(publish.Extractor, publish.OptionalPyblishPluginMixin):
"""
Extract viewport playblast.
@@ -24,9 +24,8 @@ class ExtractPlayblast(publish.Extractor):
order = pyblish.api.ExtractorOrder + 0.01
def process(self, instance):
- self.log.info("Extracting capture..")
-
- self.log.info(instance.data)
+ if not self.is_active(instance.data):
+ return
# get scene fps
fps = instance.data.get("fps")
@@ -34,14 +33,14 @@ class ExtractPlayblast(publish.Extractor):
fps = bpy.context.scene.render.fps
instance.data["fps"] = fps
- self.log.info(f"fps: {fps}")
+ self.log.debug(f"fps: {fps}")
# If start and end frames cannot be determined,
# get them from Blender timeline.
start = instance.data.get("frameStart", bpy.context.scene.frame_start)
end = instance.data.get("frameEnd", bpy.context.scene.frame_end)
- self.log.info(f"start: {start}, end: {end}")
+ self.log.debug(f"start: {start}, end: {end}")
assert end > start, "Invalid time range !"
# get cameras
@@ -52,10 +51,13 @@ class ExtractPlayblast(publish.Extractor):
# get output path
stagingdir = self.staging_dir(instance)
- filename = instance.name
+ asset_name = instance.data["assetEntity"]["name"]
+ subset = instance.data["subset"]
+ filename = f"{asset_name}_{subset}"
+
path = os.path.join(stagingdir, filename)
- self.log.info(f"Outputting images to {path}")
+ self.log.debug(f"Outputting images to {path}")
project_settings = instance.context.data["project_settings"]["blender"]
presets = project_settings["publish"]["ExtractPlayblast"]["presets"]
@@ -100,7 +102,7 @@ class ExtractPlayblast(publish.Extractor):
frame_collection = collections[0]
- self.log.info(f"We found collection of interest {frame_collection}")
+ self.log.debug(f"Found collection of interest {frame_collection}")
instance.data.setdefault("representations", [])
diff --git a/openpype/hosts/blender/plugins/publish/extract_thumbnail.py b/openpype/hosts/blender/plugins/publish/extract_thumbnail.py
index 65c3627375..e593e0de27 100644
--- a/openpype/hosts/blender/plugins/publish/extract_thumbnail.py
+++ b/openpype/hosts/blender/plugins/publish/extract_thumbnail.py
@@ -24,13 +24,20 @@ class ExtractThumbnail(publish.Extractor):
presets = {}
def process(self, instance):
- self.log.info("Extracting capture..")
+ self.log.debug("Extracting capture..")
+
+ if instance.data.get("thumbnailSource"):
+ self.log.debug("Thumbnail source found, skipping...")
+ return
stagingdir = self.staging_dir(instance)
- filename = instance.name
+ asset_name = instance.data["assetEntity"]["name"]
+ subset = instance.data["subset"]
+ filename = f"{asset_name}_{subset}"
+
path = os.path.join(stagingdir, filename)
- self.log.info(f"Outputting images to {path}")
+ self.log.debug(f"Outputting images to {path}")
camera = instance.data.get("review_camera", "AUTO")
start = instance.data.get("frameStart", bpy.context.scene.frame_start)
@@ -61,7 +68,7 @@ class ExtractThumbnail(publish.Extractor):
thumbnail = os.path.basename(self._fix_output_path(path))
- self.log.info(f"thumbnail: {thumbnail}")
+ self.log.debug(f"thumbnail: {thumbnail}")
instance.data.setdefault("representations", [])
diff --git a/openpype/hosts/blender/plugins/publish/increment_workfile_version.py b/openpype/hosts/blender/plugins/publish/increment_workfile_version.py
index 3d176f9c30..9f8d20aedc 100644
--- a/openpype/hosts/blender/plugins/publish/increment_workfile_version.py
+++ b/openpype/hosts/blender/plugins/publish/increment_workfile_version.py
@@ -1,8 +1,12 @@
import pyblish.api
+from openpype.pipeline.publish import OptionalPyblishPluginMixin
from openpype.hosts.blender.api.workio import save_file
-class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
+class IncrementWorkfileVersion(
+ pyblish.api.ContextPlugin,
+ OptionalPyblishPluginMixin
+):
"""Increment current workfile version."""
order = pyblish.api.IntegratorOrder + 0.9
@@ -10,9 +14,11 @@ class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
optional = True
hosts = ["blender"]
families = ["animation", "model", "rig", "action", "layout", "blendScene",
- "render"]
+ "pointcache", "render.farm"]
def process(self, context):
+ if not self.is_active(context.data):
+ return
assert all(result["success"] for result in context.data["results"]), (
"Publishing not successful so version is not increased.")
@@ -23,4 +29,4 @@ class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
save_file(filepath, copy=False)
- self.log.info('Incrementing script version')
+ self.log.debug('Incrementing blender workfile version')
diff --git a/openpype/hosts/blender/plugins/publish/integrate_animation.py b/openpype/hosts/blender/plugins/publish/integrate_animation.py
index d9a85bc79b..623da9c585 100644
--- a/openpype/hosts/blender/plugins/publish/integrate_animation.py
+++ b/openpype/hosts/blender/plugins/publish/integrate_animation.py
@@ -1,9 +1,13 @@
import json
import pyblish.api
+from openpype.pipeline.publish import OptionalPyblishPluginMixin
-class IntegrateAnimation(pyblish.api.InstancePlugin):
+class IntegrateAnimation(
+ pyblish.api.InstancePlugin,
+ OptionalPyblishPluginMixin,
+):
"""Generate a JSON file for animation."""
label = "Integrate Animation"
@@ -13,7 +17,7 @@ class IntegrateAnimation(pyblish.api.InstancePlugin):
families = ["setdress"]
def process(self, instance):
- self.log.info("Integrate Animation")
+ self.log.debug("Integrate Animation")
representation = instance.data.get('representations')[0]
json_path = representation.get('publishedFiles')[0]
diff --git a/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py b/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py
index 48c267fd18..9b6e513897 100644
--- a/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py
+++ b/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py
@@ -5,10 +5,15 @@ import bpy
import pyblish.api
import openpype.hosts.blender.api.action
-from openpype.pipeline.publish import ValidateContentsOrder
+from openpype.pipeline.publish import (
+ ValidateContentsOrder,
+ PublishValidationError,
+ OptionalPyblishPluginMixin
+)
-class ValidateCameraZeroKeyframe(pyblish.api.InstancePlugin):
+class ValidateCameraZeroKeyframe(pyblish.api.InstancePlugin,
+ OptionalPyblishPluginMixin):
"""Camera must have a keyframe at frame 0.
Unreal shifts the first keyframe to frame 0. Forcing the camera to have
@@ -40,8 +45,12 @@ class ValidateCameraZeroKeyframe(pyblish.api.InstancePlugin):
return invalid
def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
invalid = self.get_invalid(instance)
if invalid:
- raise RuntimeError(
- f"Camera must have a keyframe at frame 0: {invalid}"
+ names = ", ".join(obj.name for obj in invalid)
+ raise PublishValidationError(
+ f"Camera must have a keyframe at frame 0: {names}"
)
diff --git a/openpype/hosts/blender/plugins/publish/validate_deadline_publish.py b/openpype/hosts/blender/plugins/publish/validate_deadline_publish.py
index 14220b5c9c..bb243f08cc 100644
--- a/openpype/hosts/blender/plugins/publish/validate_deadline_publish.py
+++ b/openpype/hosts/blender/plugins/publish/validate_deadline_publish.py
@@ -19,7 +19,7 @@ class ValidateDeadlinePublish(pyblish.api.InstancePlugin,
"""
order = ValidateContentsOrder
- families = ["render.farm"]
+ families = ["render"]
hosts = ["blender"]
label = "Validate Render Output for Deadline"
optional = True
@@ -36,12 +36,12 @@ class ValidateDeadlinePublish(pyblish.api.InstancePlugin,
"Render output folder "
"doesn't match the blender scene name! "
"Use Repair action to "
- "fix the folder file path.."
+ "fix the folder file path."
)
@classmethod
def repair(cls, instance):
- container = bpy.data.collections[str(instance)]
+ container = instance.data["transientData"]["instance_node"]
prepare_rendering(container)
bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath)
cls.log.debug("Reset the render output folder...")
diff --git a/openpype/hosts/blender/plugins/publish/validate_file_saved.py b/openpype/hosts/blender/plugins/publish/validate_file_saved.py
index e191585c55..442f856e05 100644
--- a/openpype/hosts/blender/plugins/publish/validate_file_saved.py
+++ b/openpype/hosts/blender/plugins/publish/validate_file_saved.py
@@ -2,8 +2,24 @@ import bpy
import pyblish.api
+from openpype.pipeline.publish import (
+ OptionalPyblishPluginMixin,
+ PublishValidationError
+)
-class ValidateFileSaved(pyblish.api.InstancePlugin):
+
+class SaveWorkfileAction(pyblish.api.Action):
+ """Save Workfile."""
+ label = "Save Workfile"
+ on = "failed"
+ icon = "save"
+
+ def process(self, context, plugin):
+ bpy.ops.wm.avalon_workfiles()
+
+
+class ValidateFileSaved(pyblish.api.ContextPlugin,
+ OptionalPyblishPluginMixin):
"""Validate that the workfile has been saved."""
order = pyblish.api.ValidatorOrder - 0.01
@@ -11,10 +27,35 @@ class ValidateFileSaved(pyblish.api.InstancePlugin):
label = "Validate File Saved"
optional = False
exclude_families = []
+ actions = [SaveWorkfileAction]
- def process(self, instance):
- if [ef for ef in self.exclude_families
- if instance.data["family"] in ef]:
+ def process(self, context):
+ if not self.is_active(context.data):
return
+
+ if not context.data["currentFile"]:
+ # File has not been saved at all and has no filename
+ raise PublishValidationError(
+ "Current file is empty. Save the file before continuing."
+ )
+
+ # Do not validate workfile has unsaved changes if only instances
+ # present of families that should be excluded
+ families = {
+ instance.data["family"] for instance in context
+ # Consider only enabled instances
+ if instance.data.get("publish", True)
+ and instance.data.get("active", True)
+ }
+
+ def is_excluded(family):
+ return any(family in exclude_family
+ for exclude_family in self.exclude_families)
+
+ if all(is_excluded(family) for family in families):
+ self.log.debug("Only excluded families found, skipping workfile "
+ "unsaved changes validation..")
+ return
+
if bpy.data.is_dirty:
- raise RuntimeError("Workfile is not saved.")
+ raise PublishValidationError("Workfile has unsaved changes.")
diff --git a/openpype/hosts/blender/plugins/publish/validate_instance_empty.py b/openpype/hosts/blender/plugins/publish/validate_instance_empty.py
new file mode 100644
index 0000000000..51a1dcf6ca
--- /dev/null
+++ b/openpype/hosts/blender/plugins/publish/validate_instance_empty.py
@@ -0,0 +1,19 @@
+import pyblish.api
+from openpype.pipeline.publish import PublishValidationError
+
+
+class ValidateInstanceEmpty(pyblish.api.InstancePlugin):
+ """Validator to verify that the instance is not empty"""
+
+ order = pyblish.api.ValidatorOrder - 0.01
+ hosts = ["blender"]
+ families = ["model", "pointcache", "rig", "camera", "layout", "blendScene"]
+ label = "Validate Instance is not Empty"
+ optional = False
+
+ def process(self, instance):
+ # Members are collected by `collect_instance` so we only need to check
+ # whether any member is included. The instance node will be included
+ # as a member as well, hence we will check for at least 2 members
+ if len(instance) < 2:
+ raise PublishValidationError(f"Instance {instance.name} is empty.")
diff --git a/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py b/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py
index edf47193be..060bccbd04 100644
--- a/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py
+++ b/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py
@@ -4,17 +4,24 @@ import bpy
import pyblish.api
-from openpype.pipeline.publish import ValidateContentsOrder
+from openpype.pipeline.publish import (
+ ValidateContentsOrder,
+ OptionalPyblishPluginMixin,
+ PublishValidationError
+)
import openpype.hosts.blender.api.action
-class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
+class ValidateMeshHasUvs(
+ pyblish.api.InstancePlugin,
+ OptionalPyblishPluginMixin,
+):
"""Validate that the current mesh has UV's."""
order = ValidateContentsOrder
hosts = ["blender"]
families = ["model"]
- label = "Mesh Has UV's"
+ label = "Mesh Has UVs"
actions = [openpype.hosts.blender.api.action.SelectInvalidAction]
optional = True
@@ -49,8 +56,11 @@ class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
return invalid
def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
invalid = self.get_invalid(instance)
if invalid:
- raise RuntimeError(
+ raise PublishValidationError(
f"Meshes found in instance without valid UV's: {invalid}"
)
diff --git a/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py b/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py
index 618feb95c1..7f77bbe38c 100644
--- a/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py
+++ b/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py
@@ -4,11 +4,16 @@ import bpy
import pyblish.api
-from openpype.pipeline.publish import ValidateContentsOrder
+from openpype.pipeline.publish import (
+ ValidateContentsOrder,
+ OptionalPyblishPluginMixin,
+ PublishValidationError
+)
import openpype.hosts.blender.api.action
-class ValidateMeshNoNegativeScale(pyblish.api.Validator):
+class ValidateMeshNoNegativeScale(pyblish.api.Validator,
+ OptionalPyblishPluginMixin):
"""Ensure that meshes don't have a negative scale."""
order = ValidateContentsOrder
@@ -27,8 +32,12 @@ class ValidateMeshNoNegativeScale(pyblish.api.Validator):
return invalid
def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
invalid = self.get_invalid(instance)
if invalid:
- raise RuntimeError(
- f"Meshes found in instance with negative scale: {invalid}"
+ names = ", ".join(obj.name for obj in invalid)
+ raise PublishValidationError(
+ f"Meshes found in instance with negative scale: {names}"
)
diff --git a/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py b/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py
index 1a98ec4c1d..caf555b535 100644
--- a/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py
+++ b/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py
@@ -5,10 +5,15 @@ import bpy
import pyblish.api
import openpype.hosts.blender.api.action
-from openpype.pipeline.publish import ValidateContentsOrder
+from openpype.pipeline.publish import (
+ ValidateContentsOrder,
+ OptionalPyblishPluginMixin,
+ PublishValidationError
+)
-class ValidateNoColonsInName(pyblish.api.InstancePlugin):
+class ValidateNoColonsInName(pyblish.api.InstancePlugin,
+ OptionalPyblishPluginMixin):
"""There cannot be colons in names
Object or bone names cannot include colons. Other software do not
@@ -36,8 +41,12 @@ class ValidateNoColonsInName(pyblish.api.InstancePlugin):
return invalid
def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
invalid = self.get_invalid(instance)
if invalid:
- raise RuntimeError(
- f"Objects found with colon in name: {invalid}"
+ names = ", ".join(obj.name for obj in invalid)
+ raise PublishValidationError(
+ f"Objects found with colon in name: {names}"
)
diff --git a/openpype/hosts/blender/plugins/publish/validate_object_mode.py b/openpype/hosts/blender/plugins/publish/validate_object_mode.py
index ac60e00f89..ab5f4bb467 100644
--- a/openpype/hosts/blender/plugins/publish/validate_object_mode.py
+++ b/openpype/hosts/blender/plugins/publish/validate_object_mode.py
@@ -3,10 +3,17 @@ from typing import List
import bpy
import pyblish.api
+from openpype.pipeline.publish import (
+ OptionalPyblishPluginMixin,
+ PublishValidationError
+)
import openpype.hosts.blender.api.action
-class ValidateObjectIsInObjectMode(pyblish.api.InstancePlugin):
+class ValidateObjectIsInObjectMode(
+ pyblish.api.InstancePlugin,
+ OptionalPyblishPluginMixin,
+):
"""Validate that the objects in the instance are in Object Mode."""
order = pyblish.api.ValidatorOrder - 0.01
@@ -25,8 +32,12 @@ class ValidateObjectIsInObjectMode(pyblish.api.InstancePlugin):
return invalid
def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
invalid = self.get_invalid(instance)
if invalid:
- raise RuntimeError(
- f"Object found in instance is not in Object Mode: {invalid}"
+ names = ", ".join(obj.name for obj in invalid)
+ raise PublishValidationError(
+ f"Object found in instance is not in Object Mode: {names}"
)
diff --git a/openpype/hosts/blender/plugins/publish/validate_render_camera_is_set.py b/openpype/hosts/blender/plugins/publish/validate_render_camera_is_set.py
index ba3a796f35..86d1fcc681 100644
--- a/openpype/hosts/blender/plugins/publish/validate_render_camera_is_set.py
+++ b/openpype/hosts/blender/plugins/publish/validate_render_camera_is_set.py
@@ -2,8 +2,14 @@ import bpy
import pyblish.api
+from openpype.pipeline.publish import (
+ OptionalPyblishPluginMixin,
+ PublishValidationError
+)
-class ValidateRenderCameraIsSet(pyblish.api.InstancePlugin):
+
+class ValidateRenderCameraIsSet(pyblish.api.InstancePlugin,
+ OptionalPyblishPluginMixin):
"""Validate that there is a camera set as active for rendering."""
order = pyblish.api.ValidatorOrder
@@ -13,5 +19,8 @@ class ValidateRenderCameraIsSet(pyblish.api.InstancePlugin):
optional = False
def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
if not bpy.context.scene.camera:
- raise RuntimeError("No camera is active for rendering.")
+ raise PublishValidationError("No camera is active for rendering.")
diff --git a/openpype/hosts/blender/plugins/publish/validate_transform_zero.py b/openpype/hosts/blender/plugins/publish/validate_transform_zero.py
index 66ef731e6e..1fb9535ee4 100644
--- a/openpype/hosts/blender/plugins/publish/validate_transform_zero.py
+++ b/openpype/hosts/blender/plugins/publish/validate_transform_zero.py
@@ -6,10 +6,15 @@ import bpy
import pyblish.api
import openpype.hosts.blender.api.action
-from openpype.pipeline.publish import ValidateContentsOrder
+from openpype.pipeline.publish import (
+ ValidateContentsOrder,
+ OptionalPyblishPluginMixin,
+ PublishValidationError
+)
-class ValidateTransformZero(pyblish.api.InstancePlugin):
+class ValidateTransformZero(pyblish.api.InstancePlugin,
+ OptionalPyblishPluginMixin):
"""Transforms can't have any values
To solve this issue, try freezing the transforms. So long
@@ -38,9 +43,13 @@ class ValidateTransformZero(pyblish.api.InstancePlugin):
return invalid
def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
invalid = self.get_invalid(instance)
if invalid:
- raise RuntimeError(
- "Object found in instance has not"
- f" transform to zero: {invalid}"
+ names = ", ".join(obj.name for obj in invalid)
+ raise PublishValidationError(
+ "Objects found in instance which do not"
+ f" have transform set to zero: {names}"
)
diff --git a/openpype/hosts/celaction/plugins/publish/collect_celaction_instances.py b/openpype/hosts/celaction/plugins/publish/collect_celaction_instances.py
index c815c1edd4..875f15fcc5 100644
--- a/openpype/hosts/celaction/plugins/publish/collect_celaction_instances.py
+++ b/openpype/hosts/celaction/plugins/publish/collect_celaction_instances.py
@@ -1,6 +1,8 @@
import os
import pyblish.api
+from openpype.client import get_asset_name_identifier
+
class CollectCelactionInstances(pyblish.api.ContextPlugin):
""" Adds the celaction render instances """
@@ -17,8 +19,10 @@ class CollectCelactionInstances(pyblish.api.ContextPlugin):
asset_entity = context.data["assetEntity"]
project_entity = context.data["projectEntity"]
+ asset_name = get_asset_name_identifier(asset_entity)
+
shared_instance_data = {
- "asset": asset_entity["name"],
+ "asset": asset_name,
"frameStart": asset_entity["data"]["frameStart"],
"frameEnd": asset_entity["data"]["frameEnd"],
"handleStart": asset_entity["data"]["handleStart"],
diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py b/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py
index f8cfa9e963..20ac048986 100644
--- a/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py
+++ b/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py
@@ -1,5 +1,6 @@
import pyblish.api
+from openpype.client import get_asset_name_identifier
import openpype.hosts.flame.api as opfapi
from openpype.hosts.flame.otio import flame_export
from openpype.pipeline.create import get_subset_name
@@ -33,13 +34,15 @@ class CollecTimelineOTIO(pyblish.api.ContextPlugin):
project_settings=context.data["project_settings"]
)
+ asset_name = get_asset_name_identifier(asset_doc)
+
# adding otio timeline to context
with opfapi.maintained_segment_selection(sequence) as selected_seg:
otio_timeline = flame_export.create_otio_timeline(sequence)
instance_data = {
"name": subset_name,
- "asset": asset_doc["name"],
+ "asset": asset_name,
"subset": subset_name,
"family": "workfile",
"families": []
diff --git a/openpype/hosts/fusion/api/lib.py b/openpype/hosts/fusion/api/lib.py
index c4a1488606..85f9c54a73 100644
--- a/openpype/hosts/fusion/api/lib.py
+++ b/openpype/hosts/fusion/api/lib.py
@@ -280,7 +280,11 @@ def get_current_comp():
@contextlib.contextmanager
-def comp_lock_and_undo_chunk(comp, undo_queue_name="Script CMD"):
+def comp_lock_and_undo_chunk(
+ comp,
+ undo_queue_name="Script CMD",
+ keep_undo=True,
+):
"""Lock comp and open an undo chunk during the context"""
try:
comp.Lock()
@@ -288,4 +292,4 @@ def comp_lock_and_undo_chunk(comp, undo_queue_name="Script CMD"):
yield
finally:
comp.Unlock()
- comp.EndUndo()
+ comp.EndUndo(keep_undo)
diff --git a/openpype/hosts/fusion/api/menu.py b/openpype/hosts/fusion/api/menu.py
index 50250a6656..0b9ad1a43b 100644
--- a/openpype/hosts/fusion/api/menu.py
+++ b/openpype/hosts/fusion/api/menu.py
@@ -1,3 +1,4 @@
+import os
import sys
from qtpy import QtWidgets, QtCore, QtGui
@@ -18,6 +19,10 @@ from openpype.resources import get_openpype_icon_filepath
from .pipeline import FusionEventHandler
from .pulse import FusionPulse
+
+MENU_LABEL = os.environ["AVALON_LABEL"]
+
+
self = sys.modules[__name__]
self.menu = None
@@ -26,7 +31,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
def __init__(self, *args, **kwargs):
super(OpenPypeMenu, self).__init__(*args, **kwargs)
- self.setObjectName("OpenPypeMenu")
+ self.setObjectName(f"{MENU_LABEL}Menu")
icon_path = get_openpype_icon_filepath()
icon = QtGui.QIcon(icon_path)
@@ -41,7 +46,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
| QtCore.Qt.WindowStaysOnTopHint
)
self.render_mode_widget = None
- self.setWindowTitle("OpenPype")
+ self.setWindowTitle(MENU_LABEL)
asset_label = QtWidgets.QLabel("Context", self)
asset_label.setStyleSheet(
diff --git a/openpype/hosts/fusion/deploy/MenuScripts/openpype_menu.py b/openpype/hosts/fusion/deploy/MenuScripts/launch_menu.py
similarity index 100%
rename from openpype/hosts/fusion/deploy/MenuScripts/openpype_menu.py
rename to openpype/hosts/fusion/deploy/MenuScripts/launch_menu.py
diff --git a/openpype/hosts/fusion/deploy/ayon/Config/menu.fu b/openpype/hosts/fusion/deploy/ayon/Config/menu.fu
new file mode 100644
index 0000000000..c968a1bb3d
--- /dev/null
+++ b/openpype/hosts/fusion/deploy/ayon/Config/menu.fu
@@ -0,0 +1,60 @@
+{
+ Action
+ {
+ ID = "AYON_Menu",
+ Category = "AYON",
+ Name = "AYON Menu",
+
+ Targets =
+ {
+ Composition =
+ {
+ Execute = _Lua [=[
+ local scriptPath = app:MapPath("AYON:../MenuScripts/launch_menu.py")
+ if bmd.fileexists(scriptPath) == false then
+ print("[AYON Error] Can't run file: " .. scriptPath)
+ else
+ target:RunScript(scriptPath)
+ end
+ ]=],
+ },
+ },
+ },
+ Action
+ {
+ ID = "AYON_Install_PySide2",
+ Category = "AYON",
+ Name = "Install PySide2",
+
+ Targets =
+ {
+ Composition =
+ {
+ Execute = _Lua [=[
+ local scriptPath = app:MapPath("AYON:../MenuScripts/install_pyside2.py")
+ if bmd.fileexists(scriptPath) == false then
+ print("[AYON Error] Can't run file: " .. scriptPath)
+ else
+ target:RunScript(scriptPath)
+ end
+ ]=],
+ },
+ },
+ },
+ Menus
+ {
+ Target = "ChildFrame",
+
+ Before "Help"
+ {
+ Sub "AYON"
+ {
+ "AYON_Menu{}",
+ "_",
+ Sub "Admin" {
+ "AYON_Install_PySide2{}"
+ }
+ }
+ },
+ },
+}
diff --git a/openpype/hosts/fusion/deploy/ayon/fusion_shared.prefs b/openpype/hosts/fusion/deploy/ayon/fusion_shared.prefs
new file mode 100644
index 0000000000..9c67af7db9
--- /dev/null
+++ b/openpype/hosts/fusion/deploy/ayon/fusion_shared.prefs
@@ -0,0 +1,19 @@
+{
+Locked = true,
+Global = {
+ Paths = {
+ Map = {
+ ["AYON:"] = "$(OPENPYPE_FUSION)/deploy/ayon",
+ ["Config:"] = "UserPaths:Config;AYON:Config",
+ ["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts",
+ },
+ },
+ Script = {
+ PythonVersion = 3,
+ Python3Forced = true
+ },
+ UserInterface = {
+ Language = "en_US"
+ },
+ },
+}
diff --git a/openpype/hosts/fusion/deploy/Config/openpype_menu.fu b/openpype/hosts/fusion/deploy/openpype/Config/menu.fu
similarity index 87%
rename from openpype/hosts/fusion/deploy/Config/openpype_menu.fu
rename to openpype/hosts/fusion/deploy/openpype/Config/menu.fu
index 8b8d448259..85134d2c62 100644
--- a/openpype/hosts/fusion/deploy/Config/openpype_menu.fu
+++ b/openpype/hosts/fusion/deploy/openpype/Config/menu.fu
@@ -10,7 +10,7 @@
Composition =
{
Execute = _Lua [=[
- local scriptPath = app:MapPath("OpenPype:MenuScripts/openpype_menu.py")
+ local scriptPath = app:MapPath("OpenPype:../MenuScripts/launch_menu.py")
if bmd.fileexists(scriptPath) == false then
print("[OpenPype Error] Can't run file: " .. scriptPath)
else
@@ -31,7 +31,7 @@
Composition =
{
Execute = _Lua [=[
- local scriptPath = app:MapPath("OpenPype:MenuScripts/install_pyside2.py")
+ local scriptPath = app:MapPath("OpenPype:../MenuScripts/install_pyside2.py")
if bmd.fileexists(scriptPath) == false then
print("[OpenPype Error] Can't run file: " .. scriptPath)
else
diff --git a/openpype/hosts/fusion/deploy/fusion_shared.prefs b/openpype/hosts/fusion/deploy/openpype/fusion_shared.prefs
similarity index 83%
rename from openpype/hosts/fusion/deploy/fusion_shared.prefs
rename to openpype/hosts/fusion/deploy/openpype/fusion_shared.prefs
index 93b08aa886..0035a38990 100644
--- a/openpype/hosts/fusion/deploy/fusion_shared.prefs
+++ b/openpype/hosts/fusion/deploy/openpype/fusion_shared.prefs
@@ -3,7 +3,7 @@ Locked = true,
Global = {
Paths = {
Map = {
- ["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy",
+ ["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy/openpype",
["Config:"] = "UserPaths:Config;OpenPype:Config",
["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts",
},
diff --git a/openpype/hosts/fusion/hooks/pre_fusion_profile_hook.py b/openpype/hosts/fusion/hooks/pre_fusion_profile_hook.py
index 66b0f803aa..59053ba62a 100644
--- a/openpype/hosts/fusion/hooks/pre_fusion_profile_hook.py
+++ b/openpype/hosts/fusion/hooks/pre_fusion_profile_hook.py
@@ -2,6 +2,7 @@ import os
import shutil
import platform
from pathlib import Path
+from openpype import AYON_SERVER_ENABLED
from openpype.hosts.fusion import (
FUSION_HOST_DIR,
FUSION_VERSIONS_DICT,
@@ -161,6 +162,13 @@ class FusionCopyPrefsPrelaunch(PreLaunchHook):
# profile directory variables to customize Fusion
# to define where it can read custom scripts and tools from
master_prefs_variable = f"FUSION{profile_version}_MasterPrefs"
- master_prefs = Path(FUSION_HOST_DIR, "deploy", "fusion_shared.prefs")
+
+ if AYON_SERVER_ENABLED:
+ master_prefs = Path(
+ FUSION_HOST_DIR, "deploy", "ayon", "fusion_shared.prefs")
+ else:
+ master_prefs = Path(
+ FUSION_HOST_DIR, "deploy", "openpype", "fusion_shared.prefs")
+
self.log.info(f"Setting {master_prefs_variable}: {master_prefs}")
self.launch_context.env[master_prefs_variable] = str(master_prefs)
diff --git a/openpype/hosts/fusion/plugins/create/create_saver.py b/openpype/hosts/fusion/plugins/create/create_saver.py
index fccd8b2965..5870828b41 100644
--- a/openpype/hosts/fusion/plugins/create/create_saver.py
+++ b/openpype/hosts/fusion/plugins/create/create_saver.py
@@ -14,7 +14,7 @@ from openpype.pipeline import (
legacy_io,
Creator as NewCreator,
CreatedInstance,
- Anatomy
+ Anatomy,
)
@@ -27,28 +27,21 @@ class CreateSaver(NewCreator):
description = "Fusion Saver to generate image sequence"
icon = "fa5.eye"
- instance_attributes = [
- "reviewable"
- ]
+ instance_attributes = ["reviewable"]
+ image_format = "exr"
# TODO: This should be renamed together with Nuke so it is aligned
temp_rendering_path_template = (
- "{workdir}/renders/fusion/{subset}/{subset}.{frame}.{ext}")
+ "{workdir}/renders/fusion/{subset}/{subset}.{frame}.{ext}"
+ )
def create(self, subset_name, instance_data, pre_create_data):
- self.pass_pre_attributes_to_instance(
- instance_data,
- pre_create_data
+ self.pass_pre_attributes_to_instance(instance_data, pre_create_data)
+
+ instance_data.update(
+ {"id": "pyblish.avalon.instance", "subset": subset_name}
)
- instance_data.update({
- "id": "pyblish.avalon.instance",
- "subset": subset_name
- })
-
- # TODO: Add pre_create attributes to choose file format?
- file_format = "OpenEXRFormat"
-
comp = get_current_comp()
with comp_lock_and_undo_chunk(comp):
args = (-32768, -32768) # Magical position numbers
@@ -56,21 +49,6 @@ class CreateSaver(NewCreator):
self._update_tool_with_data(saver, data=instance_data)
- saver["OutputFormat"] = file_format
-
- # Check file format settings are available
- if saver[file_format] is None:
- raise RuntimeError(
- f"File format is not set to {file_format}, this is a bug"
- )
-
- # Set file format attributes
- saver[file_format]["Depth"] = 0 # Auto | float16 | float32
- # TODO Is this needed?
- saver[file_format]["SaveAlpha"] = 1
-
- self._imprint(saver, instance_data)
-
# Register the CreatedInstance
instance = CreatedInstance(
family=self.family,
@@ -78,6 +56,8 @@ class CreateSaver(NewCreator):
data=instance_data,
creator=self,
)
+ data = instance.data_to_store()
+ self._imprint(saver, data)
# Insert the transient data
instance.transient_data["tool"] = saver
@@ -140,8 +120,15 @@ class CreateSaver(NewCreator):
return
original_subset = tool.GetData("openpype.subset")
+ original_format = tool.GetData(
+ "openpype.creator_attributes.image_format"
+ )
+
subset = data["subset"]
- if original_subset != subset:
+ if (
+ original_subset != subset
+ or original_format != data["creator_attributes"]["image_format"]
+ ):
self._configure_saver_tool(data, tool, subset)
def _configure_saver_tool(self, data, tool, subset):
@@ -149,23 +136,22 @@ class CreateSaver(NewCreator):
# get frame padding from anatomy templates
anatomy = Anatomy()
- frame_padding = int(
- anatomy.templates["render"].get("frame_padding", 4)
- )
+ frame_padding = anatomy.templates["frame_padding"]
+
+ # get output format
+ ext = data["creator_attributes"]["image_format"]
# Subset change detected
workdir = os.path.normpath(legacy_io.Session["AVALON_WORKDIR"])
- formatting_data.update({
- "workdir": workdir,
- "frame": "0" * frame_padding,
- "ext": "exr"
- })
+ formatting_data.update(
+ {"workdir": workdir, "frame": "0" * frame_padding, "ext": ext}
+ )
# build file path to render
- filepath = self.temp_rendering_path_template.format(
- **formatting_data)
+ filepath = self.temp_rendering_path_template.format(**formatting_data)
- tool["Clip"] = os.path.normpath(filepath)
+ comp = get_current_comp()
+ tool["Clip"] = comp.ReverseMapPath(os.path.normpath(filepath))
# Rename tool
if tool.Name != subset:
@@ -202,7 +188,8 @@ class CreateSaver(NewCreator):
attr_defs = [
self._get_render_target_enum(),
self._get_reviewable_bool(),
- self._get_frame_range_enum()
+ self._get_frame_range_enum(),
+ self._get_image_format_enum(),
]
return attr_defs
@@ -210,11 +197,7 @@ class CreateSaver(NewCreator):
"""Settings for publish page"""
return self.get_pre_create_attr_defs()
- def pass_pre_attributes_to_instance(
- self,
- instance_data,
- pre_create_data
- ):
+ def pass_pre_attributes_to_instance(self, instance_data, pre_create_data):
creator_attrs = instance_data["creator_attributes"] = {}
for pass_key in pre_create_data.keys():
creator_attrs[pass_key] = pre_create_data[pass_key]
@@ -237,13 +220,13 @@ class CreateSaver(NewCreator):
frame_range_options = {
"asset_db": "Current asset context",
"render_range": "From render in/out",
- "comp_range": "From composition timeline"
+ "comp_range": "From composition timeline",
}
return EnumDef(
"frame_range_source",
items=frame_range_options,
- label="Frame range source"
+ label="Frame range source",
)
def _get_reviewable_bool(self):
@@ -253,20 +236,33 @@ class CreateSaver(NewCreator):
label="Review",
)
+ def _get_image_format_enum(self):
+ image_format_options = ["exr", "tga", "tif", "png", "jpg"]
+ return EnumDef(
+ "image_format",
+ items=image_format_options,
+ default=self.image_format,
+ label="Output Image Format",
+ )
+
def apply_settings(self, project_settings):
"""Method called on initialization of plugin to apply settings."""
# plugin settings
- plugin_settings = (
- project_settings["fusion"]["create"][self.__class__.__name__]
- )
+ plugin_settings = project_settings["fusion"]["create"][
+ self.__class__.__name__
+ ]
# individual attributes
self.instance_attributes = plugin_settings.get(
- "instance_attributes") or self.instance_attributes
- self.default_variants = plugin_settings.get(
- "default_variants") or self.default_variants
- self.temp_rendering_path_template = (
- plugin_settings.get("temp_rendering_path_template")
- or self.temp_rendering_path_template
+ "instance_attributes", self.instance_attributes
+ )
+ self.default_variants = plugin_settings.get(
+ "default_variants", self.default_variants
+ )
+ self.temp_rendering_path_template = plugin_settings.get(
+ "temp_rendering_path_template", self.temp_rendering_path_template
+ )
+ self.image_format = plugin_settings.get(
+ "image_format", self.image_format
)
diff --git a/openpype/hosts/fusion/plugins/create/create_workfile.py b/openpype/hosts/fusion/plugins/create/create_workfile.py
index 8acaaa172f..4092086ea4 100644
--- a/openpype/hosts/fusion/plugins/create/create_workfile.py
+++ b/openpype/hosts/fusion/plugins/create/create_workfile.py
@@ -1,6 +1,7 @@
from openpype.hosts.fusion.api import (
get_current_comp
)
+from openpype import AYON_SERVER_ENABLED
from openpype.client import get_asset_by_name
from openpype.pipeline import (
AutoCreator,
@@ -68,6 +69,13 @@ class FusionWorkfileCreator(AutoCreator):
task_name = self.create_context.get_current_task_name()
host_name = self.create_context.host_name
+ if existing_instance is None:
+ existing_instance_asset = None
+ elif AYON_SERVER_ENABLED:
+ existing_instance_asset = existing_instance["folderPath"]
+ else:
+ existing_instance_asset = existing_instance["asset"]
+
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
@@ -75,10 +83,13 @@ class FusionWorkfileCreator(AutoCreator):
project_name, host_name
)
data = {
- "asset": asset_name,
"task": task_name,
"variant": self.default_variant
}
+ if AYON_SERVER_ENABLED:
+ data["folderPath"] = asset_name
+ else:
+ data["asset"] = asset_name
data.update(self.get_dynamic_data(
self.default_variant, task_name, asset_doc,
project_name, host_name, None
@@ -91,7 +102,7 @@ class FusionWorkfileCreator(AutoCreator):
self._add_instance_to_context(new_instance)
elif (
- existing_instance["asset"] != asset_name
+ existing_instance_asset != asset_name
or existing_instance["task"] != task_name
):
asset_doc = get_asset_by_name(project_name, asset_name)
@@ -99,6 +110,9 @@ class FusionWorkfileCreator(AutoCreator):
self.default_variant, task_name, asset_doc,
project_name, host_name
)
- existing_instance["asset"] = asset_name
+ if AYON_SERVER_ENABLED:
+ existing_instance["folderPath"] = asset_name
+ else:
+ existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name
diff --git a/openpype/hosts/fusion/plugins/load/actions.py b/openpype/hosts/fusion/plugins/load/actions.py
index f83ab433ee..94ba361b50 100644
--- a/openpype/hosts/fusion/plugins/load/actions.py
+++ b/openpype/hosts/fusion/plugins/load/actions.py
@@ -11,6 +11,7 @@ class FusionSetFrameRangeLoader(load.LoaderPlugin):
families = ["animation",
"camera",
"imagesequence",
+ "render",
"yeticache",
"pointcache",
"render"]
@@ -46,6 +47,7 @@ class FusionSetFrameRangeWithHandlesLoader(load.LoaderPlugin):
families = ["animation",
"camera",
"imagesequence",
+ "render",
"yeticache",
"pointcache",
"render"]
diff --git a/openpype/hosts/fusion/plugins/load/load_sequence.py b/openpype/hosts/fusion/plugins/load/load_sequence.py
index 20be5faaba..4401af97eb 100644
--- a/openpype/hosts/fusion/plugins/load/load_sequence.py
+++ b/openpype/hosts/fusion/plugins/load/load_sequence.py
@@ -161,7 +161,7 @@ class FusionLoadSequence(load.LoaderPlugin):
with comp_lock_and_undo_chunk(comp, "Create Loader"):
args = (-32768, -32768)
tool = comp.AddTool("Loader", *args)
- tool["Clip"] = path
+ tool["Clip"] = comp.ReverseMapPath(path)
# Set global in point to start frame (if in version.data)
start = self._get_start(context["version"], tool)
@@ -244,7 +244,7 @@ class FusionLoadSequence(load.LoaderPlugin):
"TimeCodeOffset",
),
):
- tool["Clip"] = path
+ tool["Clip"] = comp.ReverseMapPath(path)
# Set the global in to the start frame of the sequence
global_in_changed = loader_shift(tool, start, relative=False)
diff --git a/openpype/hosts/fusion/plugins/load/load_usd.py b/openpype/hosts/fusion/plugins/load/load_usd.py
new file mode 100644
index 0000000000..4f1813a646
--- /dev/null
+++ b/openpype/hosts/fusion/plugins/load/load_usd.py
@@ -0,0 +1,87 @@
+from openpype.pipeline import (
+ load,
+ get_representation_path,
+)
+from openpype.hosts.fusion.api import (
+ imprint_container,
+ get_current_comp,
+ comp_lock_and_undo_chunk
+)
+from openpype.hosts.fusion.api.lib import get_fusion_module
+
+
+class FusionLoadUSD(load.LoaderPlugin):
+ """Load USD into Fusion
+
+ Support for USD was added since Fusion 18.5
+ """
+
+ families = ["*"]
+ representations = ["*"]
+ extensions = {"usd", "usda", "usdz"}
+
+ label = "Load USD"
+ order = -10
+ icon = "code-fork"
+ color = "orange"
+
+ tool_type = "uLoader"
+
+ @classmethod
+ def apply_settings(cls, project_settings, system_settings):
+ super(FusionLoadUSD, cls).apply_settings(project_settings,
+ system_settings)
+ if cls.enabled:
+ # Enable only in Fusion 18.5+
+ fusion = get_fusion_module()
+ version = fusion.GetVersion()
+ major = version[1]
+ minor = version[2]
+ is_usd_supported = (major, minor) >= (18, 5)
+ cls.enabled = is_usd_supported
+
+ def load(self, context, name, namespace, data):
+ # Fallback to asset name when namespace is None
+ if namespace is None:
+ namespace = context['asset']['name']
+
+ # Create the Loader with the filename path set
+ comp = get_current_comp()
+ with comp_lock_and_undo_chunk(comp, "Create tool"):
+
+ path = self.fname
+
+ args = (-32768, -32768)
+ tool = comp.AddTool(self.tool_type, *args)
+ tool["Filename"] = path
+
+ imprint_container(tool,
+ name=name,
+ namespace=namespace,
+ context=context,
+ loader=self.__class__.__name__)
+
+ def switch(self, container, representation):
+ self.update(container, representation)
+
+ def update(self, container, representation):
+
+ tool = container["_tool"]
+ assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
+ comp = tool.Comp()
+
+ path = get_representation_path(representation)
+
+ with comp_lock_and_undo_chunk(comp, "Update tool"):
+ tool["Filename"] = path
+
+ # Update the imprinted representation
+ tool.SetData("avalon.representation", str(representation["_id"]))
+
+ def remove(self, container):
+ tool = container["_tool"]
+ assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
+ comp = tool.Comp()
+
+ with comp_lock_and_undo_chunk(comp, "Remove tool"):
+ tool.Delete()
diff --git a/openpype/hosts/fusion/plugins/publish/collect_render.py b/openpype/hosts/fusion/plugins/publish/collect_render.py
index 341f3f191a..a7daa0b64c 100644
--- a/openpype/hosts/fusion/plugins/publish/collect_render.py
+++ b/openpype/hosts/fusion/plugins/publish/collect_render.py
@@ -145,9 +145,11 @@ class CollectFusionRender(
start = render_instance.frameStart - render_instance.handleStart
end = render_instance.frameEnd + render_instance.handleEnd
- path = (
- render_instance.tool["Clip"]
- [render_instance.workfileComp.TIME_UNDEFINED]
+ comp = render_instance.workfileComp
+ path = comp.MapPath(
+ render_instance.tool["Clip"][
+ render_instance.workfileComp.TIME_UNDEFINED
+ ]
)
output_dir = os.path.dirname(path)
render_instance.outputDir = output_dir
diff --git a/openpype/hosts/fusion/plugins/publish/extract_render_local.py b/openpype/hosts/fusion/plugins/publish/extract_render_local.py
index 25c101cf00..068df22c06 100644
--- a/openpype/hosts/fusion/plugins/publish/extract_render_local.py
+++ b/openpype/hosts/fusion/plugins/publish/extract_render_local.py
@@ -25,20 +25,24 @@ def enabled_savers(comp, savers):
"""
passthrough_key = "TOOLB_PassThrough"
original_states = {}
- enabled_save_names = {saver.Name for saver in savers}
+ enabled_saver_names = {saver.Name for saver in savers}
+
+ all_savers = comp.GetToolList(False, "Saver").values()
+ savers_by_name = {saver.Name: saver for saver in all_savers}
+
try:
- all_savers = comp.GetToolList(False, "Saver").values()
for saver in all_savers:
original_state = saver.GetAttrs()[passthrough_key]
- original_states[saver] = original_state
+ original_states[saver.Name] = original_state
# The passthrough state we want to set (passthrough != enabled)
- state = saver.Name not in enabled_save_names
+ state = saver.Name not in enabled_saver_names
if state != original_state:
saver.SetAttrs({passthrough_key: state})
yield
finally:
- for saver, original_state in original_states.items():
+ for saver_name, original_state in original_states.items():
+ saver = savers_by_name[saver_name]
saver.SetAttrs({"TOOLB_PassThrough": original_state})
@@ -142,11 +146,15 @@ class FusionRenderLocal(
staging_dir = os.path.dirname(path)
+ files = [os.path.basename(f) for f in expected_files]
+ if len(expected_files) == 1:
+ files = files[0]
+
repre = {
"name": ext[1:],
"ext": ext[1:],
"frameStart": f"%0{padding}d" % start,
- "files": [os.path.basename(f) for f in expected_files],
+ "files": files,
"stagingDir": staging_dir,
}
diff --git a/openpype/hosts/fusion/plugins/publish/validate_saver_resolution.py b/openpype/hosts/fusion/plugins/publish/validate_saver_resolution.py
new file mode 100644
index 0000000000..efa7295d11
--- /dev/null
+++ b/openpype/hosts/fusion/plugins/publish/validate_saver_resolution.py
@@ -0,0 +1,105 @@
+import pyblish.api
+from openpype.pipeline import (
+ PublishValidationError,
+ OptionalPyblishPluginMixin,
+)
+
+from openpype.hosts.fusion.api.action import SelectInvalidAction
+from openpype.hosts.fusion.api import comp_lock_and_undo_chunk
+
+
+def get_tool_resolution(tool, frame):
+ """Return the 2D input resolution to a Fusion tool
+
+ If the current tool hasn't been rendered its input resolution
+ hasn't been saved. To combat this, add an expression in
+ the comments field to read the resolution
+
+ Args
+ tool (Fusion Tool): The tool to query input resolution
+ frame (int): The frame to query the resolution on.
+
+ Returns:
+ tuple: width, height as 2-tuple of integers
+
+ """
+ comp = tool.Composition
+
+ # False undo removes the undo-stack from the undo list
+ with comp_lock_and_undo_chunk(comp, "Read resolution", False):
+ # Save old comment
+ old_comment = ""
+ has_expression = False
+ if tool["Comments"][frame] != "":
+ if tool["Comments"].GetExpression() is not None:
+ has_expression = True
+ old_comment = tool["Comments"].GetExpression()
+ tool["Comments"].SetExpression(None)
+ else:
+ old_comment = tool["Comments"][frame]
+ tool["Comments"][frame] = ""
+
+ # Get input width
+ tool["Comments"].SetExpression("self.Input.OriginalWidth")
+ width = int(tool["Comments"][frame])
+
+ # Get input height
+ tool["Comments"].SetExpression("self.Input.OriginalHeight")
+ height = int(tool["Comments"][frame])
+
+ # Reset old comment
+ tool["Comments"].SetExpression(None)
+ if has_expression:
+ tool["Comments"].SetExpression(old_comment)
+ else:
+ tool["Comments"][frame] = old_comment
+
+ return width, height
+
+
+class ValidateSaverResolution(
+ pyblish.api.InstancePlugin, OptionalPyblishPluginMixin
+):
+ """Validate that the saver input resolution matches the asset resolution"""
+
+ order = pyblish.api.ValidatorOrder
+ label = "Validate Asset Resolution"
+ families = ["render"]
+ hosts = ["fusion"]
+ optional = True
+ actions = [SelectInvalidAction]
+
+ def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
+ resolution = self.get_resolution(instance)
+ expected_resolution = self.get_expected_resolution(instance)
+ if resolution != expected_resolution:
+ raise PublishValidationError(
+ "The input's resolution does not match "
+ "the asset's resolution {}x{}.\n\n"
+ "The input's resolution is {}x{}.".format(
+ expected_resolution[0], expected_resolution[1],
+ resolution[0], resolution[1]
+ )
+ )
+
+ @classmethod
+ def get_invalid(cls, instance):
+ resolution = cls.get_resolution(instance)
+ expected_resolution = cls.get_expected_resolution(instance)
+ if resolution != expected_resolution:
+ saver = instance.data["tool"]
+ return [saver]
+
+ @classmethod
+ def get_resolution(cls, instance):
+ saver = instance.data["tool"]
+ first_frame = instance.data["frameStartHandle"]
+ return get_tool_resolution(saver, frame=first_frame)
+
+ @classmethod
+ def get_expected_resolution(cls, instance):
+ data = instance.data["assetEntity"]["data"]
+ return data["resolutionWidth"], data["resolutionHeight"]
diff --git a/openpype/hosts/harmony/api/TB_sceneOpened.js b/openpype/hosts/harmony/api/TB_sceneOpened.js
index a284a6ec5c..48daf094dd 100644
--- a/openpype/hosts/harmony/api/TB_sceneOpened.js
+++ b/openpype/hosts/harmony/api/TB_sceneOpened.js
@@ -13,7 +13,7 @@ var LD_OPENHARMONY_PATH = System.getenv('LIB_OPENHARMONY_PATH');
LD_OPENHARMONY_PATH = LD_OPENHARMONY_PATH + '/openHarmony.js';
LD_OPENHARMONY_PATH = LD_OPENHARMONY_PATH.replace(/\\/g, "/");
include(LD_OPENHARMONY_PATH);
-this.__proto__['$'] = $;
+//this.__proto__['$'] = $;
function Client() {
var self = this;
diff --git a/openpype/hosts/harmony/plugins/publish/extract_render.py b/openpype/hosts/harmony/plugins/publish/extract_render.py
index 5825d95a4a..96a375716b 100644
--- a/openpype/hosts/harmony/plugins/publish/extract_render.py
+++ b/openpype/hosts/harmony/plugins/publish/extract_render.py
@@ -59,8 +59,8 @@ class ExtractRender(pyblish.api.InstancePlugin):
args = [application_path, "-batch",
"-frames", str(frame_start), str(frame_end),
- "-scene", scene_path]
- self.log.info(f"running [ {application_path} {' '.join(args)}")
+ scene_path]
+ self.log.info(f"running: {' '.join(args)}")
proc = subprocess.Popen(
args,
stdout=subprocess.PIPE,
diff --git a/openpype/hosts/hiero/api/menu.py b/openpype/hosts/hiero/api/menu.py
index 9967e9c875..ca611570cc 100644
--- a/openpype/hosts/hiero/api/menu.py
+++ b/openpype/hosts/hiero/api/menu.py
@@ -95,18 +95,18 @@ def menu_install():
menu.addSeparator()
- publish_action = menu.addAction("Publish...")
- publish_action.setIcon(QtGui.QIcon("icons:Output.png"))
- publish_action.triggered.connect(
- lambda *args: publish(hiero.ui.mainWindow())
- )
-
creator_action = menu.addAction("Create...")
creator_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
creator_action.triggered.connect(
lambda: host_tools.show_creator(parent=main_window)
)
+ publish_action = menu.addAction("Publish...")
+ publish_action.setIcon(QtGui.QIcon("icons:Output.png"))
+ publish_action.triggered.connect(
+ lambda *args: publish(hiero.ui.mainWindow())
+ )
+
loader_action = menu.addAction("Load...")
loader_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
loader_action.triggered.connect(
diff --git a/openpype/hosts/hiero/api/plugin.py b/openpype/hosts/hiero/api/plugin.py
index 52f96261b2..b0c73e41fb 100644
--- a/openpype/hosts/hiero/api/plugin.py
+++ b/openpype/hosts/hiero/api/plugin.py
@@ -11,7 +11,6 @@ import qargparse
from openpype.settings import get_current_project_settings
from openpype.lib import Logger
from openpype.pipeline import LoaderPlugin, LegacyCreator
-from openpype.pipeline.context_tools import get_current_project_asset
from openpype.pipeline.load import get_representation_path_from_context
from . import lib
@@ -32,7 +31,7 @@ def load_stylesheet():
class CreatorWidget(QtWidgets.QDialog):
# output items
- items = dict()
+ items = {}
def __init__(self, name, info, ui_inputs, parent=None):
super(CreatorWidget, self).__init__(parent)
@@ -494,9 +493,8 @@ class ClipLoader:
joint `data` key with asset.data dict into the representation
"""
- asset_name = self.context["representation"]["context"]["asset"]
- asset_doc = get_current_project_asset(asset_name)
- log.debug("__ asset_doc: {}".format(pformat(asset_doc)))
+
+ asset_doc = self.context["asset"]
self.data["assetData"] = asset_doc["data"]
def _make_track_item(self, source_bin_item, audio=False):
@@ -644,8 +642,8 @@ class PublishClip:
Returns:
hiero.core.TrackItem: hiero track item object with pype tag
"""
- vertical_clip_match = dict()
- tag_data = dict()
+ vertical_clip_match = {}
+ tag_data = {}
types = {
"shot": "shot",
"folder": "folder",
@@ -707,9 +705,10 @@ class PublishClip:
self._create_parents()
def convert(self):
-
# solve track item data and add them to tag data
- self._convert_to_tag_data()
+ tag_hierarchy_data = self._convert_to_tag_data()
+
+ self.tag_data.update(tag_hierarchy_data)
# if track name is in review track name and also if driving track name
# is not in review track name: skip tag creation
@@ -723,16 +722,23 @@ class PublishClip:
if self.rename:
# rename track item
self.track_item.setName(new_name)
- self.tag_data["asset"] = new_name
+ self.tag_data["asset_name"] = new_name
else:
- self.tag_data["asset"] = self.ti_name
+ self.tag_data["asset_name"] = self.ti_name
self.tag_data["hierarchyData"]["shot"] = self.ti_name
+ # AYON unique identifier
+ folder_path = "/{}/{}".format(
+ tag_hierarchy_data["hierarchy"],
+ self.tag_data["asset_name"]
+ )
+ self.tag_data["folderPath"] = folder_path
if self.tag_data["heroTrack"] and self.review_layer:
self.tag_data.update({"reviewTrack": self.review_layer})
else:
self.tag_data.update({"reviewTrack": None})
+ # TODO: remove debug print
log.debug("___ self.tag_data: {}".format(
pformat(self.tag_data)
))
@@ -891,7 +897,7 @@ class PublishClip:
tag_hierarchy_data = hero_data
# add data to return data dict
- self.tag_data.update(tag_hierarchy_data)
+ return tag_hierarchy_data
def _solve_tag_hierarchy_data(self, hierarchy_formatting_data):
""" Solve tag data from hierarchy data and templates. """
diff --git a/openpype/hosts/hiero/plugins/publish/collect_frame_tag_instances.py b/openpype/hosts/hiero/plugins/publish/collect_frame_tag_instances.py
index 982a34efd6..79bf67b336 100644
--- a/openpype/hosts/hiero/plugins/publish/collect_frame_tag_instances.py
+++ b/openpype/hosts/hiero/plugins/publish/collect_frame_tag_instances.py
@@ -5,6 +5,8 @@ import json
import pyblish.api
+from openpype.client import get_asset_name_identifier
+
class CollectFrameTagInstances(pyblish.api.ContextPlugin):
"""Collect frames from tags.
@@ -99,6 +101,9 @@ class CollectFrameTagInstances(pyblish.api.ContextPlugin):
# first collect all available subset tag frames
subset_data = {}
+ context_asset_doc = context.data["assetEntity"]
+ context_asset_name = get_asset_name_identifier(context_asset_doc)
+
for tag_data in sequence_tags:
frame = int(tag_data["start"])
@@ -115,7 +120,7 @@ class CollectFrameTagInstances(pyblish.api.ContextPlugin):
subset_data[subset] = {
"frames": [frame],
"format": tag_data["format"],
- "asset": context.data["assetEntity"]["name"]
+ "asset": context_asset_name
}
return subset_data
diff --git a/openpype/hosts/hiero/plugins/publish/precollect_instances.py b/openpype/hosts/hiero/plugins/publish/precollect_instances.py
index 3f9da2cf60..590d7b7050 100644
--- a/openpype/hosts/hiero/plugins/publish/precollect_instances.py
+++ b/openpype/hosts/hiero/plugins/publish/precollect_instances.py
@@ -1,9 +1,12 @@
import pyblish
+
+from openpype import AYON_SERVER_ENABLED
from openpype.pipeline.editorial import is_overlapping_otio_ranges
+
from openpype.hosts.hiero import api as phiero
from openpype.hosts.hiero.api.otio import hiero_export
-import hiero
+import hiero
# # developer reload modules
from pprint import pformat
@@ -80,25 +83,24 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
if k not in ("id", "applieswhole", "label")
})
- asset = tag_data["asset"]
+ asset, asset_name = self._get_asset_data(tag_data)
+
subset = tag_data["subset"]
# insert family into families
- family = tag_data["family"]
families = [str(f) for f in tag_data["families"]]
- families.insert(0, str(family))
# form label
- label = asset
- if asset != clip_name:
+ label = "{} -".format(asset)
+ if asset_name != clip_name:
label += " ({})".format(clip_name)
label += " {}".format(subset)
- label += " {}".format("[" + ", ".join(families) + "]")
data.update({
"name": "{}_{}".format(asset, subset),
"label": label,
"asset": asset,
+ "asset_name": asset_name,
"item": track_item,
"families": families,
"publish": tag_data["publish"],
@@ -176,9 +178,9 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
})
def create_shot_instance(self, context, **data):
+ subset = "shotMain"
master_layer = data.get("heroTrack")
hierarchy_data = data.get("hierarchyData")
- asset = data.get("asset")
item = data.get("item")
clip_name = item.name()
@@ -189,23 +191,21 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
return
asset = data["asset"]
- subset = "shotMain"
+ asset_name = data["asset_name"]
# insert family into families
family = "shot"
# form label
- label = asset
- if asset != clip_name:
+ label = "{} -".format(asset)
+ if asset_name != clip_name:
label += " ({}) ".format(clip_name)
label += " {}".format(subset)
- label += " [{}]".format(family)
data.update({
"name": "{}_{}".format(asset, subset),
"label": label,
"subset": subset,
- "asset": asset,
"family": family,
"families": []
})
@@ -215,7 +215,33 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
self.log.debug(
"_ instance.data: {}".format(pformat(instance.data)))
+ def _get_asset_data(self, data):
+ folder_path = data.pop("folderPath", None)
+
+ if data.get("asset_name"):
+ asset_name = data["asset_name"]
+ else:
+ asset_name = data["asset"]
+
+ # backward compatibility for clip tags
+ # which are missing folderPath key
+ # TODO remove this in future versions
+ if not folder_path:
+ hierarchy_path = data["hierarchy"]
+ folder_path = "/{}/{}".format(
+ hierarchy_path,
+ asset_name
+ )
+
+ if AYON_SERVER_ENABLED:
+ asset = folder_path
+ else:
+ asset = asset_name
+
+ return asset, asset_name
+
def create_audio_instance(self, context, **data):
+ subset = "audioMain"
master_layer = data.get("heroTrack")
if not master_layer:
@@ -230,23 +256,21 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
return
asset = data["asset"]
- subset = "audioMain"
+ asset_name = data["asset_name"]
# insert family into families
family = "audio"
# form label
- label = asset
- if asset != clip_name:
+ label = "{} -".format(asset)
+ if asset_name != clip_name:
label += " ({}) ".format(clip_name)
label += " {}".format(subset)
- label += " [{}]".format(family)
data.update({
"name": "{}_{}".format(asset, subset),
"label": label,
"subset": subset,
- "asset": asset,
"family": family,
"families": ["clip"]
})
diff --git a/openpype/hosts/hiero/plugins/publish/precollect_workfile.py b/openpype/hosts/hiero/plugins/publish/precollect_workfile.py
index 5a66581531..8abb0885c6 100644
--- a/openpype/hosts/hiero/plugins/publish/precollect_workfile.py
+++ b/openpype/hosts/hiero/plugins/publish/precollect_workfile.py
@@ -7,6 +7,7 @@ from qtpy.QtGui import QPixmap
import hiero.ui
+from openpype import AYON_SERVER_ENABLED
from openpype.hosts.hiero.api.otio import hiero_export
@@ -17,9 +18,11 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder - 0.491
def process(self, context):
-
asset = context.data["asset"]
- subset = "workfile"
+ asset_name = asset
+ if AYON_SERVER_ENABLED:
+ asset_name = asset_name.split("/")[-1]
+
active_timeline = hiero.ui.activeSequence()
project = active_timeline.project()
fps = active_timeline.framerate().toFloat()
@@ -27,7 +30,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
# adding otio timeline to context
otio_timeline = hiero_export.create_otio_timeline()
- # get workfile thumnail paths
+ # get workfile thumbnail paths
tmp_staging = tempfile.mkdtemp(prefix="pyblish_tmp_")
thumbnail_name = "workfile_thumbnail.png"
thumbnail_path = os.path.join(tmp_staging, thumbnail_name)
@@ -49,8 +52,8 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
}
# get workfile paths
- curent_file = project.path()
- staging_dir, base_name = os.path.split(curent_file)
+ current_file = project.path()
+ staging_dir, base_name = os.path.split(current_file)
# creating workfile representation
workfile_representation = {
@@ -59,13 +62,16 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
'files': base_name,
"stagingDir": staging_dir,
}
-
+ family = "workfile"
instance_data = {
- "name": "{}_{}".format(asset, subset),
- "asset": asset,
- "subset": "{}{}".format(asset, subset.capitalize()),
+ "label": "{} - {}Main".format(
+ asset, family),
+ "name": "{}_{}".format(asset_name, family),
+ "asset": context.data["asset"],
+ # TODO use 'get_subset_name'
+ "subset": "{}{}Main".format(asset_name, family.capitalize()),
"item": project,
- "family": "workfile",
+ "family": family,
"families": [],
"representations": [workfile_representation, thumb_representation]
}
@@ -78,7 +84,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
"activeProject": project,
"activeTimeline": active_timeline,
"otioTimeline": otio_timeline,
- "currentFile": curent_file,
+ "currentFile": current_file,
"colorspace": self.get_colorspace(project),
"fps": fps
}
diff --git a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py
index 767f7c30f7..37370497a5 100644
--- a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py
+++ b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py
@@ -1,5 +1,6 @@
from pyblish import api
-from openpype.client import get_assets
+
+from openpype.client import get_assets, get_asset_name_identifier
class CollectAssetBuilds(api.ContextPlugin):
@@ -19,10 +20,13 @@ class CollectAssetBuilds(api.ContextPlugin):
def process(self, context):
project_name = context.data["projectName"]
asset_builds = {}
- for asset in get_assets(project_name):
- if asset["data"]["entityType"] == "AssetBuild":
- self.log.debug("Found \"{}\" in database.".format(asset))
- asset_builds[asset["name"]] = asset
+ for asset_doc in get_assets(project_name):
+ if asset_doc["data"].get("entityType") != "AssetBuild":
+ continue
+
+ asset_name = get_asset_name_identifier(asset_doc)
+ self.log.debug("Found \"{}\" in database.".format(asset_doc))
+ asset_builds[asset_name] = asset_doc
for instance in context:
if instance.data["family"] != "clip":
@@ -50,9 +54,7 @@ class CollectAssetBuilds(api.ContextPlugin):
# Collect asset builds.
data = {"assetbuilds": []}
for name in asset_names:
- data["assetbuilds"].append(
- asset_builds[name]
- )
+ data["assetbuilds"].append(asset_builds[name])
self.log.debug(
"Found asset builds: {}".format(data["assetbuilds"])
)
diff --git a/openpype/hosts/houdini/api/creator_node_shelves.py b/openpype/hosts/houdini/api/creator_node_shelves.py
index 1f9fef7417..14662dc419 100644
--- a/openpype/hosts/houdini/api/creator_node_shelves.py
+++ b/openpype/hosts/houdini/api/creator_node_shelves.py
@@ -173,6 +173,7 @@ def install():
os.remove(filepath)
icon = get_openpype_icon_filepath()
+ tab_menu_label = os.environ.get("AVALON_LABEL") or "AYON"
# Create context only to get creator plugins, so we don't reset and only
# populate what we need to retrieve the list of creator plugins
@@ -197,14 +198,14 @@ def install():
if not network_categories:
continue
- key = "openpype_create.{}".format(identifier)
+ key = "ayon_create.{}".format(identifier)
log.debug(f"Registering {key}")
script = CREATE_SCRIPT.format(identifier=identifier)
data = {
"script": script,
"language": hou.scriptLanguage.Python,
"icon": icon,
- "help": "Create OpenPype publish instance for {}".format(
+ "help": "Create Ayon publish instance for {}".format(
creator.label
),
"help_url": None,
@@ -213,7 +214,7 @@ def install():
"cop_viewer_categories": [],
"network_op_type": None,
"viewer_op_type": None,
- "locations": ["OpenPype"]
+ "locations": [tab_menu_label]
}
label = "Create {}".format(creator.label)
tool = hou.shelves.tool(key)
diff --git a/openpype/hosts/houdini/api/lib.py b/openpype/hosts/houdini/api/lib.py
index 3db18ca69a..edd50f10c1 100644
--- a/openpype/hosts/houdini/api/lib.py
+++ b/openpype/hosts/houdini/api/lib.py
@@ -11,14 +11,22 @@ import json
import six
from openpype.lib import StringTemplate
-from openpype.client import get_asset_by_name
+from openpype.client import get_project, get_asset_by_name
from openpype.settings import get_current_project_settings
-from openpype.pipeline import get_current_project_name, get_current_asset_name
-from openpype.pipeline.context_tools import (
- get_current_context_template_data,
- get_current_project_asset
+from openpype.pipeline import (
+ Anatomy,
+ get_current_project_name,
+ get_current_asset_name,
+ registered_host,
+ get_current_context,
+ get_current_host_name,
)
+from openpype.pipeline.create import CreateContext
+from openpype.pipeline.template_data import get_template_data
+from openpype.pipeline.context_tools import get_current_project_asset
from openpype.widgets import popup
+from openpype.tools.utils.host_tools import get_tool_by_name
+
import hou
@@ -114,36 +122,66 @@ def get_id_required_nodes():
def get_output_parameter(node):
- """Return the render output parameter name of the given node
+ """Return the render output parameter of the given node
Example:
root = hou.node("/obj")
my_alembic_node = root.createNode("alembic")
get_output_parameter(my_alembic_node)
- # Result: "output"
+ >>> "filename"
+
+ Notes:
+ I'm using node.type().name() to get on par with the creators,
+ Because the return value of `node.type().name()` is the
+ same string value used in creators
+ e.g. instance_data.update({"node_type": "alembic"})
+
+ Rop nodes in different network categories have
+ the same output parameter.
+ So, I took that into consideration as a hint for
+ future development.
Args:
node(hou.Node): node instance
Returns:
hou.Parm
-
"""
node_type = node.type().name()
- if node_type == "geometry":
- return node.parm("sopoutput")
- elif node_type == "alembic":
+
+ # Figure out which type of node is being rendered
+ if node_type in {"alembic", "rop_alembic"}:
return node.parm("filename")
+ elif node_type == "arnold":
+ if node_type.evalParm("ar_ass_export_enable"):
+ return node.parm("ar_ass_file")
+ return node.parm("ar_picture")
+ elif node_type in {
+ "geometry",
+ "rop_geometry",
+ "filmboxfbx",
+ "rop_fbx"
+ }:
+ return node.parm("sopoutput")
elif node_type == "comp":
return node.parm("copoutput")
- elif node_type == "opengl":
+ elif node_type in {"karma", "opengl"}:
return node.parm("picture")
- elif node_type == "arnold":
- if node.evalParm("ar_ass_export_enable"):
- return node.parm("ar_ass_file")
+ elif node_type == "ifd": # Mantra
+ if node.evalParm("soho_outputmode"):
+ return node.parm("soho_diskfile")
+ return node.parm("vm_picture")
elif node_type == "Redshift_Proxy_Output":
return node.parm("RS_archive_file")
+ elif node_type == "Redshift_ROP":
+ return node.parm("RS_outputFileNamePrefix")
+ elif node_type in {"usd", "usd_rop", "usdexport"}:
+ return node.parm("lopoutput")
+ elif node_type in {"usdrender", "usdrender_rop"}:
+ return node.parm("outputimage")
+ elif node_type == "vray_renderer":
+ return node.parm("SettingsOutput_img_file_path")
raise TypeError("Node type '%s' not supported" % node_type)
@@ -325,52 +363,61 @@ def imprint(node, data, update=False):
return
current_parms = {p.name(): p for p in node.spareParms()}
- update_parms = []
- templates = []
+ update_parm_templates = []
+ new_parm_templates = []
for key, value in data.items():
if value is None:
continue
- parm = get_template_from_value(key, value)
+ parm_template = get_template_from_value(key, value)
if key in current_parms:
- if node.evalParm(key) == data[key]:
+ if node.evalParm(key) == value:
continue
if not update:
log.debug(f"{key} already exists on {node}")
else:
log.debug(f"replacing {key}")
- update_parms.append(parm)
+ update_parm_templates.append(parm_template)
continue
- templates.append(parm)
+ new_parm_templates.append(parm_template)
- parm_group = node.parmTemplateGroup()
- parm_folder = parm_group.findFolder("Extra")
-
- # if folder doesn't exist yet, create one and append to it,
- # else append to existing one
- if not parm_folder:
- parm_folder = hou.FolderParmTemplate("folder", "Extra")
- parm_folder.setParmTemplates(templates)
- parm_group.append(parm_folder)
- else:
- for template in templates:
- parm_group.appendToFolder(parm_folder, template)
- # this is needed because the pointer to folder
- # is for some reason lost every call to `appendToFolder()`
- parm_folder = parm_group.findFolder("Extra")
-
- node.setParmTemplateGroup(parm_group)
-
- # TODO: Updating is done here, by calling probably deprecated functions.
- # This needs to be addressed in the future.
- if not update_parms:
+ if not new_parm_templates and not update_parm_templates:
return
- for parm in update_parms:
- node.replaceSpareParmTuple(parm.name(), parm)
+ parm_group = node.parmTemplateGroup()
+
+ # Add new parm templates
+ if new_parm_templates:
+ parm_folder = parm_group.findFolder("Extra")
+
+ # if folder doesn't exist yet, create one and append to it,
+ # else append to existing one
+ if not parm_folder:
+ parm_folder = hou.FolderParmTemplate("folder", "Extra")
+ parm_folder.setParmTemplates(new_parm_templates)
+ parm_group.append(parm_folder)
+ else:
+ # Add to parm template folder instance then replace with updated
+ # one in parm template group
+ for template in new_parm_templates:
+ parm_folder.addParmTemplate(template)
+ parm_group.replace(parm_folder.name(), parm_folder)
+
+ # Update existing parm templates
+ for parm_template in update_parm_templates:
+ parm_group.replace(parm_template.name(), parm_template)
+
+ # When replacing a parm with a parm of the same name it preserves its
+ # value if before the replacement the parm was not at the default,
+ # because it has a value override set. Since we're trying to update the
+ # parm by using the new value as `default` we enforce the parm is at
+ # default state
+ node.parm(parm_template.name()).revertToDefaults()
+
+ node.setParmTemplateGroup(parm_group)
def lsattr(attr, value=None, root="/"):
@@ -552,29 +599,56 @@ def get_template_from_value(key, value):
return parm
-def get_frame_data(node):
- """Get the frame data: start frame, end frame and steps.
+def get_frame_data(node, log=None):
+ """Get the frame data: `frameStartHandle`, `frameEndHandle`
+ and `byFrameStep`.
+
+ This function uses Houdini node's `trange`, `t1, `t2` and `t3`
+ parameters as the source of truth for the full inclusive frame
+ range to render, as such these are considered as the frame
+ range including the handles.
+
+ The non-inclusive frame start and frame end without handles
+ can be computed by subtracting the handles from the inclusive
+ frame range.
Args:
- node(hou.Node)
+ node (hou.Node): ROP node to retrieve frame range from,
+ the frame range is assumed to be the frame range
+ *including* the start and end handles.
Returns:
- dict: frame data for star, end and steps.
+ dict: frame data for `frameStartHandle`, `frameEndHandle`
+ and `byFrameStep`.
"""
+
+ if log is None:
+ log = self.log
+
data = {}
if node.parm("trange") is None:
-
+ log.debug(
+ "Node has no 'trange' parameter: {}".format(node.path())
+ )
return data
if node.evalParm("trange") == 0:
- self.log.debug("trange is 0")
- return data
+ data["frameStartHandle"] = hou.intFrame()
+ data["frameEndHandle"] = hou.intFrame()
+ data["byFrameStep"] = 1.0
- data["frameStart"] = node.evalParm("f1")
- data["frameEnd"] = node.evalParm("f2")
- data["steps"] = node.evalParm("f3")
+ log.info(
+ "Node '{}' has 'Render current frame' set.\n"
+ "Asset Handles are ignored.\n"
+ "frameStart and frameEnd are set to the "
+ "current frame.".format(node.path())
+ )
+ else:
+ data["frameStartHandle"] = int(node.evalParm("f1"))
+ data["frameEndHandle"] = int(node.evalParm("f2"))
+ data["byFrameStep"] = node.evalParm("f3")
return data
@@ -753,6 +827,45 @@ def get_camera_from_container(container):
return cameras[0]
+def get_current_context_template_data_with_asset_data():
+ """
+ TODOs:
+ Support both 'assetData' and 'folderData' in future.
+ """
+
+ context = get_current_context()
+ project_name = context["project_name"]
+ asset_name = context["asset_name"]
+ task_name = context["task_name"]
+ host_name = get_current_host_name()
+
+ anatomy = Anatomy(project_name)
+ project_doc = get_project(project_name)
+ asset_doc = get_asset_by_name(project_name, asset_name)
+
+ # get context specific vars
+ asset_data = asset_doc["data"]
+
+ # compute `frameStartHandle` and `frameEndHandle`
+ frame_start = asset_data.get("frameStart")
+ frame_end = asset_data.get("frameEnd")
+ handle_start = asset_data.get("handleStart")
+ handle_end = asset_data.get("handleEnd")
+ if frame_start is not None and handle_start is not None:
+ asset_data["frameStartHandle"] = frame_start - handle_start
+
+ if frame_end is not None and handle_end is not None:
+ asset_data["frameEndHandle"] = frame_end + handle_end
+
+ template_data = get_template_data(
+ project_doc, asset_doc, task_name, host_name
+ )
+ template_data["root"] = anatomy.roots
+ template_data["assetData"] = asset_data
+
+ return template_data
+
+
def get_context_var_changes():
"""get context var changes."""
@@ -772,7 +885,7 @@ def get_context_var_changes():
return houdini_vars_to_update
# Get Template data
- template_data = get_current_context_template_data()
+ template_data = get_current_context_template_data_with_asset_data()
# Set Houdini Vars
for item in houdini_vars:
@@ -847,3 +960,97 @@ def update_houdini_vars_context_dialog():
dialog.on_clicked.connect(update_houdini_vars_context)
dialog.show()
+
+
+def publisher_show_and_publish(comment=None):
+ """Open publisher window and trigger publishing action.
+
+ Args:
+ comment (Optional[str]): Comment to set in publisher window.
+ """
+
+ main_window = get_main_window()
+ publisher_window = get_tool_by_name(
+ tool_name="publisher",
+ parent=main_window,
+ )
+ publisher_window.show_and_publish(comment)
+
+
+def find_rop_input_dependencies(input_tuple):
+ """Self publish from ROP nodes.
+
+ Arguments:
+ tuple (hou.RopNode.inputDependencies) which can be a nested tuples
+ represents the input dependencies of the ROP node, consisting of ROPs,
+ and the frames that need to be be rendered prior to rendering the ROP.
+
+ Returns:
+ list of the RopNode.path() that can be found inside
+ the input tuple.
+ """
+
+ out_list = []
+ if isinstance(input_tuple[0], hou.RopNode):
+ return input_tuple[0].path()
+
+ if isinstance(input_tuple[0], tuple):
+ for item in input_tuple:
+ out_list.append(find_rop_input_dependencies(item))
+
+ return out_list
+
+
+def self_publish():
+ """Self publish from ROP nodes.
+
+ Firstly, it gets the node and its dependencies.
+ Then, it deactivates all other ROPs
+ And finaly, it triggers the publishing action.
+ """
+
+ result, comment = hou.ui.readInput(
+ "Add Publish Comment",
+ buttons=("Publish", "Cancel"),
+ title="Publish comment",
+ close_choice=1
+ )
+
+ if result:
+ return
+
+ current_node = hou.node(".")
+ inputs_paths = find_rop_input_dependencies(
+ current_node.inputDependencies()
+ )
+ inputs_paths.append(current_node.path())
+
+ host = registered_host()
+ context = CreateContext(host, reset=True)
+
+ for instance in context.instances:
+ node_path = instance.data.get("instance_node")
+ instance["active"] = node_path and node_path in inputs_paths
+
+ context.save_changes()
+
+ publisher_show_and_publish(comment)
+
+
+def add_self_publish_button(node):
+ """Adds a self publish button to the rop node."""
+
+ label = os.environ.get("AVALON_LABEL") or "AYON"
+
+ button_parm = hou.ButtonParmTemplate(
+ "ayon_self_publish",
+ "{} Publish".format(label),
+ script_callback="from openpype.hosts.houdini.api.lib import "
+ "self_publish; self_publish()",
+ script_callback_language=hou.scriptLanguage.Python,
+ join_with_next=True
+ )
+
+ template = node.parmTemplateGroup()
+ template.insertBefore((0,), button_parm)
+ node.setParmTemplateGroup(template)
diff --git a/openpype/hosts/houdini/api/pipeline.py b/openpype/hosts/houdini/api/pipeline.py
index f8db45c56b..d0f45c36b5 100644
--- a/openpype/hosts/houdini/api/pipeline.py
+++ b/openpype/hosts/houdini/api/pipeline.py
@@ -3,7 +3,6 @@
import os
import sys
import logging
-import contextlib
import hou # noqa
@@ -66,15 +65,7 @@ class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
register_event_callback("open", on_open)
register_event_callback("new", on_new)
- pyblish.api.register_callback(
- "instanceToggled", on_pyblish_instance_toggled
- )
-
self._has_been_setup = True
- # add houdini vendor packages
- hou_pythonpath = os.path.join(HOUDINI_HOST_DIR, "vendor")
-
- sys.path.append(hou_pythonpath)
# Set asset settings for the empty scene directly after launch of
# Houdini so it initializes into the correct scene FPS,
@@ -406,54 +397,3 @@ def _set_context_settings():
lib.reset_framerange()
lib.update_houdini_vars_context()
-
-
-def on_pyblish_instance_toggled(instance, new_value, old_value):
- """Toggle saver tool passthrough states on instance toggles."""
- @contextlib.contextmanager
- def main_take(no_update=True):
- """Enter root take during context"""
- original_take = hou.takes.currentTake()
- original_update_mode = hou.updateModeSetting()
- root = hou.takes.rootTake()
- has_changed = False
- try:
- if original_take != root:
- has_changed = True
- if no_update:
- hou.setUpdateMode(hou.updateMode.Manual)
- hou.takes.setCurrentTake(root)
- yield
- finally:
- if has_changed:
- if no_update:
- hou.setUpdateMode(original_update_mode)
- hou.takes.setCurrentTake(original_take)
-
- if not instance.data.get("_allowToggleBypass", True):
- return
-
- nodes = instance[:]
- if not nodes:
- return
-
- # Assume instance node is first node
- instance_node = nodes[0]
-
- if not hasattr(instance_node, "isBypassed"):
- # Likely not a node that can actually be bypassed
- log.debug("Can't bypass node: %s", instance_node.path())
- return
-
- if instance_node.isBypassed() != (not old_value):
- print("%s old bypass state didn't match old instance state, "
- "updating anyway.." % instance_node.path())
-
- try:
- # Go into the main take, because when in another take changing
- # the bypass state of a note cannot be done due to it being locked
- # by default.
- with main_take(no_update=True):
- instance_node.bypass(not new_value)
- except hou.PermissionError as exc:
- log.warning("%s - %s", instance_node.path(), exc)
diff --git a/openpype/hosts/houdini/api/plugin.py b/openpype/hosts/houdini/api/plugin.py
index a0a7dcc2e4..e162d0e461 100644
--- a/openpype/hosts/houdini/api/plugin.py
+++ b/openpype/hosts/houdini/api/plugin.py
@@ -6,6 +6,8 @@ from abc import (
)
import six
import hou
+
+from openpype import AYON_SERVER_ENABLED
from openpype.pipeline import (
CreatorError,
LegacyCreator,
@@ -13,7 +15,7 @@ from openpype.pipeline import (
CreatedInstance
)
from openpype.lib import BoolDef
-from .lib import imprint, read, lsattr
+from .lib import imprint, read, lsattr, add_self_publish_button
class OpenPypeCreatorError(CreatorError):
@@ -142,12 +144,13 @@ class HoudiniCreatorBase(object):
@staticmethod
def create_instance_node(
- node_name, parent,
- node_type="geometry"):
+ asset_name, node_name, parent, node_type="geometry"
+ ):
# type: (str, str, str) -> hou.Node
"""Create node representing instance.
Arguments:
+ asset_name (str): Asset name.
node_name (str): Name of the new node.
parent (str): Name of the parent node.
node_type (str, optional): Type of the node.
@@ -168,6 +171,7 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase):
"""Base class for most of the Houdini creator plugins."""
selected_nodes = []
settings_name = None
+ add_publish_button = False
def create(self, subset_name, instance_data, pre_create_data):
try:
@@ -181,8 +185,13 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase):
if node_type is None:
node_type = "geometry"
+ if AYON_SERVER_ENABLED:
+ asset_name = instance_data["folderPath"]
+ else:
+ asset_name = instance_data["asset"]
+
instance_node = self.create_instance_node(
- subset_name, "/out", node_type)
+ asset_name, subset_name, "/out", node_type)
self.customize_node_look(instance_node)
@@ -195,6 +204,10 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase):
self)
self._add_instance_to_context(instance)
self.imprint(instance_node, instance.data_to_store())
+
+ if self.add_publish_button:
+ add_self_publish_button(instance_node)
+
return instance
except hou.Error as er:
@@ -245,6 +258,7 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase):
key: changes[key].new_value
for key in changes.changed_keys
}
+ # Update parm templates and values
self.imprint(
instance_node,
new_values,
@@ -316,6 +330,12 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase):
def apply_settings(self, project_settings):
"""Method called on initialization of plugin to apply settings."""
+ # Apply General Settings
+ houdini_general_settings = project_settings["houdini"]["general"]
+ self.add_publish_button = houdini_general_settings.get(
+ "add_self_publish_button", False)
+
+ # Apply Creator Settings
settings_name = self.settings_name
if settings_name is None:
settings_name = self.__class__.__name__
diff --git a/openpype/hosts/houdini/api/shelves.py b/openpype/hosts/houdini/api/shelves.py
index 21e44e494a..5093a90988 100644
--- a/openpype/hosts/houdini/api/shelves.py
+++ b/openpype/hosts/houdini/api/shelves.py
@@ -6,8 +6,12 @@ import platform
from openpype.settings import get_project_settings
from openpype.pipeline import get_current_project_name
+from openpype.lib import StringTemplate
+
import hou
+from .lib import get_current_context_template_data_with_asset_data
+
log = logging.getLogger("openpype.hosts.houdini.shelves")
@@ -20,23 +24,33 @@ def generate_shelves():
# load configuration of houdini shelves
project_name = get_current_project_name()
project_settings = get_project_settings(project_name)
- shelves_set_config = project_settings["houdini"]["shelves"]
+ shelves_configs = project_settings["houdini"]["shelves"]
- if not shelves_set_config:
+ if not shelves_configs:
log.debug("No custom shelves found in project settings.")
return
- for shelf_set_config in shelves_set_config:
- shelf_set_filepath = shelf_set_config.get('shelf_set_source_path')
- shelf_set_os_filepath = shelf_set_filepath[current_os]
- if shelf_set_os_filepath:
- if not os.path.isfile(shelf_set_os_filepath):
- log.error("Shelf path doesn't exist - "
- "{}".format(shelf_set_os_filepath))
- continue
+ # Get Template data
+ template_data = get_current_context_template_data_with_asset_data()
- hou.shelves.newShelfSet(file_path=shelf_set_os_filepath)
- continue
+ for config in shelves_configs:
+ selected_option = config["options"]
+ shelf_set_config = config[selected_option]
+
+ shelf_set_filepath = shelf_set_config.get('shelf_set_source_path')
+ if shelf_set_filepath:
+ shelf_set_os_filepath = shelf_set_filepath[current_os]
+ if shelf_set_os_filepath:
+ shelf_set_os_filepath = get_path_using_template_data(
+ shelf_set_os_filepath, template_data
+ )
+ if not os.path.isfile(shelf_set_os_filepath):
+ log.error("Shelf path doesn't exist - "
+ "{}".format(shelf_set_os_filepath))
+ continue
+
+ hou.shelves.loadFile(shelf_set_os_filepath)
+ continue
shelf_set_name = shelf_set_config.get('shelf_set_name')
if not shelf_set_name:
@@ -81,7 +95,9 @@ def generate_shelves():
"script path of the tool.")
continue
- tool = get_or_create_tool(tool_definition, shelf)
+ tool = get_or_create_tool(
+ tool_definition, shelf, template_data
+ )
if not tool:
continue
@@ -144,7 +160,7 @@ def get_or_create_shelf(shelf_label):
return new_shelf
-def get_or_create_tool(tool_definition, shelf):
+def get_or_create_tool(tool_definition, shelf, template_data):
"""This function verifies if the tool exists and updates it. If not, creates
a new one.
@@ -162,10 +178,16 @@ def get_or_create_tool(tool_definition, shelf):
return
script_path = tool_definition["script"]
+ script_path = get_path_using_template_data(script_path, template_data)
if not script_path or not os.path.exists(script_path):
log.warning("This path doesn't exist - {}".format(script_path))
return
+ icon_path = tool_definition["icon"]
+ if icon_path:
+ icon_path = get_path_using_template_data(icon_path, template_data)
+ tool_definition["icon"] = icon_path
+
existing_tools = shelf.tools()
existing_tool = next(
(tool for tool in existing_tools if tool.label() == tool_label),
@@ -184,3 +206,10 @@ def get_or_create_tool(tool_definition, shelf):
tool_name = re.sub(r"[^\w\d]+", "_", tool_label).lower()
return hou.shelves.newTool(name=tool_name, **tool_definition)
+
+
+def get_path_using_template_data(path, template_data):
+ path = StringTemplate.format_template(path, template_data)
+ path = path.replace("\\", "/")
+
+ return path
diff --git a/openpype/hosts/houdini/plugins/create/create_arnold_ass.py b/openpype/hosts/houdini/plugins/create/create_arnold_ass.py
index 12d08f7d83..437a14c723 100644
--- a/openpype/hosts/houdini/plugins/create/create_arnold_ass.py
+++ b/openpype/hosts/houdini/plugins/create/create_arnold_ass.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating Arnold ASS files."""
from openpype.hosts.houdini.api import plugin
+from openpype.lib import BoolDef
class CreateArnoldAss(plugin.HoudiniCreator):
@@ -21,6 +22,9 @@ class CreateArnoldAss(plugin.HoudiniCreator):
instance_data.pop("active", None)
instance_data.update({"node_type": "arnold"})
+ creator_attributes = instance_data.setdefault(
+ "creator_attributes", dict())
+ creator_attributes["farm"] = pre_create_data["farm"]
instance = super(CreateArnoldAss, self).create(
subset_name,
@@ -52,3 +56,15 @@ class CreateArnoldAss(plugin.HoudiniCreator):
# Lock any parameters in this list
to_lock = ["ar_ass_export_enable", "family", "id"]
self.lock_parameters(instance_node, to_lock)
+
+ def get_instance_attr_defs(self):
+ return [
+ BoolDef("farm",
+ label="Submitting to Farm",
+ default=False)
+ ]
+
+ def get_pre_create_attr_defs(self):
+ attrs = super().get_pre_create_attr_defs()
+ # Use same attributes as for instance attributes
+ return attrs + self.get_instance_attr_defs()
diff --git a/openpype/hosts/houdini/plugins/create/create_arnold_rop.py b/openpype/hosts/houdini/plugins/create/create_arnold_rop.py
index b58c377a20..85c4f0f5ff 100644
--- a/openpype/hosts/houdini/plugins/create/create_arnold_rop.py
+++ b/openpype/hosts/houdini/plugins/create/create_arnold_rop.py
@@ -13,6 +13,9 @@ class CreateArnoldRop(plugin.HoudiniCreator):
# Default extension
ext = "exr"
+ # Default to split export and render jobs
+ export_job = True
+
def create(self, subset_name, instance_data, pre_create_data):
import hou
@@ -48,6 +51,15 @@ class CreateArnoldRop(plugin.HoudiniCreator):
"ar_exr_half_precision": 1 # half precision
}
+ if pre_create_data.get("export_job"):
+ ass_filepath = \
+ "{export_dir}{subset_name}/{subset_name}.$F4.ass".format(
+ export_dir=hou.text.expandString("$HIP/pyblish/ass/"),
+ subset_name=subset_name,
+ )
+ parms["ar_ass_export_enable"] = 1
+ parms["ar_ass_file"] = ass_filepath
+
instance_node.setParms(parms)
# Lock any parameters in this list
@@ -66,6 +78,9 @@ class CreateArnoldRop(plugin.HoudiniCreator):
BoolDef("farm",
label="Submitting to Farm",
default=True),
+ BoolDef("export_job",
+ label="Split export and render jobs",
+ default=self.export_job),
EnumDef("image_format",
image_format_enum,
default=self.ext,
diff --git a/openpype/hosts/houdini/plugins/create/create_bgeo.py b/openpype/hosts/houdini/plugins/create/create_bgeo.py
index a3f31e7e94..4140919202 100644
--- a/openpype/hosts/houdini/plugins/create/create_bgeo.py
+++ b/openpype/hosts/houdini/plugins/create/create_bgeo.py
@@ -2,7 +2,8 @@
"""Creator plugin for creating pointcache bgeo files."""
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance, CreatorError
-from openpype.lib import EnumDef
+import hou
+from openpype.lib import EnumDef, BoolDef
class CreateBGEO(plugin.HoudiniCreator):
@@ -13,11 +14,13 @@ class CreateBGEO(plugin.HoudiniCreator):
icon = "gears"
def create(self, subset_name, instance_data, pre_create_data):
- import hou
instance_data.pop("active", None)
instance_data.update({"node_type": "geometry"})
+ creator_attributes = instance_data.setdefault(
+ "creator_attributes", dict())
+ creator_attributes["farm"] = pre_create_data["farm"]
instance = super(CreateBGEO, self).create(
subset_name,
@@ -58,6 +61,13 @@ class CreateBGEO(plugin.HoudiniCreator):
instance_node.setParms(parms)
+ def get_instance_attr_defs(self):
+ return [
+ BoolDef("farm",
+ label="Submitting to Farm",
+ default=False)
+ ]
+
def get_pre_create_attr_defs(self):
attrs = super().get_pre_create_attr_defs()
bgeo_enum = [
@@ -89,4 +99,10 @@ class CreateBGEO(plugin.HoudiniCreator):
return attrs + [
EnumDef("bgeo_type", bgeo_enum, label="BGEO Options"),
+ ] + self.get_instance_attr_defs()
+
+ def get_network_categories(self):
+ return [
+ hou.ropNodeTypeCategory(),
+ hou.sopNodeTypeCategory()
]
diff --git a/openpype/hosts/houdini/plugins/create/create_composite.py b/openpype/hosts/houdini/plugins/create/create_composite.py
index 9d4f7969bb..52ea6fa054 100644
--- a/openpype/hosts/houdini/plugins/create/create_composite.py
+++ b/openpype/hosts/houdini/plugins/create/create_composite.py
@@ -45,6 +45,11 @@ class CreateCompositeSequence(plugin.HoudiniCreator):
instance_node.setParms(parms)
+ # Manually set f1 & f2 to $FSTART and $FEND respectively
+ # to match other Houdini nodes default.
+ instance_node.parm("f1").setExpression("$FSTART")
+ instance_node.parm("f2").setExpression("$FEND")
+
# Lock any parameters in this list
to_lock = ["prim_to_detail_pattern"]
self.lock_parameters(instance_node, to_lock)
diff --git a/openpype/hosts/houdini/plugins/create/create_hda.py b/openpype/hosts/houdini/plugins/create/create_hda.py
index c4093bfbc6..f670b55eb6 100644
--- a/openpype/hosts/houdini/plugins/create/create_hda.py
+++ b/openpype/hosts/houdini/plugins/create/create_hda.py
@@ -5,6 +5,7 @@ from openpype.client import (
get_subsets,
)
from openpype.hosts.houdini.api import plugin
+import hou
class CreateHDA(plugin.HoudiniCreator):
@@ -16,13 +17,13 @@ class CreateHDA(plugin.HoudiniCreator):
icon = "gears"
maintain_selection = False
- def _check_existing(self, subset_name):
+ def _check_existing(self, asset_name, subset_name):
# type: (str) -> bool
"""Check if existing subset name versions already exists."""
# Get all subsets of the current asset
project_name = self.project_name
asset_doc = get_asset_by_name(
- project_name, self.data["asset"], fields=["_id"]
+ project_name, asset_name, fields=["_id"]
)
subset_docs = get_subsets(
project_name, asset_ids=[asset_doc["_id"]], fields=["name"]
@@ -34,8 +35,8 @@ class CreateHDA(plugin.HoudiniCreator):
return subset_name.lower() in existing_subset_names_low
def create_instance_node(
- self, node_name, parent, node_type="geometry"):
- import hou
+ self, asset_name, node_name, parent, node_type="geometry"
+ ):
parent_node = hou.node("/obj")
if self.selected_nodes:
@@ -61,7 +62,7 @@ class CreateHDA(plugin.HoudiniCreator):
hda_file_name="$HIP/{}.hda".format(node_name)
)
hda_node.layoutChildren()
- elif self._check_existing(node_name):
+ elif self._check_existing(asset_name, node_name):
raise plugin.OpenPypeCreatorError(
("subset {} is already published with different HDA"
"definition.").format(node_name))
@@ -81,3 +82,8 @@ class CreateHDA(plugin.HoudiniCreator):
pre_create_data) # type: plugin.CreatedInstance
return instance
+
+ def get_network_categories(self):
+ return [
+ hou.objNodeTypeCategory()
+ ]
diff --git a/openpype/hosts/houdini/plugins/create/create_mantra_ifd.py b/openpype/hosts/houdini/plugins/create/create_mantra_ifd.py
new file mode 100644
index 0000000000..7ea7d1042f
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/create/create_mantra_ifd.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for creating Mantra IFD archives."""
+from openpype.hosts.houdini.api import plugin
+from openpype.pipeline import CreatedInstance
+from openpype.lib import BoolDef
+
+
+class CreateMantraIFD(plugin.HoudiniCreator):
+ """Mantra .ifd Archive"""
+ identifier = "io.openpype.creators.houdini.mantraifd"
+ label = "Mantra IFD"
+ family = "mantraifd"
+ icon = "gears"
+
+ def create(self, subset_name, instance_data, pre_create_data):
+ import hou
+ instance_data.pop("active", None)
+ instance_data.update({"node_type": "ifd"})
+ creator_attributes = instance_data.setdefault(
+ "creator_attributes", dict())
+ creator_attributes["farm"] = pre_create_data["farm"]
+ instance = super(CreateMantraIFD, self).create(
+ subset_name,
+ instance_data,
+ pre_create_data) # type: CreatedInstance
+
+ instance_node = hou.node(instance.get("instance_node"))
+
+ filepath = "{}{}".format(
+ hou.text.expandString("$HIP/pyblish/"),
+ "{}.$F4.ifd".format(subset_name))
+ parms = {
+ # Render frame range
+ "trange": 1,
+ # Mantra ROP settings
+ "soho_diskfile": filepath,
+ "soho_outputmode": 1
+ }
+
+ instance_node.setParms(parms)
+
+ # Lock any parameters in this list
+ to_lock = ["soho_outputmode", "family", "id"]
+ self.lock_parameters(instance_node, to_lock)
+
+ def get_instance_attr_defs(self):
+ return [
+ BoolDef("farm",
+ label="Submitting to Farm",
+ default=False)
+ ]
+
+ def get_pre_create_attr_defs(self):
+ attrs = super().get_pre_create_attr_defs()
+ # Use same attributes as for instance attributes
+ return attrs + self.get_instance_attr_defs()
diff --git a/openpype/hosts/houdini/plugins/create/create_mantra_rop.py b/openpype/hosts/houdini/plugins/create/create_mantra_rop.py
index d2f0e735a8..8ecfbea802 100644
--- a/openpype/hosts/houdini/plugins/create/create_mantra_rop.py
+++ b/openpype/hosts/houdini/plugins/create/create_mantra_rop.py
@@ -12,6 +12,9 @@ class CreateMantraROP(plugin.HoudiniCreator):
family = "mantra_rop"
icon = "magic"
+ # Default to split export and render jobs
+ export_job = True
+
def create(self, subset_name, instance_data, pre_create_data):
import hou # noqa
@@ -44,6 +47,15 @@ class CreateMantraROP(plugin.HoudiniCreator):
"vm_picture": filepath,
}
+ if pre_create_data.get("export_job"):
+ ifd_filepath = \
+ "{export_dir}{subset_name}/{subset_name}.$F4.ifd".format(
+ export_dir=hou.text.expandString("$HIP/pyblish/ifd/"),
+ subset_name=subset_name,
+ )
+ parms["soho_outputmode"] = 1
+ parms["soho_diskfile"] = ifd_filepath
+
if self.selected_nodes:
# If camera found in selection
# we will use as render camera
@@ -78,6 +90,9 @@ class CreateMantraROP(plugin.HoudiniCreator):
BoolDef("farm",
label="Submitting to Farm",
default=True),
+ BoolDef("export_job",
+ label="Split export and render jobs",
+ default=self.export_job),
EnumDef("image_format",
image_format_enum,
default="exr",
diff --git a/openpype/hosts/houdini/plugins/create/create_pointcache.py b/openpype/hosts/houdini/plugins/create/create_pointcache.py
index 7eaf2aff2b..2d2f89cc48 100644
--- a/openpype/hosts/houdini/plugins/create/create_pointcache.py
+++ b/openpype/hosts/houdini/plugins/create/create_pointcache.py
@@ -1,10 +1,12 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating pointcache alembics."""
from openpype.hosts.houdini.api import plugin
+from openpype.lib import BoolDef
import hou
+
class CreatePointCache(plugin.HoudiniCreator):
"""Alembic ROP to pointcache"""
identifier = "io.openpype.creators.houdini.pointcache"
@@ -15,6 +17,9 @@ class CreatePointCache(plugin.HoudiniCreator):
def create(self, subset_name, instance_data, pre_create_data):
instance_data.pop("active", None)
instance_data.update({"node_type": "alembic"})
+ creator_attributes = instance_data.setdefault(
+ "creator_attributes", dict())
+ creator_attributes["farm"] = pre_create_data["farm"]
instance = super(CreatePointCache, self).create(
subset_name,
@@ -105,3 +110,15 @@ class CreatePointCache(plugin.HoudiniCreator):
else:
return min(outputs,
key=lambda node: node.evalParm('outputidx'))
+
+ def get_instance_attr_defs(self):
+ return [
+ BoolDef("farm",
+ label="Submitting to Farm",
+ default=False)
+ ]
+
+ def get_pre_create_attr_defs(self):
+ attrs = super().get_pre_create_attr_defs()
+ # Use same attributes as for instance attributes
+ return attrs + self.get_instance_attr_defs()
diff --git a/openpype/hosts/houdini/plugins/create/create_redshift_proxy.py b/openpype/hosts/houdini/plugins/create/create_redshift_proxy.py
index b814dd9d57..e1577c92e9 100644
--- a/openpype/hosts/houdini/plugins/create/create_redshift_proxy.py
+++ b/openpype/hosts/houdini/plugins/create/create_redshift_proxy.py
@@ -1,7 +1,8 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating Redshift proxies."""
from openpype.hosts.houdini.api import plugin
-from openpype.pipeline import CreatedInstance
+import hou
+from openpype.lib import BoolDef
class CreateRedshiftProxy(plugin.HoudiniCreator):
@@ -12,7 +13,7 @@ class CreateRedshiftProxy(plugin.HoudiniCreator):
icon = "magic"
def create(self, subset_name, instance_data, pre_create_data):
- import hou # noqa
+
# Remove the active, we are checking the bypass flag of the nodes
instance_data.pop("active", None)
@@ -24,11 +25,14 @@ class CreateRedshiftProxy(plugin.HoudiniCreator):
# TODO: Somehow enforce so that it only shows the original limited
# attributes of the Redshift_Proxy_Output node type
instance_data.update({"node_type": "Redshift_Proxy_Output"})
+ creator_attributes = instance_data.setdefault(
+ "creator_attributes", dict())
+ creator_attributes["farm"] = pre_create_data["farm"]
instance = super(CreateRedshiftProxy, self).create(
subset_name,
instance_data,
- pre_create_data) # type: CreatedInstance
+ pre_create_data)
instance_node = hou.node(instance.get("instance_node"))
@@ -44,3 +48,21 @@ class CreateRedshiftProxy(plugin.HoudiniCreator):
# Lock some Avalon attributes
to_lock = ["family", "id", "prim_to_detail_pattern"]
self.lock_parameters(instance_node, to_lock)
+
+ def get_network_categories(self):
+ return [
+ hou.ropNodeTypeCategory(),
+ hou.sopNodeTypeCategory()
+ ]
+
+ def get_instance_attr_defs(self):
+ return [
+ BoolDef("farm",
+ label="Submitting to Farm",
+ default=False)
+ ]
+
+ def get_pre_create_attr_defs(self):
+ attrs = super().get_pre_create_attr_defs()
+ # Use same attributes as for instance attributes
+ return attrs + self.get_instance_attr_defs()
diff --git a/openpype/hosts/houdini/plugins/create/create_staticmesh.py b/openpype/hosts/houdini/plugins/create/create_staticmesh.py
index ea0b36f03f..d0985198bd 100644
--- a/openpype/hosts/houdini/plugins/create/create_staticmesh.py
+++ b/openpype/hosts/houdini/plugins/create/create_staticmesh.py
@@ -54,6 +54,7 @@ class CreateStaticMesh(plugin.HoudiniCreator):
def get_network_categories(self):
return [
hou.ropNodeTypeCategory(),
+ hou.objNodeTypeCategory(),
hou.sopNodeTypeCategory()
]
diff --git a/openpype/hosts/houdini/plugins/create/create_vbd_cache.py b/openpype/hosts/houdini/plugins/create/create_vbd_cache.py
index 9c96e48e3a..53df9dda68 100644
--- a/openpype/hosts/houdini/plugins/create/create_vbd_cache.py
+++ b/openpype/hosts/houdini/plugins/create/create_vbd_cache.py
@@ -2,6 +2,7 @@
"""Creator plugin for creating VDB Caches."""
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance
+from openpype.lib import BoolDef
import hou
@@ -19,15 +20,20 @@ class CreateVDBCache(plugin.HoudiniCreator):
instance_data.pop("active", None)
instance_data.update({"node_type": "geometry"})
-
+ creator_attributes = instance_data.setdefault(
+ "creator_attributes", dict())
+ creator_attributes["farm"] = pre_create_data["farm"]
instance = super(CreateVDBCache, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
instance_node = hou.node(instance.get("instance_node"))
+ file_path = "{}{}".format(
+ hou.text.expandString("$HIP/pyblish/"),
+ "{}.$F4.vdb".format(subset_name))
parms = {
- "sopoutput": "$HIP/pyblish/{}.$F4.vdb".format(subset_name),
+ "sopoutput": file_path,
"initsim": True,
"trange": 1
}
@@ -40,6 +46,7 @@ class CreateVDBCache(plugin.HoudiniCreator):
def get_network_categories(self):
return [
hou.ropNodeTypeCategory(),
+ hou.objNodeTypeCategory(),
hou.sopNodeTypeCategory()
]
@@ -102,3 +109,15 @@ class CreateVDBCache(plugin.HoudiniCreator):
else:
return min(outputs,
key=lambda node: node.evalParm('outputidx'))
+
+ def get_instance_attr_defs(self):
+ return [
+ BoolDef("farm",
+ label="Submitting to Farm",
+ default=False)
+ ]
+
+ def get_pre_create_attr_defs(self):
+ attrs = super().get_pre_create_attr_defs()
+ # Use same attributes as for instance attributes
+ return attrs + self.get_instance_attr_defs()
diff --git a/openpype/hosts/houdini/plugins/create/create_vray_rop.py b/openpype/hosts/houdini/plugins/create/create_vray_rop.py
index 793a544fdf..272b57b548 100644
--- a/openpype/hosts/houdini/plugins/create/create_vray_rop.py
+++ b/openpype/hosts/houdini/plugins/create/create_vray_rop.py
@@ -16,6 +16,9 @@ class CreateVrayROP(plugin.HoudiniCreator):
icon = "magic"
ext = "exr"
+ # Default to split export and render jobs
+ export_job = True
+
def create(self, subset_name, instance_data, pre_create_data):
instance_data.pop("active", None)
@@ -52,6 +55,17 @@ class CreateVrayROP(plugin.HoudiniCreator):
"SettingsEXR_bits_per_channel": "16" # half precision
}
+ if pre_create_data.get("export_job"):
+ scene_filepath = \
+ "{export_dir}{subset_name}/{subset_name}.$F4.vrscene".format(
+ export_dir=hou.text.expandString("$HIP/pyblish/vrscene/"),
+ subset_name=subset_name,
+ )
+ # Setting render_export_mode to "2" because that's for
+ # "Export only" ("1" is for "Export & Render")
+ parms["render_export_mode"] = "2"
+ parms["render_export_filepath"] = scene_filepath
+
if self.selected_nodes:
# set up the render camera from the selected node
camera = None
@@ -140,6 +154,9 @@ class CreateVrayROP(plugin.HoudiniCreator):
BoolDef("farm",
label="Submitting to Farm",
default=True),
+ BoolDef("export_job",
+ label="Split export and render jobs",
+ default=self.export_job),
EnumDef("image_format",
image_format_enum,
default=self.ext,
diff --git a/openpype/hosts/houdini/plugins/create/create_workfile.py b/openpype/hosts/houdini/plugins/create/create_workfile.py
index cc45a6c2a8..850f5c994e 100644
--- a/openpype/hosts/houdini/plugins/create/create_workfile.py
+++ b/openpype/hosts/houdini/plugins/create/create_workfile.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating workfiles."""
+from openpype import AYON_SERVER_ENABLED
from openpype.hosts.houdini.api import plugin
from openpype.hosts.houdini.api.lib import read, imprint
from openpype.hosts.houdini.api.pipeline import CONTEXT_CONTAINER
@@ -30,16 +31,27 @@ class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator):
task_name = self.create_context.get_current_task_name()
host_name = self.host_name
+ if current_instance is None:
+ current_instance_asset = None
+ elif AYON_SERVER_ENABLED:
+ current_instance_asset = current_instance["folderPath"]
+ else:
+ current_instance_asset = current_instance["asset"]
+
if current_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
variant, task_name, asset_doc, project_name, host_name
)
data = {
- "asset": asset_name,
"task": task_name,
"variant": variant
}
+ if AYON_SERVER_ENABLED:
+ data["folderPath"] = asset_name
+ else:
+ data["asset"] = asset_name
+
data.update(
self.get_dynamic_data(
variant, task_name, asset_doc,
@@ -51,15 +63,18 @@ class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator):
)
self._add_instance_to_context(current_instance)
elif (
- current_instance["asset"] != asset_name
- or current_instance["task"] != task_name
+ current_instance_asset != asset_name
+ or current_instance["task"] != task_name
):
# Update instance context if is not the same
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
variant, task_name, asset_doc, project_name, host_name
)
- current_instance["asset"] = asset_name
+ if AYON_SERVER_ENABLED:
+ current_instance["folderPath"] = asset_name
+ else:
+ current_instance["asset"] = asset_name
current_instance["task"] = task_name
current_instance["subset"] = subset_name
diff --git a/openpype/hosts/houdini/plugins/load/load_image.py b/openpype/hosts/houdini/plugins/load/load_image.py
index 663a93e48b..cff2b74e52 100644
--- a/openpype/hosts/houdini/plugins/load/load_image.py
+++ b/openpype/hosts/houdini/plugins/load/load_image.py
@@ -119,7 +119,8 @@ class ImageLoader(load.LoaderPlugin):
if not parent.children():
parent.destroy()
- def _get_file_sequence(self, root):
+ def _get_file_sequence(self, file_path):
+ root = os.path.dirname(file_path)
files = sorted(os.listdir(root))
first_fname = files[0]
diff --git a/openpype/hosts/houdini/plugins/load/show_usdview.py b/openpype/hosts/houdini/plugins/load/show_usdview.py
index 7b03a0738a..d56c4acc4f 100644
--- a/openpype/hosts/houdini/plugins/load/show_usdview.py
+++ b/openpype/hosts/houdini/plugins/load/show_usdview.py
@@ -1,4 +1,5 @@
import os
+import platform
import subprocess
from openpype.lib.vendor_bin_utils import find_executable
@@ -8,17 +9,31 @@ from openpype.pipeline import load
class ShowInUsdview(load.LoaderPlugin):
"""Open USD file in usdview"""
- families = ["colorbleed.usd"]
label = "Show in usdview"
- representations = ["usd", "usda", "usdlc", "usdnc"]
- order = 10
+ representations = ["*"]
+ families = ["*"]
+ extensions = {"usd", "usda", "usdlc", "usdnc", "abc"}
+ order = 15
icon = "code-fork"
color = "white"
def load(self, context, name=None, namespace=None, data=None):
+ from pathlib import Path
- usdview = find_executable("usdview")
+ if platform.system() == "Windows":
+ executable = "usdview.bat"
+ else:
+ executable = "usdview"
+
+ usdview = find_executable(executable)
+ if not usdview:
+ raise RuntimeError("Unable to find usdview")
+
+ # For some reason Windows can return the path like:
+ # C:/PROGRA~1/SIDEEF~1/HOUDIN~1.435/bin/usdview
+ # convert to resolved path so `subprocess` can take it
+ usdview = str(Path(usdview).resolve().as_posix())
filepath = self.filepath_from_context(context)
filepath = os.path.normpath(filepath)
@@ -30,14 +45,4 @@ class ShowInUsdview(load.LoaderPlugin):
self.log.info("Start houdini variant of usdview...")
- # For now avoid some pipeline environment variables that initialize
- # Avalon in Houdini as it is redundant for usdview and slows boot time
- env = os.environ.copy()
- env.pop("PYTHONPATH", None)
- env.pop("HOUDINI_SCRIPT_PATH", None)
- env.pop("HOUDINI_MENU_PATH", None)
-
- # Force string to avoid unicode issues
- env = {str(key): str(value) for key, value in env.items()}
-
- subprocess.Popen([usdview, filepath, "--renderer", "GL"], env=env)
+ subprocess.Popen([usdview, filepath, "--renderer", "GL"])
diff --git a/openpype/hosts/houdini/plugins/publish/collect_arnold_rop.py b/openpype/hosts/houdini/plugins/publish/collect_arnold_rop.py
index 43b8428c60..ffc2a526a3 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_arnold_rop.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_arnold_rop.py
@@ -20,7 +20,9 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
"""
label = "Arnold ROP Render Products"
- order = pyblish.api.CollectorOrder + 0.4
+ # This specific order value is used so that
+ # this plugin runs after CollectFrames
+ order = pyblish.api.CollectorOrder + 0.11
hosts = ["houdini"]
families = ["arnold_rop"]
@@ -38,6 +40,25 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
default_prefix = evalParmNoFrame(rop, "ar_picture")
render_products = []
+ # Store whether we are splitting the render job (export + render)
+ split_render = bool(rop.parm("ar_ass_export_enable").eval())
+ instance.data["splitRender"] = split_render
+ export_prefix = None
+ export_products = []
+ if split_render:
+ export_prefix = evalParmNoFrame(
+ rop, "ar_ass_file", pad_character="0"
+ )
+ beauty_export_product = self.get_render_product_name(
+ prefix=export_prefix,
+ suffix=None)
+ export_products.append(beauty_export_product)
+ self.log.debug(
+ "Found export product: {}".format(beauty_export_product)
+ )
+ instance.data["ifdFile"] = beauty_export_product
+ instance.data["exportFiles"] = list(export_products)
+
# Default beauty AOV
beauty_product = self.get_render_product_name(prefix=default_prefix,
suffix=None)
@@ -126,8 +147,9 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
return path
expected_files = []
- start = instance.data["frameStart"]
- end = instance.data["frameEnd"]
+ start = instance.data["frameStartHandle"]
+ end = instance.data["frameEndHandle"]
+
for i in range(int(start), (int(end) + 1)):
expected_files.append(
os.path.join(dir, (file % i)).replace("\\", "/"))
diff --git a/openpype/hosts/houdini/plugins/publish/collect_asset_handles.py b/openpype/hosts/houdini/plugins/publish/collect_asset_handles.py
new file mode 100644
index 0000000000..67a281639d
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_asset_handles.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+"""Collector plugin applying asset handles to ROP instance frame ranges."""
+import hou # noqa
+import pyblish.api
+from openpype.lib import BoolDef
+from openpype.pipeline import OpenPypePyblishPluginMixin
+
+
+class CollectAssetHandles(pyblish.api.InstancePlugin,
+ OpenPypePyblishPluginMixin):
+ """Apply asset handles.
+
+ If instance does not have:
+ - frameStart
+ - frameEnd
+ - handleStart
+ - handleEnd
+ But it does have:
+ - frameStartHandle
+ - frameEndHandle
+
+ Then we will retrieve the asset's handles to compute
+ the exclusive frame range and actual handle ranges.
+ """
+
+ hosts = ["houdini"]
+
+ # This specific order value is used so that
+ # this plugin runs after CollectAnatomyInstanceData
+ order = pyblish.api.CollectorOrder + 0.499
+
+ label = "Collect Asset Handles"
+ use_asset_handles = True
+
+ def process(self, instance):
+ # Only process instances without already existing handles data
+ # but that do have frameStartHandle and frameEndHandle defined
+ # like the data collected from CollectRopFrameRange
+ if "frameStartHandle" not in instance.data:
+ return
+ if "frameEndHandle" not in instance.data:
+ return
+
+ has_existing_data = {
+ "handleStart",
+ "handleEnd",
+ "frameStart",
+ "frameEnd"
+ }.issubset(instance.data)
+ if has_existing_data:
+ return
+
+ attr_values = self.get_attr_values_from_data(instance.data)
+ if attr_values.get("use_handles", self.use_asset_handles):
+ asset_data = instance.data["assetEntity"]["data"]
+ handle_start = asset_data.get("handleStart", 0)
+ handle_end = asset_data.get("handleEnd", 0)
+ else:
+ handle_start = 0
+ handle_end = 0
+
+ frame_start = instance.data["frameStartHandle"] + handle_start
+ frame_end = instance.data["frameEndHandle"] - handle_end
+
+ instance.data.update({
+ "handleStart": handle_start,
+ "handleEnd": handle_end,
+ "frameStart": frame_start,
+ "frameEnd": frame_end
+ })
+
+ # Log debug message about the collected frame range
+ if attr_values.get("use_handles", self.use_asset_handles):
+ self.log.debug(
+ "Full Frame range with Handles "
+ "[{frame_start_handle} - {frame_end_handle}]"
+ .format(
+ frame_start_handle=instance.data["frameStartHandle"],
+ frame_end_handle=instance.data["frameEndHandle"]
+ )
+ )
+ else:
+ self.log.debug(
+ "Use handles is deactivated for this instance, "
+ "start and end handles are set to 0."
+ )
+
+ # Log collected frame range to the user
+ message = "Frame range [{frame_start} - {frame_end}]".format(
+ frame_start=frame_start,
+ frame_end=frame_end
+ )
+ if handle_start or handle_end:
+ message += " with handles [{handle_start}]-[{handle_end}]".format(
+ handle_start=handle_start,
+ handle_end=handle_end
+ )
+ self.log.info(message)
+
+ if instance.data.get("byFrameStep", 1.0) != 1.0:
+ self.log.info(
+ "Frame steps {}".format(instance.data["byFrameStep"]))
+
+ # Add frame range to label if the instance has a frame range.
+ label = instance.data.get("label", instance.data["name"])
+ instance.data["label"] = (
+ "{label} [{frame_start_handle} - {frame_end_handle}]"
+ .format(
+ label=label,
+ frame_start_handle=instance.data["frameStartHandle"],
+ frame_end_handle=instance.data["frameEndHandle"]
+ )
+ )
+
+ @classmethod
+ def get_attribute_defs(cls):
+ return [
+ BoolDef("use_handles",
+ tooltip="Disable this if you want the publisher to"
+ " ignore start and end handles specified in the"
+ " asset data for this publish instance",
+ default=cls.use_asset_handles,
+ label="Use asset handles")
+ ]
diff --git a/openpype/hosts/houdini/plugins/publish/collect_cache_farm.py b/openpype/hosts/houdini/plugins/publish/collect_cache_farm.py
new file mode 100644
index 0000000000..36ade32a35
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_cache_farm.py
@@ -0,0 +1,75 @@
+import os
+import pyblish.api
+import hou
+from openpype.hosts.houdini.api import lib
+
+
+class CollectDataforCache(pyblish.api.InstancePlugin):
+ """Collect data for caching to Deadline."""
+
+ order = pyblish.api.CollectorOrder + 0.04
+ families = ["ass", "pointcache",
+ "mantraifd", "redshiftproxy",
+ "vdbcache"]
+ hosts = ["houdini"]
+ targets = ["local", "remote"]
+ label = "Collect Data for Cache"
+
+ def process(self, instance):
+ creator_attribute = instance.data["creator_attributes"]
+ farm_enabled = creator_attribute["farm"]
+ instance.data["farm"] = farm_enabled
+ if not farm_enabled:
+ self.log.debug("Caching on farm is disabled. "
+ "Skipping farm collecting.")
+ return
+ # Why do we need this particular collector to collect the expected
+ # output files from a ROP node. Don't we have a dedicated collector
+ # for that yet?
+ # Collect expected files
+ ropnode = hou.node(instance.data["instance_node"])
+ output_parm = lib.get_output_parameter(ropnode)
+ expected_filepath = output_parm.eval()
+ instance.data.setdefault("files", list())
+ instance.data.setdefault("expectedFiles", list())
+ if instance.data.get("frames"):
+ files = self.get_files(instance, expected_filepath)
+ # list of files
+ instance.data["files"].extend(files)
+ else:
+ # single file
+ instance.data["files"].append(output_parm.eval())
+ cache_files = {"_": instance.data["files"]}
+ # Convert instance family to pointcache if it is bgeo or abc
+ # because ???
+ for family in instance.data["families"]:
+ if family in ("bgeo", "abc"):
+ instance.data["family"] = "pointcache"
+ break
+ instance.data.update({
+ "plugin": "Houdini",
+ "publish": True
+ })
+ instance.data["families"].append("publish.hou")
+ instance.data["expectedFiles"].append(cache_files)
+
+ self.log.debug("{}".format(instance.data))
+
+ def get_files(self, instance, output_parm):
+ """Get the files with the frame range data
+
+ Args:
+ instance (pyblish.api.Instance): publish instance being processed
+ output_parm (str): evaluated output file path of the ROP node
+
+ Returns:
+ list: frame file paths joined with the output directory
+ """
+ directory = os.path.dirname(output_parm)
+
+ files = [
+ os.path.join(directory, frame).replace("\\", "/")
+ for frame in instance.data["frames"]
+ ]
+
+ return files
diff --git a/openpype/hosts/houdini/plugins/publish/collect_chunk_size.py b/openpype/hosts/houdini/plugins/publish/collect_chunk_size.py
new file mode 100644
index 0000000000..f8cd4089e2
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_chunk_size.py
@@ -0,0 +1,33 @@
+import pyblish.api
+from openpype.lib import NumberDef
+from openpype.pipeline import OpenPypePyblishPluginMixin
+
+
+class CollectChunkSize(pyblish.api.InstancePlugin,
+ OpenPypePyblishPluginMixin):
+ """Collect chunk size for cache submission to Deadline."""
+
+ order = pyblish.api.CollectorOrder + 0.05
+ families = ["ass", "pointcache",
+ "vdbcache", "mantraifd",
+ "redshiftproxy"]
+ hosts = ["houdini"]
+ targets = ["local", "remote"]
+ label = "Collect Chunk Size"
+ chunk_size = 999999
+
+ def process(self, instance):
+ # need to get the chunk size info from the setting
+ attr_values = self.get_attr_values_from_data(instance.data)
+ instance.data["chunkSize"] = attr_values.get("chunkSize")
+
+ @classmethod
+ def get_attribute_defs(cls):
+ return [
+ NumberDef("chunkSize",
+ minimum=1,
+ maximum=999999,
+ decimals=0,
+ default=cls.chunk_size,
+ label="Frame Per Task")
+ ]
diff --git a/openpype/hosts/houdini/plugins/publish/collect_frames.py b/openpype/hosts/houdini/plugins/publish/collect_frames.py
index 01df809d4c..089fae6b1b 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_frames.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_frames.py
@@ -11,17 +11,20 @@ from openpype.hosts.houdini.api import lib
class CollectFrames(pyblish.api.InstancePlugin):
"""Collect all frames which would be saved from the ROP nodes"""
- order = pyblish.api.CollectorOrder + 0.01
+ # This specific order value is used so that
+ # this plugin runs after CollectRopFrameRange
+ order = pyblish.api.CollectorOrder + 0.1
label = "Collect Frames"
families = ["vdbcache", "imagesequence", "ass",
- "redshiftproxy", "review", "bgeo"]
+ "mantraifd", "redshiftproxy", "review",
+ "bgeo"]
def process(self, instance):
ropnode = hou.node(instance.data["instance_node"])
- start_frame = instance.data.get("frameStart", None)
- end_frame = instance.data.get("frameEnd", None)
+ start_frame = instance.data.get("frameStartHandle", None)
+ end_frame = instance.data.get("frameEndHandle", None)
output_parm = lib.get_output_parameter(ropnode)
if start_frame is not None:
diff --git a/openpype/hosts/houdini/plugins/publish/collect_instance_frame_data.py b/openpype/hosts/houdini/plugins/publish/collect_instance_frame_data.py
deleted file mode 100644
index 584343cd64..0000000000
--- a/openpype/hosts/houdini/plugins/publish/collect_instance_frame_data.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import hou
-
-import pyblish.api
-
-
-class CollectInstanceNodeFrameRange(pyblish.api.InstancePlugin):
- """Collect time range frame data for the instance node."""
-
- order = pyblish.api.CollectorOrder + 0.001
- label = "Instance Node Frame Range"
- hosts = ["houdini"]
-
- def process(self, instance):
-
- node_path = instance.data.get("instance_node")
- node = hou.node(node_path) if node_path else None
- if not node_path or not node:
- self.log.debug("No instance node found for instance: "
- "{}".format(instance))
- return
-
- frame_data = self.get_frame_data(node)
- if not frame_data:
- return
-
- self.log.info("Collected time data: {}".format(frame_data))
- instance.data.update(frame_data)
-
- def get_frame_data(self, node):
- """Get the frame data: start frame, end frame and steps
- Args:
- node(hou.Node)
-
- Returns:
- dict
-
- """
-
- data = {}
-
- if node.parm("trange") is None:
- self.log.debug("Node has no 'trange' parameter: "
- "{}".format(node.path()))
- return data
-
- if node.evalParm("trange") == 0:
- # Ignore 'render current frame'
- self.log.debug("Node '{}' has 'Render current frame' set. "
- "Time range data ignored.".format(node.path()))
- return data
-
- data["frameStart"] = node.evalParm("f1")
- data["frameEnd"] = node.evalParm("f2")
- data["byFrameStep"] = node.evalParm("f3")
-
- return data
diff --git a/openpype/hosts/houdini/plugins/publish/collect_instances.py b/openpype/hosts/houdini/plugins/publish/collect_instances.py
index 3772c9e705..52966fb3c2 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_instances.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_instances.py
@@ -91,27 +91,3 @@ class CollectInstances(pyblish.api.ContextPlugin):
context[:] = sorted(context, key=sort_by_family)
return context
-
- def get_frame_data(self, node):
- """Get the frame data: start frame, end frame and steps
- Args:
- node(hou.Node)
-
- Returns:
- dict
-
- """
-
- data = {}
-
- if node.parm("trange") is None:
- return data
-
- if node.evalParm("trange") == 0:
- return data
-
- data["frameStart"] = node.evalParm("f1")
- data["frameEnd"] = node.evalParm("f2")
- data["byFrameStep"] = node.evalParm("f3")
-
- return data
diff --git a/openpype/hosts/houdini/plugins/publish/collect_instances_usd_layered.py b/openpype/hosts/houdini/plugins/publish/collect_instances_usd_layered.py
index 0600730d00..d154cdc7c0 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_instances_usd_layered.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_instances_usd_layered.py
@@ -122,10 +122,6 @@ class CollectInstancesUsdLayered(pyblish.api.ContextPlugin):
instance.data.update(save_data)
instance.data["usdLayer"] = layer
- # Don't allow the Pyblish `instanceToggled` we have installed
- # to set this node to bypass.
- instance.data["_allowToggleBypass"] = False
-
instances.append(instance)
# Store the collected ROP node dependencies
diff --git a/openpype/hosts/houdini/plugins/publish/collect_karma_rop.py b/openpype/hosts/houdini/plugins/publish/collect_karma_rop.py
index eabb1128d8..dac350a6ef 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_karma_rop.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_karma_rop.py
@@ -24,7 +24,9 @@ class CollectKarmaROPRenderProducts(pyblish.api.InstancePlugin):
"""
label = "Karma ROP Render Products"
- order = pyblish.api.CollectorOrder + 0.4
+ # This specific order value is used so that
+ # this plugin runs after CollectFrames
+ order = pyblish.api.CollectorOrder + 0.11
hosts = ["houdini"]
families = ["karma_rop"]
@@ -95,8 +97,9 @@ class CollectKarmaROPRenderProducts(pyblish.api.InstancePlugin):
return path
expected_files = []
- start = instance.data["frameStart"]
- end = instance.data["frameEnd"]
+ start = instance.data["frameStartHandle"]
+ end = instance.data["frameEndHandle"]
+
for i in range(int(start), (int(end) + 1)):
expected_files.append(
os.path.join(dir, (file % i)).replace("\\", "/"))
diff --git a/openpype/hosts/houdini/plugins/publish/collect_mantra_rop.py b/openpype/hosts/houdini/plugins/publish/collect_mantra_rop.py
index c4460f5350..64ef20f4e7 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_mantra_rop.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_mantra_rop.py
@@ -24,7 +24,9 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
"""
label = "Mantra ROP Render Products"
- order = pyblish.api.CollectorOrder + 0.4
+ # This specific order value is used so that
+ # this plugin runs after CollectFrames
+ order = pyblish.api.CollectorOrder + 0.11
hosts = ["houdini"]
families = ["mantra_rop"]
@@ -42,6 +44,25 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
default_prefix = evalParmNoFrame(rop, "vm_picture")
render_products = []
+ # Store whether we are splitting the render job (export + render)
+ split_render = bool(rop.parm("soho_outputmode").eval())
+ instance.data["splitRender"] = split_render
+ export_prefix = None
+ export_products = []
+ if split_render:
+ export_prefix = evalParmNoFrame(
+ rop, "soho_diskfile", pad_character="0"
+ )
+ beauty_export_product = self.get_render_product_name(
+ prefix=export_prefix,
+ suffix=None)
+ export_products.append(beauty_export_product)
+ self.log.debug(
+ "Found export product: {}".format(beauty_export_product)
+ )
+ instance.data["ifdFile"] = beauty_export_product
+ instance.data["exportFiles"] = list(export_products)
+
# Default beauty AOV
beauty_product = self.get_render_product_name(
prefix=default_prefix, suffix=None
@@ -118,8 +139,9 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
return path
expected_files = []
- start = instance.data["frameStart"]
- end = instance.data["frameEnd"]
+ start = instance.data["frameStartHandle"]
+ end = instance.data["frameEndHandle"]
+
for i in range(int(start), (int(end) + 1)):
expected_files.append(
os.path.join(dir, (file % i)).replace("\\", "/"))
diff --git a/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py b/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py
index dbb15ab88f..0acddab011 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py
@@ -24,7 +24,9 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
"""
label = "Redshift ROP Render Products"
- order = pyblish.api.CollectorOrder + 0.4
+ # This specific order value is used so that
+ # this plugin runs after CollectFrames
+ order = pyblish.api.CollectorOrder + 0.11
hosts = ["houdini"]
families = ["redshift_rop"]
@@ -132,8 +134,9 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
return path
expected_files = []
- start = instance.data["frameStart"]
- end = instance.data["frameEnd"]
+ start = instance.data["frameStartHandle"]
+ end = instance.data["frameEndHandle"]
+
for i in range(int(start), (int(end) + 1)):
expected_files.append(
os.path.join(dir, (file % i)).replace("\\", "/"))
diff --git a/openpype/hosts/houdini/plugins/publish/collect_review_data.py b/openpype/hosts/houdini/plugins/publish/collect_review_data.py
index 3efb75e66c..9671945b9a 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_review_data.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_review_data.py
@@ -6,6 +6,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
"""Collect Review Data."""
label = "Collect Review Data"
+ # This specific order value is used so that
+ # this plugin runs after CollectRopFrameRange
order = pyblish.api.CollectorOrder + 0.1
hosts = ["houdini"]
families = ["review"]
@@ -41,8 +43,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
return
if focal_length_parm.isTimeDependent():
- start = instance.data["frameStart"]
- end = instance.data["frameEnd"] + 1
+ start = instance.data["frameStartHandle"]
+ end = instance.data["frameEndHandle"] + 1
focal_length = [
focal_length_parm.evalAsFloatAtFrame(t)
for t in range(int(start), int(end))
diff --git a/openpype/hosts/houdini/plugins/publish/collect_rop_frame_range.py b/openpype/hosts/houdini/plugins/publish/collect_rop_frame_range.py
index 2a6be6b9f1..1e6bc3b16e 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_rop_frame_range.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_rop_frame_range.py
@@ -8,6 +8,7 @@ from openpype.hosts.houdini.api import lib
class CollectRopFrameRange(pyblish.api.InstancePlugin):
"""Collect all frames which would be saved from the ROP nodes"""
+ hosts = ["houdini"]
order = pyblish.api.CollectorOrder
label = "Collect RopNode Frame Range"
@@ -16,26 +17,22 @@ class CollectRopFrameRange(pyblish.api.InstancePlugin):
node_path = instance.data.get("instance_node")
if node_path is None:
# Instance without instance node like a workfile instance
+ self.log.debug(
+ "No instance node found for instance: {}".format(instance)
+ )
return
ropnode = hou.node(node_path)
- frame_data = lib.get_frame_data(ropnode)
+ frame_data = lib.get_frame_data(
+ ropnode, self.log
+ )
- if "frameStart" in frame_data and "frameEnd" in frame_data:
+ if not frame_data:
+ return
- # Log artist friendly message about the collected frame range
- message = (
- "Frame range {0[frameStart]} - {0[frameEnd]}"
- ).format(frame_data)
- if frame_data.get("step", 1.0) != 1.0:
- message += " with step {0[step]}".format(frame_data)
- self.log.info(message)
+ # Log debug message about the collected frame range
+ self.log.debug(
+ "Collected frame_data: {}".format(frame_data)
+ )
- instance.data.update(frame_data)
-
- # Add frame range to label if the instance has a frame range.
- label = instance.data.get("label", instance.data["name"])
- instance.data["label"] = (
- "{0} [{1[frameStart]} - {1[frameEnd]}]".format(label,
- frame_data)
- )
+ instance.data.update(frame_data)
diff --git a/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py b/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py
index 14a8e3c056..462cf99b9c 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py
@@ -1,6 +1,10 @@
import pyblish.api
-from openpype.client import get_subset_by_name, get_asset_by_name
+from openpype.client import (
+ get_subset_by_name,
+ get_asset_by_name,
+ get_asset_name_identifier,
+)
import openpype.lib.usdlib as usdlib
@@ -51,8 +55,9 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin):
self.log.debug("Add bootstrap for: %s" % bootstrap)
project_name = instance.context.data["projectName"]
- asset = get_asset_by_name(project_name, instance.data["asset"])
- assert asset, "Asset must exist: %s" % asset
+ asset_name = instance.data["asset"]
+ asset_doc = get_asset_by_name(project_name, asset_name)
+ assert asset_doc, "Asset must exist: %s" % asset_name
# Check which are not about to be created and don't exist yet
required = {"shot": ["usdShot"], "asset": ["usdAsset"]}.get(bootstrap)
@@ -67,19 +72,21 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin):
required += list(layers)
self.log.debug("Checking required bootstrap: %s" % required)
- for subset in required:
- if self._subset_exists(project_name, instance, subset, asset):
+ for subset_name in required:
+ if self._subset_exists(
+ project_name, instance, subset_name, asset_doc
+ ):
continue
self.log.debug(
"Creating {0} USD bootstrap: {1} {2}".format(
- bootstrap, asset["name"], subset
+ bootstrap, asset_name, subset_name
)
)
- new = instance.context.create_instance(subset)
- new.data["subset"] = subset
- new.data["label"] = "{0} ({1})".format(subset, asset["name"])
+ new = instance.context.create_instance(subset_name)
+ new.data["subset"] = subset_name
+ new.data["label"] = "{0} ({1})".format(subset_name, asset_name)
new.data["family"] = "usd.bootstrap"
new.data["comment"] = "Automated bootstrap USD file."
new.data["publishFamilies"] = ["usd"]
@@ -91,21 +98,23 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin):
for key in ["asset"]:
new.data[key] = instance.data[key]
- def _subset_exists(self, project_name, instance, subset, asset):
+ def _subset_exists(self, project_name, instance, subset_name, asset_doc):
"""Return whether subset exists in current context or in database."""
# Allow it to be created during this publish session
context = instance.context
+
+ asset_doc_name = get_asset_name_identifier(asset_doc)
for inst in context:
if (
- inst.data["subset"] == subset
- and inst.data["asset"] == asset["name"]
+ inst.data["subset"] == subset_name
+ and inst.data["asset"] == asset_doc_name
):
return True
# Or, if they already exist in the database we can
# skip them too.
if get_subset_by_name(
- project_name, subset, asset["_id"], fields=["_id"]
+ project_name, subset_name, asset_doc["_id"], fields=["_id"]
):
return True
return False
diff --git a/openpype/hosts/houdini/plugins/publish/collect_vray_rop.py b/openpype/hosts/houdini/plugins/publish/collect_vray_rop.py
index 277f922ba4..ad4fdb0da5 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_vray_rop.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_vray_rop.py
@@ -24,7 +24,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
"""
label = "VRay ROP Render Products"
- order = pyblish.api.CollectorOrder + 0.4
+ # This specific order value is used so that
+ # this plugin runs after CollectFrames
+ order = pyblish.api.CollectorOrder + 0.11
hosts = ["houdini"]
families = ["vray_rop"]
@@ -43,7 +45,26 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
render_products = []
# TODO: add render elements if render element
- beauty_product = self.get_beauty_render_product(default_prefix)
+ # Store whether we are splitting the render job in an export + render
+ split_render = rop.parm("render_export_mode").eval() == "2"
+ instance.data["splitRender"] = split_render
+ export_prefix = None
+ export_products = []
+ if split_render:
+ export_prefix = evalParmNoFrame(
+ rop, "render_export_filepath", pad_character="0"
+ )
+ beauty_export_product = self.get_render_product_name(
+ prefix=export_prefix,
+ suffix=None)
+ export_products.append(beauty_export_product)
+ self.log.debug(
+ "Found export product: {}".format(beauty_export_product)
+ )
+ instance.data["ifdFile"] = beauty_export_product
+ instance.data["exportFiles"] = list(export_products)
+
+ beauty_product = self.get_render_product_name(default_prefix)
render_products.append(beauty_product)
files_by_aov = {
"RGB Color": self.generate_expected_files(instance,
@@ -77,7 +98,7 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
instance.data["colorspaceDisplay"] = colorspace_data["display"]
instance.data["colorspaceView"] = colorspace_data["view"]
- def get_beauty_render_product(self, prefix, suffix=""):
+ def get_render_product_name(self, prefix, suffix=""):
"""Return the beauty output filename if render element enabled
"""
# Remove aov suffix from the product: `prefix.aov_suffix` -> `prefix`
@@ -115,8 +136,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
return path
expected_files = []
- start = instance.data["frameStart"]
- end = instance.data["frameEnd"]
+ start = instance.data["frameStartHandle"]
+ end = instance.data["frameEndHandle"]
+
for i in range(int(start), (int(end) + 1)):
expected_files.append(
os.path.join(dir, (file % i)).replace("\\", "/"))
diff --git a/openpype/hosts/houdini/plugins/publish/extract_alembic.py b/openpype/hosts/houdini/plugins/publish/extract_alembic.py
index bdd19b23d4..df2fdda241 100644
--- a/openpype/hosts/houdini/plugins/publish/extract_alembic.py
+++ b/openpype/hosts/houdini/plugins/publish/extract_alembic.py
@@ -14,8 +14,12 @@ class ExtractAlembic(publish.Extractor):
label = "Extract Alembic"
hosts = ["houdini"]
families = ["abc", "camera"]
+ targets = ["local", "remote"]
def process(self, instance):
+ if instance.data.get("farm"):
+ self.log.debug("Should be processed on farm, skipping.")
+ return
ropnode = hou.node(instance.data["instance_node"])
diff --git a/openpype/hosts/houdini/plugins/publish/extract_ass.py b/openpype/hosts/houdini/plugins/publish/extract_ass.py
index 0d246625ba..28dd08f999 100644
--- a/openpype/hosts/houdini/plugins/publish/extract_ass.py
+++ b/openpype/hosts/houdini/plugins/publish/extract_ass.py
@@ -14,9 +14,12 @@ class ExtractAss(publish.Extractor):
label = "Extract Ass"
families = ["ass"]
hosts = ["houdini"]
+ targets = ["local", "remote"]
def process(self, instance):
-
+ if instance.data.get("farm"):
+ self.log.debug("Should be processed on farm, skipping.")
+ return
ropnode = hou.node(instance.data["instance_node"])
# Get the filename from the filename parameter
@@ -56,7 +59,7 @@ class ExtractAss(publish.Extractor):
'ext': ext,
"files": files,
"stagingDir": staging_dir,
- "frameStart": instance.data["frameStart"],
- "frameEnd": instance.data["frameEnd"],
+ "frameStart": instance.data["frameStartHandle"],
+ "frameEnd": instance.data["frameEndHandle"],
}
instance.data["representations"].append(representation)
diff --git a/openpype/hosts/houdini/plugins/publish/extract_bgeo.py b/openpype/hosts/houdini/plugins/publish/extract_bgeo.py
index c9625ec880..a3840f8f73 100644
--- a/openpype/hosts/houdini/plugins/publish/extract_bgeo.py
+++ b/openpype/hosts/houdini/plugins/publish/extract_bgeo.py
@@ -17,7 +17,9 @@ class ExtractBGEO(publish.Extractor):
families = ["bgeo"]
def process(self, instance):
-
+ if instance.data.get("farm"):
+ self.log.debug("Should be processed on farm, skipping.")
+ return
ropnode = hou.node(instance.data["instance_node"])
# Get the filename from the filename parameter
@@ -47,7 +49,7 @@ class ExtractBGEO(publish.Extractor):
"ext": ext.lstrip("."),
"files": output,
"stagingDir": staging_dir,
- "frameStart": instance.data["frameStart"],
- "frameEnd": instance.data["frameEnd"]
+ "frameStart": instance.data["frameStartHandle"],
+ "frameEnd": instance.data["frameEndHandle"]
}
instance.data["representations"].append(representation)
diff --git a/openpype/hosts/houdini/plugins/publish/extract_composite.py b/openpype/hosts/houdini/plugins/publish/extract_composite.py
index 7a1ab36b93..11cf83a46d 100644
--- a/openpype/hosts/houdini/plugins/publish/extract_composite.py
+++ b/openpype/hosts/houdini/plugins/publish/extract_composite.py
@@ -41,8 +41,8 @@ class ExtractComposite(publish.Extractor):
"ext": ext,
"files": output,
"stagingDir": staging_dir,
- "frameStart": instance.data["frameStart"],
- "frameEnd": instance.data["frameEnd"],
+ "frameStart": instance.data["frameStartHandle"],
+ "frameEnd": instance.data["frameEndHandle"],
}
from pprint import pformat
diff --git a/openpype/hosts/houdini/plugins/publish/extract_fbx.py b/openpype/hosts/houdini/plugins/publish/extract_fbx.py
index 7993b3352f..7dc193c6a9 100644
--- a/openpype/hosts/houdini/plugins/publish/extract_fbx.py
+++ b/openpype/hosts/houdini/plugins/publish/extract_fbx.py
@@ -40,9 +40,9 @@ class ExtractFBX(publish.Extractor):
}
# A single frame may also be rendered without start/end frame.
- if "frameStart" in instance.data and "frameEnd" in instance.data:
- representation["frameStart"] = instance.data["frameStart"]
- representation["frameEnd"] = instance.data["frameEnd"]
+ if "frameStartHandle" in instance.data and "frameEndHandle" in instance.data: # noqa
+ representation["frameStart"] = instance.data["frameStartHandle"]
+ representation["frameEnd"] = instance.data["frameEndHandle"]
# set value type for 'representations' key to list
if "representations" not in instance.data:
diff --git a/openpype/hosts/houdini/plugins/publish/extract_mantra_ifd.py b/openpype/hosts/houdini/plugins/publish/extract_mantra_ifd.py
new file mode 100644
index 0000000000..894260d1bf
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/extract_mantra_ifd.py
@@ -0,0 +1,51 @@
+import os
+
+import pyblish.api
+
+from openpype.pipeline import publish
+
+import hou
+
+
+class ExtractMantraIFD(publish.Extractor):
+
+ order = pyblish.api.ExtractorOrder
+ label = "Extract Mantra ifd"
+ hosts = ["houdini"]
+ families = ["mantraifd"]
+ targets = ["local", "remote"]
+
+ def process(self, instance):
+ if instance.data.get("farm"):
+ self.log.debug("Should be processed on farm, skipping.")
+ return
+
+ ropnode = hou.node(instance.data.get("instance_node"))
+ output = ropnode.evalParm("soho_diskfile")
+ staging_dir = os.path.dirname(output)
+ instance.data["stagingDir"] = staging_dir
+
+ files = instance.data["frames"]
+ missing_frames = [
+ frame
+ for frame in instance.data["frames"]
+ if not os.path.exists(
+ os.path.normpath(os.path.join(staging_dir, frame)))
+ ]
+ if missing_frames:
+ raise RuntimeError("Failed to complete Mantra ifd extraction. "
+ "Missing output files: {}".format(
+ missing_frames))
+
+ if "representations" not in instance.data:
+ instance.data["representations"] = []
+
+ representation = {
+ 'name': 'ifd',
+ 'ext': 'ifd',
+ 'files': files,
+ "stagingDir": staging_dir,
+ "frameStart": instance.data["frameStart"],
+ "frameEnd": instance.data["frameEnd"],
+ }
+ instance.data["representations"].append(representation)
diff --git a/openpype/hosts/houdini/plugins/publish/extract_opengl.py b/openpype/hosts/houdini/plugins/publish/extract_opengl.py
index 6c36dec5f5..38808089ac 100644
--- a/openpype/hosts/houdini/plugins/publish/extract_opengl.py
+++ b/openpype/hosts/houdini/plugins/publish/extract_opengl.py
@@ -39,8 +39,8 @@ class ExtractOpenGL(publish.Extractor):
"ext": instance.data["imageFormat"],
"files": output,
"stagingDir": staging_dir,
- "frameStart": instance.data["frameStart"],
- "frameEnd": instance.data["frameEnd"],
+ "frameStart": instance.data["frameStartHandle"],
+ "frameEnd": instance.data["frameEndHandle"],
"tags": tags,
"preview": True,
"camera_name": instance.data.get("review_camera")
diff --git a/openpype/hosts/houdini/plugins/publish/extract_redshift_proxy.py b/openpype/hosts/houdini/plugins/publish/extract_redshift_proxy.py
index 1d99ac665c..218f3b9256 100644
--- a/openpype/hosts/houdini/plugins/publish/extract_redshift_proxy.py
+++ b/openpype/hosts/houdini/plugins/publish/extract_redshift_proxy.py
@@ -14,9 +14,12 @@ class ExtractRedshiftProxy(publish.Extractor):
label = "Extract Redshift Proxy"
families = ["redshiftproxy"]
hosts = ["houdini"]
+ targets = ["local", "remote"]
def process(self, instance):
-
+ if instance.data.get("farm"):
+ self.log.debug("Should be processed on farm, skipping.")
+ return
ropnode = hou.node(instance.data.get("instance_node"))
# Get the filename from the filename parameter
@@ -44,8 +47,8 @@ class ExtractRedshiftProxy(publish.Extractor):
}
# A single frame may also be rendered without start/end frame.
- if "frameStart" in instance.data and "frameEnd" in instance.data:
- representation["frameStart"] = instance.data["frameStart"]
- representation["frameEnd"] = instance.data["frameEnd"]
+ if "frameStartHandle" in instance.data and "frameEndHandle" in instance.data: # noqa
+ representation["frameStart"] = instance.data["frameStartHandle"]
+ representation["frameEnd"] = instance.data["frameEndHandle"]
instance.data["representations"].append(representation)
diff --git a/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py b/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py
index 4bca758f08..8ac16704f0 100644
--- a/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py
+++ b/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py
@@ -16,7 +16,9 @@ class ExtractVDBCache(publish.Extractor):
hosts = ["houdini"]
def process(self, instance):
-
+ if instance.data.get("farm"):
+ self.log.debug("Should be processed on farm, skipping.")
+ return
ropnode = hou.node(instance.data["instance_node"])
# Get the filename from the filename parameter
@@ -40,7 +42,7 @@ class ExtractVDBCache(publish.Extractor):
"ext": "vdb",
"files": output,
"stagingDir": staging_dir,
- "frameStart": instance.data["frameStart"],
- "frameEnd": instance.data["frameEnd"],
+ "frameStart": instance.data["frameStartHandle"],
+ "frameEnd": instance.data["frameEndHandle"],
}
instance.data["representations"].append(representation)
diff --git a/openpype/hosts/houdini/plugins/publish/increment_current_file.py b/openpype/hosts/houdini/plugins/publish/increment_current_file.py
index 3569de7693..4788cca3cf 100644
--- a/openpype/hosts/houdini/plugins/publish/increment_current_file.py
+++ b/openpype/hosts/houdini/plugins/publish/increment_current_file.py
@@ -22,7 +22,8 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin):
"arnold_rop",
"mantra_rop",
"karma_rop",
- "usdrender"]
+ "usdrender",
+ "publish.hou"]
optional = True
def process(self, context):
diff --git a/openpype/hosts/houdini/plugins/publish/validate_frame_range.py b/openpype/hosts/houdini/plugins/publish/validate_frame_range.py
new file mode 100644
index 0000000000..1b12fa7096
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_frame_range.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+import pyblish.api
+from openpype.pipeline import PublishValidationError
+from openpype.pipeline.publish import RepairAction
+from openpype.hosts.houdini.api.action import SelectInvalidAction
+
+import hou
+
+
+class DisableUseAssetHandlesAction(RepairAction):
+ label = "Disable use asset handles"
+ icon = "mdi.toggle-switch-off"
+
+
+class ValidateFrameRange(pyblish.api.InstancePlugin):
+ """Validate Frame Range.
+
+    Due to the usage of start and end handles, the frame range
+    must be >= (start handle + end handle); otherwise frameStart
+    can end up greater than frameEnd.
+ """
+
+ order = pyblish.api.ValidatorOrder - 0.1
+ hosts = ["houdini"]
+ label = "Validate Frame Range"
+ actions = [DisableUseAssetHandlesAction, SelectInvalidAction]
+
+ def process(self, instance):
+
+ invalid = self.get_invalid(instance)
+ if invalid:
+ raise PublishValidationError(
+ title="Invalid Frame Range",
+ message=(
+ "Invalid frame range because the instance "
+ "start frame ({0[frameStart]}) is higher than "
+ "the end frame ({0[frameEnd]})"
+ .format(instance.data)
+ ),
+ description=(
+ "## Invalid Frame Range\n"
+ "The frame range for the instance is invalid because "
+ "the start frame is higher than the end frame.\n\nThis "
+ "is likely due to asset handles being applied to your "
+ "instance or the ROP node's start frame "
+ "is set higher than the end frame.\n\nIf your ROP frame "
+ "range is correct and you do not want to apply asset "
+ "handles make sure to disable Use asset handles on the "
+ "publish instance."
+ )
+ )
+
+ @classmethod
+ def get_invalid(cls, instance):
+
+ if not instance.data.get("instance_node"):
+ return
+
+ rop_node = hou.node(instance.data["instance_node"])
+ frame_start = instance.data.get("frameStart")
+ frame_end = instance.data.get("frameEnd")
+
+ if frame_start is None or frame_end is None:
+ cls.log.debug(
+ "Skipping frame range validation for "
+ "instance without frame data: {}".format(rop_node.path())
+ )
+ return
+
+ if frame_start > frame_end:
+ cls.log.info(
+ "The ROP node render range is set to "
+ "{0[frameStartHandle]} - {0[frameEndHandle]} "
+ "The asset handles applied to the instance are start handle "
+ "{0[handleStart]} and end handle {0[handleEnd]}"
+ .format(instance.data)
+ )
+ return [rop_node]
+
+ @classmethod
+ def repair(cls, instance):
+
+ if not cls.get_invalid(instance):
+ # Already fixed
+ return
+
+ # Disable use asset handles
+ context = instance.context
+ create_context = context.data["create_context"]
+ instance_id = instance.data.get("instance_id")
+ if not instance_id:
+ cls.log.debug("'{}' must have instance id"
+ .format(instance))
+ return
+
+ created_instance = create_context.get_instance_by_id(instance_id)
+        if not created_instance:
+ cls.log.debug("Unable to find instance '{}' by id"
+ .format(instance))
+ return
+
+ created_instance.publish_attributes["CollectAssetHandles"]["use_handles"] = False # noqa
+
+ create_context.save_changes()
+ cls.log.debug("use asset handles is turned off for '{}'"
+ .format(instance))
diff --git a/openpype/hosts/houdini/plugins/publish/validate_houdini_license_category.py b/openpype/hosts/houdini/plugins/publish/validate_houdini_license_category.py
index f1c52f22c1..108a700bbe 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_houdini_license_category.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_houdini_license_category.py
@@ -1,32 +1,39 @@
# -*- coding: utf-8 -*-
import pyblish.api
from openpype.pipeline import PublishValidationError
+import hou
-class ValidateHoudiniCommercialLicense(pyblish.api.InstancePlugin):
- """Validate the Houdini instance runs a Commercial license.
+class ValidateHoudiniNotApprenticeLicense(pyblish.api.InstancePlugin):
+ """Validate the Houdini instance runs a non Apprentice license.
- When extracting USD files from a non-commercial Houdini license, even with
- Houdini Indie license, the resulting files will get "scrambled" with
- a license protection and get a special .usdnc or .usdlc suffix.
+ USD ROPs:
+ When extracting USD files from an apprentice Houdini license,
+ the resulting files will get "scrambled" with a license protection
+ and get a special .usdnc suffix.
- This currently breaks the Subset/representation pipeline so we disallow
- any publish with those licenses. Only the commercial license is valid.
+ This currently breaks the Subset/representation pipeline so we disallow
+ any publish with apprentice license.
+ Alembic ROPs:
+ Houdini Apprentice does not export Alembic.
"""
order = pyblish.api.ValidatorOrder
- families = ["usd"]
+ families = ["usd", "abc", "fbx", "camera"]
hosts = ["houdini"]
- label = "Houdini Commercial License"
+ label = "Houdini Apprentice License"
def process(self, instance):
- import hou
+ if hou.isApprentice():
+ # Find which family was matched with the plug-in
+ families = {instance.data["family"]}
+ families.update(instance.data.get("families", []))
+ disallowed_families = families.intersection(self.families)
+ families = " ".join(sorted(disallowed_families)).title()
- license = hou.licenseCategory()
- if license != hou.licenseCategoryType.Commercial:
raise PublishValidationError(
- ("USD Publishing requires a full Commercial "
- "license. You are on: {}").format(license),
+ "{} publishing requires a non apprentice license."
+ .format(families),
title=self.label)
diff --git a/openpype/hosts/houdini/plugins/publish/validate_subset_name.py b/openpype/hosts/houdini/plugins/publish/validate_subset_name.py
index bb3648f361..7bed74ebb1 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_subset_name.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_subset_name.py
@@ -54,12 +54,13 @@ class ValidateSubsetName(pyblish.api.InstancePlugin,
rop_node = hou.node(instance.data["instance_node"])
# Check subset name
+ asset_doc = instance.data["assetEntity"]
subset_name = get_subset_name(
family=instance.data["family"],
variant=instance.data["variant"],
task_name=instance.data["task"],
- asset_doc=instance.data["assetEntity"],
- dynamic_data={"asset": instance.data["asset"]}
+ asset_doc=asset_doc,
+ dynamic_data={"asset": asset_doc["name"]}
)
if instance.data.get("subset") != subset_name:
@@ -76,12 +77,13 @@ class ValidateSubsetName(pyblish.api.InstancePlugin,
rop_node = hou.node(instance.data["instance_node"])
# Check subset name
+ asset_doc = instance.data["assetEntity"]
subset_name = get_subset_name(
family=instance.data["family"],
variant=instance.data["variant"],
task_name=instance.data["task"],
- asset_doc=instance.data["assetEntity"],
- dynamic_data={"asset": instance.data["asset"]}
+ asset_doc=asset_doc,
+ dynamic_data={"asset": asset_doc["name"]}
)
instance.data["subset"] = subset_name
diff --git a/openpype/hosts/houdini/startup/MainMenuCommon.xml b/openpype/hosts/houdini/startup/MainMenuCommon.xml
index b2e32a70f9..0903aef7bc 100644
--- a/openpype/hosts/houdini/startup/MainMenuCommon.xml
+++ b/openpype/hosts/houdini/startup/MainMenuCommon.xml
@@ -4,7 +4,7 @@