Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)
Merge remote-tracking branch 'upstream/develop' into maya_new_publisher

# Conflicts:
#   openpype/hosts/maya/plugins/create/create_look.py
#   openpype/hosts/maya/plugins/create/create_review.py
#   openpype/hosts/maya/plugins/publish/collect_instances.py
#   openpype/hosts/maya/plugins/publish/validate_attributes.py
#   openpype/hosts/maya/plugins/publish/validate_frame_range.py
#   openpype/hosts/maya/plugins/publish/validate_maya_units.py
#   openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py
#   openpype/modules/deadline/plugins/publish/collect_pools.py

Parent: fd6285399e
Commit: 14d767a97a

295 changed files with 10651 additions and 8683 deletions
.github/pr-branch-labeler.yml (new file, vendored, 15 lines)

@@ -0,0 +1,15 @@
# Apply label "type: feature" if head matches "feature/*"
'type: feature':
  head: "feature/*"

# Apply label "type: enhancement" if head matches "enhancement/*"
'type: enhancement':
  head: "enhancement/*"

# Apply label "type: bug" if head matches one of "bugfix/*" or "hotfix/*"
'type: bug':
  head: ["bugfix/*", "hotfix/*"]

# Apply label "Bump Minor" if base matches "release/next-minor"
'Bump Minor':
  base: "release/next-minor"
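As a quick illustration of how these head-branch rules resolve to labels, here is a minimal Python sketch (hypothetical; it uses `fnmatch` as a stand-in for the action's own pattern matching):

```python
from fnmatch import fnmatch

# Hypothetical mirror of the pr-branch-labeler rules above: label -> head branch patterns.
HEAD_RULES = {
    "type: feature": ["feature/*"],
    "type: enhancement": ["enhancement/*"],
    "type: bug": ["bugfix/*", "hotfix/*"],
}

def labels_for_head(head_branch):
    """Return labels whose head patterns match the PR's source branch name."""
    return [
        label for label, patterns in HEAD_RULES.items()
        if any(fnmatch(head_branch, pattern) for pattern in patterns)
    ]

print(labels_for_head("bugfix/OP-2834_fix-extract-playblast"))  # ['type: bug']
```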
.github/pr-glob-labeler.yml (new file, vendored, 102 lines)

@@ -0,0 +1,102 @@
# Add type: unittest label if any changes in tests folders
'type: unittest':
  - '*/*tests*/**/*'

# any changes in documentation structure
'type: documentation':
  - '*/**/*website*/**/*'
  - '*/**/*docs*/**/*'

# hosts triage
'host: Nuke':
  - '*/**/*nuke*'
  - '*/**/*nuke*/**/*'

'host: Photoshop':
  - '*/**/*photoshop*'
  - '*/**/*photoshop*/**/*'

'host: Harmony':
  - '*/**/*harmony*'
  - '*/**/*harmony*/**/*'

'host: UE':
  - '*/**/*unreal*'
  - '*/**/*unreal*/**/*'

'host: Houdini':
  - '*/**/*houdini*'
  - '*/**/*houdini*/**/*'

'host: Maya':
  - '*/**/*maya*'
  - '*/**/*maya*/**/*'

'host: Resolve':
  - '*/**/*resolve*'
  - '*/**/*resolve*/**/*'

'host: Blender':
  - '*/**/*blender*'
  - '*/**/*blender*/**/*'

'host: Hiero':
  - '*/**/*hiero*'
  - '*/**/*hiero*/**/*'

'host: Fusion':
  - '*/**/*fusion*'
  - '*/**/*fusion*/**/*'

'host: Flame':
  - '*/**/*flame*'
  - '*/**/*flame*/**/*'

'host: TrayPublisher':
  - '*/**/*traypublisher*'
  - '*/**/*traypublisher*/**/*'

'host: 3dsmax':
  - '*/**/*max*'
  - '*/**/*max*/**/*'

'host: TV Paint':
  - '*/**/*tvpaint*'
  - '*/**/*tvpaint*/**/*'

'host: CelAction':
  - '*/**/*celaction*'
  - '*/**/*celaction*/**/*'

'host: After Effects':
  - '*/**/*aftereffects*'
  - '*/**/*aftereffects*/**/*'

'host: Substance Painter':
  - '*/**/*substancepainter*'
  - '*/**/*substancepainter*/**/*'

# modules triage
'module: Deadline':
  - '*/**/*deadline*'
  - '*/**/*deadline*/**/*'

'module: RoyalRender':
  - '*/**/*royalrender*'
  - '*/**/*royalrender*/**/*'

'module: Sitesync':
  - '*/**/*sync_server*'
  - '*/**/*sync_server*/**/*'

'module: Ftrack':
  - '*/**/*ftrack*'
  - '*/**/*ftrack*/**/*'

'module: Shotgrid':
  - '*/**/*shotgrid*'
  - '*/**/*shotgrid*/**/*'

'module: Kitsu':
  - '*/**/*kitsu*'
  - '*/**/*kitsu*/**/*'
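Unlike the branch labeler, this config keys off the paths of changed files. A rough Python sketch of that idea (hypothetical; `fnmatch` only approximates the `**` handling of the GitHub labeler's minimatch-style globs):

```python
from fnmatch import fnmatch

# Hypothetical subset of the mapping above: label -> path glob patterns.
LABEL_GLOBS = {
    "host: Maya": ["*/**/*maya*", "*/**/*maya*/**/*"],
    "module: Deadline": ["*/**/*deadline*", "*/**/*deadline*/**/*"],
    "type: unittest": ["*/*tests*/**/*"],
}

def labels_for_paths(changed_paths):
    """Return labels whose patterns match at least one changed file path."""
    labels = set()
    for label, patterns in LABEL_GLOBS.items():
        if any(fnmatch(path, pattern) for path in changed_paths for pattern in patterns):
            labels.add(label)
    return labels

print(labels_for_paths(["openpype/hosts/maya/api/lib.py"]))  # {'host: Maya'} (approximate)
```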
.github/workflows/project_actions.yml (new file, vendored, 118 lines)

@@ -0,0 +1,118 @@
name: project-actions

on:
  pull_request_target:
    types: [opened, assigned]
  pull_request_review:
    types: [submitted]
  issue_comment:
    types: [created]
  pull_request_review_comment:
    types: [created]

jobs:

  pr_review_started:
    name: pr_review_started
    runs-on: ubuntu-latest
    # -----------------------------
    # conditions are:
    # - PR issue comment which is not from Ynbot
    # - PR review comment which is not from Hound (or any other bot)
    # - PR review submitted which is not from Hound (or any other bot) and is not 'Changes requested'
    # - make sure it only runs if not forked repo
    # -----------------------------
    if: |
      (github.event_name == 'issue_comment' && github.event.pull_request.head.repo.owner.login == 'ynput' && github.event.comment.user.id != 82967070) ||
      (github.event_name == 'pull_request_review_comment' && github.event.pull_request.head.repo.owner.login == 'ynput' && github.event.comment.user.type != 'Bot') ||
      (github.event_name == 'pull_request_review' && github.event.pull_request.head.repo.owner.login == 'ynput' && github.event.review.state != 'changes_requested' && github.event.review.user.type != 'Bot')
    steps:
      - name: Move PR to 'Review In Progress'
        uses: leonsteinhaeuser/project-beta-automations@v2.1.0
        with:
          gh_token: ${{ secrets.YNPUT_BOT_TOKEN }}
          organization: ynput
          project_id: 11
          resource_node_id: ${{ github.event.pull_request.node_id || github.event.issue.node_id }}
          status_value: Review In Progress

  pr_review_requested:
    # -----------------------------
    # Resets ClickUp task status to 'In Progress' after 'Changes Requested' were submitted to PR
    # It only runs if a custom ClickUp task id was found in the ref branch of the PR
    # -----------------------------
    name: pr_review_requested
    runs-on: ubuntu-latest
    if: github.event_name == 'pull_request_review' && github.event.pull_request.head.repo.owner.login == 'ynput' && github.event.review.state == 'changes_requested'
    steps:
      - name: Set branch env
        run: echo "BRANCH_NAME=${{ github.event.pull_request.head.ref }}" >> $GITHUB_ENV
      - name: Get ClickUp ID from ref head name
        id: get_cuID
        run: |
          echo ${{ env.BRANCH_NAME }}
          echo "cuID=$(echo $BRANCH_NAME | sed 's/.*\/\(OP\-[0-9]\{4\}\).*/\1/')" >> $GITHUB_OUTPUT

      - name: Print ClickUp ID
        run: echo ${{ steps.get_cuID.outputs.cuID }}

      - name: Move found ClickUp task to 'Review in Progress'
        if: steps.get_cuID.outputs.cuID
        run: |
          curl -i -X PUT \
            'https://api.clickup.com/api/v2/task/${{ steps.get_cuID.outputs.cuID }}?custom_task_ids=true&team_id=${{secrets.CLICKUP_TEAM_ID}}' \
            -H 'Authorization: ${{secrets.CLICKUP_API_KEY}}' \
            -H 'Content-Type: application/json' \
            -d '{
              "status": "in progress"
            }'

  size-label:
    name: pr_size_label
    runs-on: ubuntu-latest
    if: |
      (github.event_name == 'pull_request' && github.event.action == 'assigned') ||
      (github.event_name == 'pull_request' && github.event.action == 'opened')

    steps:
      - name: Add size label
        uses: "pascalgn/size-label-action@v0.4.3"
        env:
          GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"
          IGNORED: ".gitignore\n*.md\n*.json"
        with:
          sizes: >
            {
              "0": "XS",
              "100": "S",
              "500": "M",
              "1000": "L",
              "1500": "XL",
              "2500": "XXL"
            }

  label_prs_branch:
    name: pr_branch_label
    runs-on: ubuntu-latest
    if: |
      (github.event_name == 'pull_request' && github.event.action == 'assigned') ||
      (github.event_name == 'pull_request' && github.event.action == 'opened')
    steps:
      - name: Label PRs - Branch name detection
        uses: ffittschen/pr-branch-labeler@v1
        with:
          repo-token: ${{ secrets.YNPUT_BOT_TOKEN }}

  label_prs_globe:
    name: pr_globe_label
    runs-on: ubuntu-latest
    if: |
      (github.event_name == 'pull_request' && github.event.action == 'assigned') ||
      (github.event_name == 'pull_request' && github.event.action == 'opened')
    steps:
      - name: Label PRs - Globe detection
        uses: actions/labeler@v4.0.3
        with:
          repo-token: ${{ secrets.YNPUT_BOT_TOKEN }}
          configuration-path: ".github/pr-glob-labeler.yml"
          sync-labels: false
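The "Get ClickUp ID from ref head name" step above extracts the task id with a `sed` substitution. A rough Python equivalent of that pattern (the branch names below are made up for illustration) would be:

```python
import re

def extract_clickup_id(branch_name):
    """Pull an 'OP-' + four-digit ClickUp custom task id out of a branch name, mimicking the sed pattern."""
    match = re.search(r"/(OP-\d{4})", branch_name)
    return match.group(1) if match else None

print(extract_clickup_id("bugfix/OP-1234_fix-extract-playblast"))  # OP-1234
print(extract_clickup_id("feature/no-task-id"))                    # None
```

Note that the `sed` expression leaves the branch name unchanged when nothing matches, whereas this sketch returns `None`.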
ARCHITECTURE.md (new file, 77 lines)

@@ -0,0 +1,77 @@
# Architecture

OpenPype is a monolithic Python project that bundles several parts. This document tries to give a bird's-eye overview of the project and, to a certain degree, of each of the sub-projects.
The current file structure looks like this:

```
.
├── common - Code in this folder is the backend portion of the addon distribution logic for the v4 server.
├── docs - Documentation of the source code.
├── igniter - The OpenPype bootstrapper; deals with running version resolution and setting up the connection to MongoDB.
├── openpype - The actual OpenPype core package.
├── schema - Collection of JSON files describing schematics of objects. This follows Avalon's convention.
├── tests - Integration and unit tests.
├── tools - Convenience scripts to perform common actions (in both bash and ps1).
├── vendor - When using the igniter, it deploys third-party tools in here, such as ffmpeg.
└── website - Source files for https://openpype.io/ which is Docusaurus (https://docusaurus.io/).
```

The core functionality of the pipeline can be found in `igniter` and `openpype`, which in turn rely on the `schema` files. Whenever you build (or download a pre-built) version of OpenPype, these two are bundled in there, and `Igniter` is the entry point.


## Igniter

It's the setup and update tool for OpenPype. Unless you want to package `openpype` separately and deal with all the config manually, this will most likely be your entry point.

```
igniter/
├── bootstrap_repos.py - Module that will find or install OpenPype versions in the system.
├── __init__.py - Igniter entry point.
├── install_dialog.py - Show dialog for choosing central pype repository.
├── install_thread.py - Threading helpers for the install process.
├── __main__.py - Like `__init__.py`?
├── message_dialog.py - Qt dialog with a message and "Ok" button.
├── nice_progress_bar.py - Fancy Qt progress bar.
├── splash.txt - ASCII art for the terminal installer.
├── stylesheet.css - Installer Qt styles.
├── terminal_splash.py - Terminal installer animation, relies on `splash.txt`.
├── tools.py - Collection of methods that don't fit in other modules.
├── update_thread.py - Threading helper to update existing OpenPype installs.
├── update_window.py - Qt UI to update OpenPype installs.
├── user_settings.py - Interface for the OpenPype user settings.
└── version.py - Igniter's version number.
```

## OpenPype

This is the main package of the OpenPype logic. It could be loosely described as a combination of [Avalon](https://getavalon.github.io), [Pyblish](https://pyblish.com/) and glue around those, with custom OpenPype-only elements. Things are in the process of being moved around to better prepare for v4, which will be released under a new name, AYON.

```
openpype/
├── client - Interface for the MongoDB.
├── hooks - Hooks to be executed on certain OpenPype applications defined in `openpype.lib.applications`.
├── host - Base class for the different hosts.
├── hosts - Integration with the different DCCs (hosts) using the `host` base class.
├── lib - Libraries that stitch together the package; some have been moved into other parts.
├── modules - OpenPype modules should contain separated logic of a specific kind of implementation, such as the Ftrack connection and its Python API.
├── pipeline - Core of the OpenPype pipeline; handles creation of data, publishing, etc.
├── plugins - Global/core plugins for the loader and publisher tools.
├── resources - Icons, fonts, etc.
├── scripts - Loose scripts that get run by tools/publishers.
├── settings - OpenPype settings interface.
├── style - Qt styling.
├── tests - Unit tests.
├── tools - Core tools, check out https://openpype.io/docs/artist_tools.
├── vendor - Vendoring of required Python packages.
├── widgets - Common re-usable Qt widgets.
├── action.py - LEGACY: Pyblish actions; now live in `openpype.pipeline.publish.action`.
├── cli.py - Command line interface, leverages `click`.
├── __init__.py - Sets two constants.
├── __main__.py - Entry point, calls `cli.py`.
├── plugin.py - Pyblish plugins.
├── pype_commands.py - Implementation of OpenPype commands.
└── version.py - Current version number.
```
CHANGELOG.md (926 lines changed)

@@ -1,5 +1,931 @@
# Changelog
|
||||
|
||||
## [3.15.3](https://github.com/ynput/OpenPype/tree/3.15.3)
|
||||
|
||||
|
||||
[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.2...3.15.3)
|
||||
|
||||
### **🆕 New features**
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Blender: Extract Review <a href="https://github.com/ynput/OpenPype/pull/3616">#3616</a></summary>
|
||||
|
||||
<strong>Added Review to Blender.
|
||||
|
||||
</strong>This implementation is based on #3508 but made compatible with the current OpenPype implementation for Blender.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Data Exchanges: Point Cloud for 3dsMax <a href="https://github.com/ynput/OpenPype/pull/4532">#4532</a></summary>
|
||||
|
||||
<strong>Publish PRT format with tyFlow in 3dsmax
|
||||
|
||||
</strong>Publish PRT format with tyFlow in 3dsmax and possibly set up loader to load the format too.
|
||||
- [x] creator
|
||||
- [x] extractor
|
||||
- [x] validator
|
||||
- [x] loader
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Global: persistent staging directory for renders <a href="https://github.com/ynput/OpenPype/pull/4583">#4583</a></summary>
|
||||
|
||||
<strong>Allows configuring whether the staging directory (`stagingDir`) should be persistent, with the use of profiles.
|
||||
|
||||
</strong>With this feature, users can specify a transient data folder path based on presets, which can be used during the creation and publishing stages. In some cases, DCCs automatically add a rendering path during the creation stage, which is then used in publishing. One of the key advantages of this feature is that it lets users take advantage of faster storage for rendering, which can help improve workflow efficiency. Additionally, it allows users to keep their rendered data persistent and to use their own infrastructure for regular cleaning. However, some productions may want to use this feature without persistency, and retargeting the rendering folder to faster storage is also not supported at the moment. It is the studio's responsibility to clean up obsolete folders with data. The location of the folder is configured in `project_anatomy/templates/others` (a 'transient' key is expected, with a 'folder' key; there could be more templates). Which family/task type/subset is applicable is configured in `project_settings/global/tools/publish/transient_dir_profiles`.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Kitsu custom comment template <a href="https://github.com/ynput/OpenPype/pull/4599">#4599</a></summary>
|
||||
|
||||
Kitsu allows writing Markdown in its comment field, which can be very powerful for delivering dynamic comments with the help of data from the instance. This feature defaults to off, so the admin has to manually set up the comment field the way they want. I have added a basic example of how the comment can look as the comment field's default value. I also want to add some documentation for this, but that's on its way once the code itself looks good to the reviewers.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>MaxScene Family <a href="https://github.com/ynput/OpenPype/pull/4615">#4615</a></summary>
|
||||
|
||||
Introduction of the Max Scene Family
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
### **🚀 Enhancements**
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Maya: Multiple values on single render attribute - OP-4131 <a href="https://github.com/ynput/OpenPype/pull/4631">#4631</a></summary>
|
||||
|
||||
When validating render attributes, this adds support for multiple values. When repairing, the first value in the list is used.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Maya: enable 2D Pan/Zoom for playblasts - OP-5213 <a href="https://github.com/ynput/OpenPype/pull/4687">#4687</a></summary>
|
||||
|
||||
Setting for enabling 2D Pan/Zoom on reviews.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Copy existing or generate new Fusion profile on prelaunch <a href="https://github.com/ynput/OpenPype/pull/4572">#4572</a></summary>
|
||||
|
||||
<strong>Fusion preferences will be copied to the predefined `~/.openpype/hosts/fusion/prefs` folder (or any other folder set in system settings) on launch.

</strong>The idea is to create a copy of an existing Fusion profile, adding an OpenPype menu to the Fusion instance. By default the copy setting is turned off, so no file copying is performed. Instead, a clean Fusion profile is created by Fusion in the predefined folder. The default location is set to `~/.openpype/hosts/fusion/prefs`, to better comply with the other OS platforms. After creating the default profile, some modifications are applied:
- forced Python 3
- forced English interface
- OpenPype-specific path maps are set up.

If the `copy_prefs` checkbox is toggled, a copy of the existing Fusion profile folder will be placed in the mentioned location and then altered the same way as described above. The operation is run only once, on the first launch, unless `force_sync [Resync profile on each launch]` is toggled. The English interface is forced because the `FUSION16_PROFILE_DIR` environment variable is not read otherwise (seems to be a Fusion bug).
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Houdini: Create button open new publisher's "create" tab <a href="https://github.com/ynput/OpenPype/pull/4601">#4601</a></summary>
|
||||
|
||||
During a talk with @maxpareschi he mentioned that the new publisher in Houdini felt super confusing, because "Create" still opened the older creator (now completely empty) while the publish button went directly to the publish tab. This resolves that by making the Create button open the new publisher on its Create tab. The publish button now also enforces going to the "publish" tab for consistency in usage. @antirotor I think changing the Create button's callback was just missed in this commit, or was there a specific reason to not change that around yet?
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Clockify: refresh and fix the integration <a href="https://github.com/ynput/OpenPype/pull/4607">#4607</a></summary>
|
||||
|
||||
Due to recent API changes, Clockify requires `user_id` to operate with the timers. I updated this part and currently it is a WIP for making it fully functional. Most functions, such as start and stop timer, and projects sync are currently working. For the rate limiting task new dependency is added: https://pypi.org/project/ratelimiter/
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Fusion publish existing frames <a href="https://github.com/ynput/OpenPype/pull/4611">#4611</a></summary>
|
||||
|
||||
This PR adds the ability to publish existing frames instead of having to re-render all of them for each new publish. I have split the render_locally plugin so the review part is its own plugin now. I also changed the saver creator plugin's label from Saver to Render (saver), as I intend to add a Prerender creator like in Nuke.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Resolution settings referenced from DB record for 3dsMax <a href="https://github.com/ynput/OpenPype/pull/4652">#4652</a></summary>
|
||||
|
||||
- Add Callback for setting the resolution according to DB after the new scene is created.
|
||||
- Add a new Action into openpype menu which allows the user to reset the resolution in 3dsMax
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>3dsmax: render instance settings in Publish tab <a href="https://github.com/ynput/OpenPype/pull/4658">#4658</a></summary>
|
||||
|
||||
Allows users to preset the pools, group and use_published settings in the Render Creator in the Max host. Users can set these before or after creating an instance in the new publisher.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>scene length setting referenced from DB record for 3dsMax <a href="https://github.com/ynput/OpenPype/pull/4665">#4665</a></summary>
|
||||
|
||||
Setting the timeline length based on DB record in 3dsMax Hosts
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Publisher: Windows reduce command window pop-ups during Publishing <a href="https://github.com/ynput/OpenPype/pull/4672">#4672</a></summary>
|
||||
|
||||
Reduce the command line pop-ups that show on Windows during publishing.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Publisher: Explicit save <a href="https://github.com/ynput/OpenPype/pull/4676">#4676</a></summary>
|
||||
|
||||
Publisher has an explicit button to save changes, so a reset can happen without saving any changes. Saving still happens automatically when publishing is started or when the publisher window closes, but a popup is shown if the host context has changed. The important context was extended with the workfile path (if the host integration supports it), so workfile changes are captured too; in that case a confirmation dialog is shown to the user. All callbacks that may require saving of the context were moved to the main window so the dialog can be handled in one place. Saving changes now returns a success flag, so the rest of the logic is skipped and publishing won't start when saving instances fails. The save and reset buttons have shortcuts (Ctrl + S and Ctrl + R).
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>CelAction: conditional workfile parameters from settings <a href="https://github.com/ynput/OpenPype/pull/4677">#4677</a></summary>
|
||||
|
||||
Since some productions were requesting excluding some workfile parameters from publishing submission, we needed to move them to settings so those could be altered per project.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Improve logging of used app + tool envs on application launch <a href="https://github.com/ynput/OpenPype/pull/4682">#4682</a></summary>
|
||||
|
||||
Improve logging of what apps + tool environments got loaded for an application launch.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Fix name and docstring for Create Workdir Extra Folders prelaunch hook <a href="https://github.com/ynput/OpenPype/pull/4683">#4683</a></summary>
|
||||
|
||||
Fix class name and docstring for the Create Workdir Extra Folders prelaunch hook. The class name and docstring were originally copied from another plug-in and didn't match the plug-in logic. This also fixes potentially seeing this hook listed twice in your logs, where it was actually running both this prelaunch hook and the actual `AddLastWorkfileToLaunchArgs` plugin.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Application launch context: Include app group name in logger <a href="https://github.com/ynput/OpenPype/pull/4684">#4684</a></summary>
|
||||
|
||||
Clarify in logs better what app group the ApplicationLaunchContext belongs to and what application is being launched.Before:After:
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>increment workfile version 3dsmax <a href="https://github.com/ynput/OpenPype/pull/4685">#4685</a></summary>
|
||||
|
||||
Increment the workfile version in 3dsmax on publish, as is done in the Blender and Maya hosts.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
### **🐛 Bug fixes**
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Maya: Fix getting non-active model panel. <a href="https://github.com/ynput/OpenPype/pull/2968">#2968</a></summary>
|
||||
|
||||
<strong>When capturing multiple cameras with image planes that have file sequences playing, only the active (first) camera will play through the file sequence.
|
||||
|
||||
</strong>
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Maya: Fix broken review publishing. <a href="https://github.com/ynput/OpenPype/pull/4549">#4549</a></summary>
|
||||
|
||||
<strong>Resolves #4547
|
||||
|
||||
</strong>
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Maya: Avoid error on right click in Loader if `mtoa` is not loaded <a href="https://github.com/ynput/OpenPype/pull/4616">#4616</a></summary>
|
||||
|
||||
Fix an error on right clicking in the Loader when `mtoa` is not a loaded plug-in. Additionally, if `mtoa` isn't loaded, the loader will now load the plug-in before trying to create the Arnold standin.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Maya: Fix extract look colorspace detection <a href="https://github.com/ynput/OpenPype/pull/4618">#4618</a></summary>
|
||||
|
||||
Fix the logic which guesses the colorspace using `arnold` python library.
|
||||
- Previously it'd error if `mtoa` was not available on path so it still required `mtoa` to be available.
|
||||
- The guessing colorspace logic doesn't actually require `mtoa` to be loaded, but just the `arnold` python library to be available. This changes the logic so it doesn't require the `mtoa` plugin to get loaded to guess the colorspace.
|
||||
- The if/else branch was likely not doing what was intended `cmds.loadPlugin("mtoa", quiet=True)` returns None if the plug-in was already loaded. So this would only ever be true if it ends up loading the `mtoa` plugin the first time.
|
||||
```python
|
||||
# Tested in Maya 2022.1
|
||||
print(cmds.loadPlugin("mtoa", quiet=True))
|
||||
# ['mtoa']
|
||||
print(cmds.loadPlugin("mtoa", quiet=True))
|
||||
# None
|
||||
```
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Maya: Maya Playblast Options overrides - OP-3847 <a href="https://github.com/ynput/OpenPype/pull/4634">#4634</a></summary>
|
||||
|
||||
When publishing a review in Maya, the extractor would fail due to wrong (long) panel name.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Bugfix/op 2834 fix extract playblast <a href="https://github.com/ynput/OpenPype/pull/4701">#4701</a></summary>
|
||||
|
||||
Paragraphs contain detailed information on the changes made to the product or service, providing an in-depth description of the updates and enhancements. They can be used to explain the reasoning behind the changes, or to highlight the importance of the new features. Paragraphs can often include links to further information or support documentation.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Bugfix/op 2834 fix extract playblast <a href="https://github.com/ynput/OpenPype/pull/4704">#4704</a></summary>
|
||||
|
||||
Paragraphs contain detailed information on the changes made to the product or service, providing an in-depth description of the updates and enhancements. They can be used to explain the reasoning behind the changes, or to highlight the importance of the new features. Paragraphs can often include links to further information or support documentation.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Maya: bug fix for passing zoom settings if review is attached to subset <a href="https://github.com/ynput/OpenPype/pull/4716">#4716</a></summary>
|
||||
|
||||
Fix for attaching review to subset with pan/zoom option.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Maya: tile assembly fail in draft - OP-4820 <a href="https://github.com/ynput/OpenPype/pull/4416">#4416</a></summary>
|
||||
|
||||
<strong>Tile assembly in Deadline was broken.
|
||||
|
||||
</strong>Initial bug report revealed other areas of the tile assembly that needed fixing.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Maya: Yeti Validate Rig Input - OP-3454 <a href="https://github.com/ynput/OpenPype/pull/4554">#4554</a></summary>
|
||||
|
||||
<strong>Fix Yeti Validate Rig Input
|
||||
|
||||
</strong>Existing workflow was broken due to this #3297.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Scene inventory: Fix code errors when "not found" entries are found <a href="https://github.com/ynput/OpenPype/pull/4594">#4594</a></summary>
|
||||
|
||||
Whenever a "NOT FOUND" entry is present a lot of errors happened in the Scene Inventory:
|
||||
- It started spamming a lot of errors for the VersionDelegate since it had no numeric version (no version at all).Error reported on Discord:
|
||||
```python
|
||||
Traceback (most recent call last):
|
||||
File "C:\Users\videopro\Documents\github\OpenPype\openpype\tools\utils\delegates.py", line 65, in paint
|
||||
text = self.displayText(
|
||||
File "C:\Users\videopro\Documents\github\OpenPype\openpype\tools\utils\delegates.py", line 33, in displayText
|
||||
assert isinstance(value, numbers.Integral), (
|
||||
AssertionError: Version is not integer. "None" <class 'NoneType'>
|
||||
```
|
||||
- Right click menu would error on NOT FOUND entries, and thus not show. With this PR it will now _disregard_ not found items for "Set version" and "Remove" but still allow actions.This PR resolves those.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Kitsu: Sync OP with zou, make sure value-data is int or float <a href="https://github.com/ynput/OpenPype/pull/4596">#4596</a></summary>
|
||||
|
||||
Currently the data zou pulls is a string and not a value causing some bugs in the pipe where a value is expected (like `Set frame range` in Fusion).
|
||||
|
||||
|
||||
|
||||
This PR makes sure each value is set with int() or float() so these bugs can't happen later on.
|
||||
|
||||
|
||||
|
||||
_(A request to cgwire has also been sent to allow forcing values only for some metadata columns, but currently the user can enter whatever they want in there.)_
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Max: fix the bug of removing an instance <a href="https://github.com/ynput/OpenPype/pull/4617">#4617</a></summary>
|
||||
|
||||
fix the bug of removing an instance in 3dsMax
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Global | Nuke: fixing farm publishing workflow <a href="https://github.com/ynput/OpenPype/pull/4623">#4623</a></summary>
|
||||
|
||||
After Nuke adopted the new publisher with new creators, new issues were introduced; those are addressed with this PR, for example broken publishing of reviewable video files when publishing via the farm. Local publishing was also fixed.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Ftrack: Ftrack additional families filtering <a href="https://github.com/ynput/OpenPype/pull/4633">#4633</a></summary>
|
||||
|
||||
Ftrack family collector makes sure the subset family is also in instance families for additional families filtering.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Ftrack: Hierarchical <> Non-Hierarchical attributes sync fix <a href="https://github.com/ynput/OpenPype/pull/4635">#4635</a></summary>
|
||||
|
||||
Sync between hierarchical and non-hierarchical attributes should be fixed and work as expected. Action should sync the values as expected and event handler should do it too and only on newly created entities.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>bugfix for 3dsmax publishing error <a href="https://github.com/ynput/OpenPype/pull/4637">#4637</a></summary>
|
||||
|
||||
fix the bug of failing publishing job in 3dsMax
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>General: Use right validation for ffmpeg executable <a href="https://github.com/ynput/OpenPype/pull/4640">#4640</a></summary>
|
||||
|
||||
Use ffmpeg exec validation for ffmpeg executables instead of oiio exec validation. The validation is used as last possible source of ffmpeg from `PATH` environment variables, which is an edge case but can cause issues.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>3dsmax: opening last workfile <a href="https://github.com/ynput/OpenPype/pull/4644">#4644</a></summary>
|
||||
|
||||
Supports opening last saved workfile in 3dsmax host.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Fixed a bug where a QThread in the splash screen could be destroyed before finishing execution <a href="https://github.com/ynput/OpenPype/pull/4647">#4647</a></summary>
|
||||
|
||||
This should fix the occasional behavior of the QThread being destroyed before its worker even returns from the `run()` function. After quitting, it should wait for the QThread object to properly close itself.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>General: Use right plugin class for Collect Comment <a href="https://github.com/ynput/OpenPype/pull/4653">#4653</a></summary>
|
||||
|
||||
Collect Comment plugin is instance plugin so should inherit from `InstancePlugin` instead of `ContextPlugin`.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Global: add tags field to thumbnail representation <a href="https://github.com/ynput/OpenPype/pull/4660">#4660</a></summary>
|
||||
|
||||
Thumbnail representation might be missing tags field.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Integrator: Enforce unique destination transfers, disallow overwrites in queued transfers <a href="https://github.com/ynput/OpenPype/pull/4662">#4662</a></summary>
|
||||
|
||||
Fix #4656 by enforcing unique destination transfers in the Integrator. It's now disallowed to queue a destination in the file transaction queue with a new source path during the publish.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Hiero: Creator with correct workfile numeric padding input <a href="https://github.com/ynput/OpenPype/pull/4666">#4666</a></summary>
|
||||
|
||||
Creator was showing 99 in workfile input for long time, even if users set default value to 1001 in studio settings. This has been fixed now.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Nuke: Nukenodes family instance without frame range <a href="https://github.com/ynput/OpenPype/pull/4669">#4669</a></summary>
|
||||
|
||||
No need to add frame range data into `nukenodes` (backdrop) family publishes - since those are timeless.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>TVPaint: Optional Validation plugins can be de/activated by user <a href="https://github.com/ynput/OpenPype/pull/4674">#4674</a></summary>
|
||||
|
||||
Added `OptionalPyblishPluginMixin` to TVpaint plugins that can be optional.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Kitsu: Slightly less strict with instance data <a href="https://github.com/ynput/OpenPype/pull/4678">#4678</a></summary>
|
||||
|
||||
- Allow to take task name from context if asset doesn't have any. Fixes an issue with Photoshop's review instance not having `task` in data.
|
||||
- Allow to match "review" against both `instance.data["family"]` and `instance.data["families"]` because some instances don't have the primary family in families, e.g. in Photoshop and TVPaint.
|
||||
- Do not error in Integrate Kitsu Review when, for whatever reason, Integrate Kitsu Note did not create a comment; just log a message that it was unable to connect a review.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Publisher: Fix reset shortcut sequence <a href="https://github.com/ynput/OpenPype/pull/4694">#4694</a></summary>
|
||||
|
||||
Fix bug created in https://github.com/ynput/OpenPype/pull/4676 where key sequence is checked using unsupported method. The check was changed to convert event into `QKeySequence` object which can be compared to prepared sequence.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Refactor _capture <a href="https://github.com/ynput/OpenPype/pull/4702">#4702</a></summary>
|
||||
|
||||
Paragraphs contain detailed information on the changes made to the product or service, providing an in-depth description of the updates and enhancements. They can be used to explain the reasoning behind the changes, or to highlight the importance of the new features. Paragraphs can often include links to further information or support documentation.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Hiero: correct container colors if UpToDate <a href="https://github.com/ynput/OpenPype/pull/4708">#4708</a></summary>
|
||||
|
||||
Colors on loaded containers are now correctly identifying real state of version. `Red` for out of date and `green` for up to date.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
### **🔀 Refactored code**
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Look Assigner: Move Look Assigner tool since it's Maya only <a href="https://github.com/ynput/OpenPype/pull/4604">#4604</a></summary>
|
||||
|
||||
Fix #4357: Move Look Assigner tool to maya since it's Maya only
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Maya: Remove unused functions from Extract Look <a href="https://github.com/ynput/OpenPype/pull/4671">#4671</a></summary>
|
||||
|
||||
Remove unused functions from Maya Extract Look plug-in
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Extract Review code refactor <a href="https://github.com/ynput/OpenPype/pull/3930">#3930</a></summary>
|
||||
|
||||
<strong>Trying to reduce complexity of Extract Review plug-in
|
||||
- Re-use profile filtering from lib
|
||||
- Remove "combination families" additional filtering which supposedly was from OP v2
|
||||
- Simplify 'formatting' for filling gaps
|
||||
- Use `legacy_io.Session` over `os.environ`
|
||||
|
||||
</strong>
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Maya: Replace last usages of Qt module <a href="https://github.com/ynput/OpenPype/pull/4610">#4610</a></summary>
|
||||
|
||||
Replace last usage of `Qt` module with `qtpy`. This change is needed for `PySide6` support. All changes happened in Maya loader plugins.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Update tests and documentation for `ColormanagedPyblishPluginMixin` <a href="https://github.com/ynput/OpenPype/pull/4612">#4612</a></summary>
|
||||
|
||||
Refactor `ExtractorColormanaged` to `ColormanagedPyblishPluginMixin` in tests and documentation.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Improve logging of used app + tool envs on application launch (minor tweak) <a href="https://github.com/ynput/OpenPype/pull/4686">#4686</a></summary>
|
||||
|
||||
Use `app.full_name` for change done in #4682
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
### **📃 Documentation**
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Docs/add architecture document <a href="https://github.com/ynput/OpenPype/pull/4344">#4344</a></summary>
|
||||
|
||||
<strong>Add `ARCHITECTURE.md` document.
|
||||
|
||||
</strong>This document attempts to give a quick overview of the project to help onboarding. It's not extensive documentation but more of an elevator pitch of one-line descriptions of files/directories and what they attempt to do.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Documentation: Tweak grammar and fix some typos <a href="https://github.com/ynput/OpenPype/pull/4613">#4613</a></summary>
|
||||
|
||||
This resolves some grammar and typos in the documentation. It also fixes the extension of some images in the After Effects docs, which used an uppercase extension even though the files had a lowercase extension.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Docs: Fix some minor grammar/typos <a href="https://github.com/ynput/OpenPype/pull/4680">#4680</a></summary>
|
||||
|
||||
Typo/grammar fixes in documentation.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
### **Merged pull requests**
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Maya: Implement image file node loader <a href="https://github.com/ynput/OpenPype/pull/4313">#4313</a></summary>
|
||||
|
||||
<strong>Implements a loader for loading texture image into a `file` node in Maya.
|
||||
|
||||
</strong>Similar to Maya's hypershade creation of textures on load you have the option to choose for three modes of creating:
|
||||
- Texture
|
||||
- Projection
|
||||
- Stencil

These should match what Maya generates if you create those in Maya.
|
||||
- [x] Load and manage file nodes
|
||||
- [x] Apply color spaces after #4195
|
||||
- [x] Support for _either_ UDIM or image sequence - currently it seems to always load sequences as UDIM automatically.
|
||||
- [ ] Add support for animation sequences of UDIM textures using the `<f>.<udim>.exr` path format?
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Maya Look Assigner: Don't rely on containers for get all assets <a href="https://github.com/ynput/OpenPype/pull/4600">#4600</a></summary>
|
||||
|
||||
This resolves #4044 by not actually relying on containers in the scene but instead just relying on finding nodes with `cbId` attributes. As such, imported nodes would also be found and a shader can be assigned (similar to when using get from selection). **Please take into consideration the potential downsides below.** Potential downsides would be:
- If an already loaded look has any dagNodes, say a 3D Projection node, then that will also show up as a loaded asset, where previously nodes from loaded looks were ignored.
- If any dag nodes were created locally, they would have gotten `cbId` attributes on scene save and thus the current asset would almost always show?
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Maya: Unify menu labels for "Set Frame Range" and "Set Resolution" <a href="https://github.com/ynput/OpenPype/pull/4605">#4605</a></summary>
|
||||
|
||||
Fix #4109: Unify menu labels for "Set Frame Range" and "Set Resolution". This also tweaks the Houdini label from Reset Frame Range to Set Frame Range.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Resolve missing OPENPYPE_MONGO in deadline global job preload <a href="https://github.com/ynput/OpenPype/pull/4484">#4484</a></summary>
|
||||
|
||||
<strong>In the GlobalJobPreLoad plugin, we propose to replace the SpawnProcess by a sub-process and to pass the environment variables in the parameters, since the SpawnProcess under Centos Linux does not pass the environment variables.
|
||||
|
||||
</strong>In the GlobalJobPreLoad plugin, the Deadline SpawnProcess is used to start the OpenPype process. The problem is that the SpawnProcess does not pass environment variables, including OPENPYPE_MONGO, to the process when it is under Centos7 linux, and the process gets stuck. We propose to replace it by a subprocess and to pass the variable in the parameters.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Tests: Added setup_only to tests <a href="https://github.com/ynput/OpenPype/pull/4591">#4591</a></summary>
|
||||
|
||||
Allows to download test zip, unzip and restore DB in preparation for new test.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Maya: Arnold don't reset maya timeline frame range on render creation (or setting render settings) <a href="https://github.com/ynput/OpenPype/pull/4603">#4603</a></summary>
|
||||
|
||||
Fix #4429: Do not reset fps or playback timeline on applying or creating render settings
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Bump @sideway/formula from 3.0.0 to 3.0.1 in /website <a href="https://github.com/ynput/OpenPype/pull/4609">#4609</a></summary>
|
||||
|
||||
Bumps [@sideway/formula](https://github.com/sideway/formula) from 3.0.0 to 3.0.1.
|
||||
<details>
|
||||
<summary>Commits</summary>
|
||||
<ul>
|
||||
<li><a href="https://github.com/hapijs/formula/commit/5b44c1bffc38135616fb91d5ad46eaf64f03d23b"><code>5b44c1b</code></a> 3.0.1</li>
|
||||
<li><a href="https://github.com/hapijs/formula/commit/9fbc20a02d75ae809c37a610a57802cd1b41b3fe"><code>9fbc20a</code></a> chore: better number regex</li>
|
||||
<li><a href="https://github.com/hapijs/formula/commit/41ae98e0421913b100886adb0107a25d552d9e1a"><code>41ae98e</code></a> Cleanup</li>
|
||||
<li><a href="https://github.com/hapijs/formula/commit/c59f35ec401e18cead10e0cedfb44291517610b1"><code>c59f35e</code></a> Move to Sideway</li>
|
||||
<li>See full diff in <a href="https://github.com/sideway/formula/compare/v3.0.0...v3.0.1">compare view</a></li>
|
||||
</ul>
|
||||
</details>
|
||||
<details>
|
||||
<summary>Maintainer changes</summary>
|
||||
<p>This version was pushed to npm by <a href="https://www.npmjs.com/~marsup">marsup</a>, a new releaser for <code>@sideway/formula</code> since your current version.</p>
|
||||
</details>
|
||||
<br />
|
||||
|
||||
|
||||
[Dependabot compatibility score](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
|
||||
|
||||
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
|
||||
|
||||
[//]: # (dependabot-automerge-start)
|
||||
[//]: # (dependabot-automerge-end)
|
||||
|
||||
---
|
||||
|
||||
<details>
|
||||
<summary>Dependabot commands and options</summary>
|
||||
<br />
|
||||
|
||||
You can trigger Dependabot actions by commenting on this PR:
|
||||
- `@dependabot rebase` will rebase this PR
|
||||
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
|
||||
- `@dependabot merge` will merge this PR after your CI passes on it
|
||||
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
|
||||
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
|
||||
- `@dependabot reopen` will reopen this PR if it is closed
|
||||
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
|
||||
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
|
||||
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
|
||||
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
|
||||
- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language
|
||||
- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language
|
||||
- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language
|
||||
- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language
|
||||
|
||||
You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/ynput/OpenPype/network/alerts).
|
||||
|
||||
</details>
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Update artist_hosts_maya_arnold.md <a href="https://github.com/ynput/OpenPype/pull/4626">#4626</a></summary>
|
||||
|
||||
Correct Arnold docs.
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Maya: Add "Include Parent Hierarchy" option in animation creator plugin <a href="https://github.com/ynput/OpenPype/pull/4645">#4645</a></summary>
|
||||
|
||||
Add an option in Project Settings > Maya > Creator Plugins > Create Animation to include (or not) the parent hierarchy. This avoids artists having to manually check the option for every Create Animation instance.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>General: Filter available applications <a href="https://github.com/ynput/OpenPype/pull/4667">#4667</a></summary>
|
||||
|
||||
Added option to filter applications that don't have valid executable available in settings in launcher and ftrack actions. This option can be disabled in new settings category `Applications`. The filtering is by default disabled.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>3dsmax: make sure that startup script executes <a href="https://github.com/ynput/OpenPype/pull/4695">#4695</a></summary>
|
||||
|
||||
Fixing reliability of OpenPype startup in 3dsmax.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>Project Manager: Change minimum frame start/end to '0' <a href="https://github.com/ynput/OpenPype/pull/4719">#4719</a></summary>
|
||||
|
||||
Project manager can have frame start/end set to `0`.
|
||||
|
||||
|
||||
___
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
|
||||
## [3.15.2](https://github.com/ynput/OpenPype/tree/3.15.2)
|
||||
|
||||
[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.1...3.15.2)
@@ -3,7 +3,7 @@
 Goal is that most of functions here are called on (or with) an object
 that has project name as a context (e.g. on 'ProjectEntity'?).

-+ We will need more specific functions doing wery specific queires really fast.
++ We will need more specific functions doing very specific queries really fast.
 """

 import re

@@ -193,7 +193,7 @@ def _get_assets(
             be found.
         asset_names (Iterable[str]): Name assets that should be found.
         parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids.
-        standard (bool): Query standart assets (type 'asset').
+        standard (bool): Query standard assets (type 'asset').
         archived (bool): Query archived assets (type 'archived_asset').
         fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
@@ -1185,7 +1185,7 @@ def get_representations(
     standard=True,
     fields=None
 ):
-    """Representaion entities data from one project filtered by filters.
+    """Representation entities data from one project filtered by filters.

     Filters are additive (all conditions must pass to return subset).

@@ -1231,7 +1231,7 @@ def get_archived_representations(
     names_by_version_ids=None,
     fields=None
 ):
-    """Archived representaion entities data from project with applied filters.
+    """Archived representation entities data from project with applied filters.

     Filters are additive (all conditions must pass to return subset).
@@ -2,7 +2,7 @@
 ## Reason
 Preparation for OpenPype v4 server. Goal is to remove direct mongo calls in code to prepare a little bit for different source of data for code before. To start think about database calls less as mongo calls but more universally. To do so was implemented simple wrapper around database calls to not use pymongo specific code.

-Current goal is not to make universal database model which can be easily replaced with any different source of data but to make it close as possible. Current implementation of OpenPype is too tighly connected to pymongo and it's abilities so we're trying to get closer with long term changes that can be used even in current state.
+Current goal is not to make universal database model which can be easily replaced with any different source of data but to make it close as possible. Current implementation of OpenPype is too tightly connected to pymongo and it's abilities so we're trying to get closer with long term changes that can be used even in current state.

 ## Queries
 Query functions don't use full potential of mongo queries like very specific queries based on subdictionaries or unknown structures. We try to avoid these calls as much as possible because they'll probably won't be available in future. If it's really necessary a new function can be added but only if it's reasonable for overall logic. All query functions were moved to `~/client/entities.py`. Each function has arguments with available filters and possible reduce of returned keys for each entity.
@ -14,7 +14,7 @@ Changes are a little bit complicated. Mongo has many options how update can happ
|
|||
Create operations expect already prepared document data, for that are prepared functions creating skeletal structures of documents (do not fill all required data), except `_id` all data should be right. Existence of entity is not validated so if the same creation operation is send n times it will create the entity n times which can cause issues.
|
||||
|
||||
### Update
|
||||
Update operation require entity id and keys that should be changed, update dictionary must have {"key": value}. If value should be set in nested dictionary the key must have also all subkeys joined with dot `.` (e.g. `{"data": {"fps": 25}}` -> `{"data.fps": 25}`). To simplify update dictionaries were prepared functions which does that for you, their name has template `prepare_<entity type>_update_data` - they work on comparison of previous document and new document. If there is missing function for requested entity type it is because we didn't need it yet and require implementaion.
|
||||
Update operation require entity id and keys that should be changed, update dictionary must have {"key": value}. If value should be set in nested dictionary the key must have also all subkeys joined with dot `.` (e.g. `{"data": {"fps": 25}}` -> `{"data.fps": 25}`). To simplify update dictionaries were prepared functions which does that for you, their name has template `prepare_<entity type>_update_data` - they work on comparison of previous document and new document. If there is missing function for requested entity type it is because we didn't need it yet and require implementation.
|
||||
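A minimal sketch of the dot-joining described above; the real `prepare_<entity type>_update_data` helpers work by comparing the old and new documents, this only shows the key flattening:

```python
def flatten_update_data(changes, _prefix=""):
    """Turn {"data": {"fps": 25}} into {"data.fps": 25}."""
    update_data = {}
    for key, value in changes.items():
        full_key = _prefix + key
        if isinstance(value, dict):
            update_data.update(flatten_update_data(value, full_key + "."))
        else:
            update_data[full_key] = value
    return update_data

print(flatten_update_data({"data": {"fps": 25}}))  # {'data.fps': 25}
```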
|
||||
### Delete
|
||||
Delete operations need an entity id. The entity will be deleted from mongo.
|
||||
|
|
|
|||
|
|
@ -368,7 +368,7 @@ def prepare_workfile_info_update_data(old_doc, new_doc, replace=True):
|
|||
class AbstractOperation(object):
|
||||
"""Base operation class.
|
||||
|
||||
Opration represent a call into database. The call can create, change or
|
||||
Operation represents a call into the database. The call can create, change or
|
||||
remove data.
|
||||
|
||||
Args:
|
||||
|
|
@ -409,7 +409,7 @@ class AbstractOperation(object):
|
|||
pass
|
||||
|
||||
def to_data(self):
|
||||
"""Convert opration to data that can be converted to json or others.
|
||||
"""Convert operation to data that can be converted to json or others.
|
||||
|
||||
Warning:
|
||||
Current state returns ObjectId objects which cannot be parsed by
|
||||
|
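# A common workaround for the warning above (not necessarily how this codebase
# handles it): serialize the operation data with `default=str` so ObjectId
# values are stringified instead of raising TypeError.
import json

def operation_to_json(operation):
    # `operation` is assumed to be an AbstractOperation instance.
    return json.dumps(operation.to_data(), default=str)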
|
@ -428,7 +428,7 @@ class AbstractOperation(object):
|
|||
|
||||
|
||||
class CreateOperation(AbstractOperation):
|
||||
"""Opeartion to create an entity.
|
||||
"""Operation to create an entity.
|
||||
|
||||
Args:
|
||||
project_name (str): On which project operation will happen.
|
||||
|
|
@ -485,7 +485,7 @@ class CreateOperation(AbstractOperation):
|
|||
|
||||
|
||||
class UpdateOperation(AbstractOperation):
|
||||
"""Opeartion to update an entity.
|
||||
"""Operation to update an entity.
|
||||
|
||||
Args:
|
||||
project_name (str): On which project operation will happen.
|
||||
|
|
@ -552,7 +552,7 @@ class UpdateOperation(AbstractOperation):
|
|||
|
||||
|
||||
class DeleteOperation(AbstractOperation):
|
||||
"""Opeartion to delete an entity.
|
||||
"""Operation to delete an entity.
|
||||
|
||||
Args:
|
||||
project_name (str): On which project operation will happen.
|
||||
|
|
|
|||
|
|
@ -3,10 +3,13 @@ from openpype.lib import PreLaunchHook
|
|||
from openpype.pipeline.workfile import create_workdir_extra_folders
|
||||
|
||||
|
||||
class AddLastWorkfileToLaunchArgs(PreLaunchHook):
|
||||
"""Add last workfile path to launch arguments.
|
||||
class CreateWorkdirExtraFolders(PreLaunchHook):
|
||||
"""Create extra folders for the work directory.
|
||||
|
||||
Based on setting `project_settings/global/tools/Workfiles/extra_folders`
|
||||
profile filtering will decide whether extra folders need to be created in
|
||||
the work directory.
|
||||
|
||||
This is not possible to do for all applications the same way.
|
||||
"""
|
||||
|
||||
# Execute after workfile template copy
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ class LaunchFoundryAppsWindows(PreLaunchHook):
|
|||
|
||||
Nuke is executed "like" python process so it is required to pass
|
||||
`CREATE_NEW_CONSOLE` flag on windows to trigger creation of new console.
|
||||
At the same time the newly created console won't create it's own stdout
|
||||
At the same time the newly created console won't create its own stdout
|
||||
and stderr handlers so they should not be redirected to DEVNULL.
|
||||
"""
|
||||
|
||||
|
|
@ -18,7 +18,7 @@ class LaunchFoundryAppsWindows(PreLaunchHook):
|
|||
|
||||
def execute(self):
|
||||
# Change `creationflags` to CREATE_NEW_CONSOLE
|
||||
# - on Windows will nuke create new window using it's console
|
||||
# - on Windows nuke will create new window using its console
|
||||
# Set `stdout` and `stderr` to None so new created console does not
|
||||
# have redirected output to DEVNULL in build
|
||||
self.launch_context.kwargs.update({
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
Idea for current dirmap implementation was used from Maya where is possible to
|
||||
enter source and destination roots and maya will try each found source
|
||||
in referenced file replace with each destionation paths. First path which
|
||||
in referenced file replace with each destination paths. First path which
|
||||
exists is used.
|
||||
"""
|
||||
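# Illustrative sketch of the dirmap idea described in the docstring above (not
# the actual HostDirmap implementation): try every source/destination pair and
# keep the first remapped path that exists on disk.
import os

def remap_path(path, source_roots, destination_roots):
    for src, dst in zip(source_roots, destination_roots):
        if path.startswith(src):
            candidate = dst + path[len(src):]
            if os.path.exists(candidate):
                return candidate
    return path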
|
||||
|
|
@ -183,7 +183,7 @@ class HostDirmap(object):
|
|||
project_name, remote_site
|
||||
)
|
||||
# dirmap has sense only with regular disk provider, in the workfile
|
||||
# wont be root on cloud or sftp provider
|
||||
# won't be root on cloud or sftp provider
|
||||
if remote_provider != "local_drive":
|
||||
remote_site = "studio"
|
||||
for root_name, active_site_dir in active_overrides.items():
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ class HostBase(object):
|
|||
Compared to 'avalon' concept:
|
||||
What was before considered as functions in host implementation folder. The
|
||||
host implementation should primarily care about adding ability of creation
|
||||
(mark subsets to be published) and optionaly about referencing published
|
||||
(mark subsets to be published) and optionally about referencing published
|
||||
representations as containers.
|
||||
|
||||
Host may need extend some functionality like working with workfiles
|
||||
|
|
@ -129,9 +129,9 @@ class HostBase(object):
|
|||
"""Get current context information.
|
||||
|
||||
This method should be used to get current context of host. Usage of
|
||||
this method can be crutial for host implementations in DCCs where
|
||||
this method can be crucial for host implementations in DCCs where
|
||||
can be opened multiple workfiles at one moment and change of context
|
||||
can't be catched properly.
|
||||
can't be caught properly.
|
||||
|
||||
Default implementation returns values from 'legacy_io.Session'.
|
||||
|
||||
|
|
|
|||
|
|
@ -81,7 +81,7 @@ class ILoadHost:
|
|||
|
||||
@abstractmethod
|
||||
def get_containers(self):
|
||||
"""Retreive referenced containers from scene.
|
||||
"""Retrieve referenced containers from scene.
|
||||
|
||||
This can be implemented in hosts where referencing can be used.
|
||||
|
||||
|
|
@ -191,7 +191,7 @@ class IWorkfileHost:
|
|||
|
||||
@abstractmethod
|
||||
def get_current_workfile(self):
|
||||
"""Retreive path to current opened file.
|
||||
"""Retrieve path to current opened file.
|
||||
|
||||
Returns:
|
||||
str: Path to file which is currently opened.
|
||||
|
|
@ -220,8 +220,8 @@ class IWorkfileHost:
|
|||
Default implementation keeps workdir untouched.
|
||||
|
||||
Warnings:
|
||||
We must handle this modification with more sofisticated way because
|
||||
this can't be called out of DCC so opening of last workfile
|
||||
We must handle this modification with more sophisticated way
|
||||
because this can't be called out of DCC so opening of last workfile
|
||||
(calculated before DCC is launched) is complicated. Also breaking
|
||||
defined work template is not a good idea.
|
||||
Only place where it's really used and can make sense is Maya. There
|
||||
|
|
@ -302,7 +302,7 @@ class IPublishHost:
|
|||
required methods.
|
||||
|
||||
Returns:
|
||||
list[str]: Missing method implementations for new publsher
|
||||
list[str]: Missing method implementations for new publisher
|
||||
workflow.
|
||||
"""
|
||||
|
||||
|
|
|
|||
|
|
@ -504,7 +504,7 @@ function addItemAsLayerToComp(comp_id, item_id, found_comp){
|
|||
* Args:
|
||||
* comp_id (int): id of target composition
|
||||
* item_id (int): FootageItem.id
|
||||
* found_comp (CompItem, optional): to limit quering if
|
||||
* found_comp (CompItem, optional): to limit querying if
|
||||
* comp already found previously
|
||||
*/
|
||||
var comp = found_comp || app.project.itemByID(comp_id);
|
||||
|
|
|
|||
|
|
@ -80,7 +80,7 @@ class AfterEffectsServerStub():
|
|||
Get complete stored JSON with metadata from AE.Metadata.Label
|
||||
field.
|
||||
|
||||
It contains containers loaded by any Loader OR instances creted
|
||||
It contains containers loaded by any Loader OR instances created
|
||||
by Creator.
|
||||
|
||||
Returns:
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ from .workio import OpenFileCacher
|
|||
PREVIEW_COLLECTIONS: Dict = dict()
|
||||
|
||||
# This seems like a good value to keep the Qt app responsive and doesn't slow
|
||||
# down Blender. At least on macOS I the interace of Blender gets very laggy if
|
||||
# down Blender. At least on macOS the interface of Blender gets very laggy if
|
||||
# you make it smaller.
|
||||
TIMER_INTERVAL: float = 0.01 if platform.system() == "Windows" else 0.1
|
||||
|
||||
|
|
@ -84,11 +84,11 @@ class MainThreadItem:
|
|||
self.kwargs = kwargs
|
||||
|
||||
def execute(self):
|
||||
"""Execute callback and store it's result.
|
||||
"""Execute callback and store its result.
|
||||
|
||||
Method must be called from main thread. Item is marked as `done`
|
||||
when callback execution finished. Store output of callback or exception
|
||||
information when callback raise one.
|
||||
information when callback raises one.
|
||||
"""
|
||||
print("Executing process in main thread")
|
||||
if self.done:
|
||||
|
|
|
|||
|
|
@ -50,7 +50,7 @@ class ExtractPlayblast(publish.Extractor):
|
|||
# get isolate objects list
|
||||
isolate = instance.data("isolate", None)
|
||||
|
||||
# get ouput path
|
||||
# get output path
|
||||
stagingdir = self.staging_dir(instance)
|
||||
filename = instance.name
|
||||
path = os.path.join(stagingdir, filename)
|
||||
|
|
@ -116,7 +116,6 @@ class ExtractPlayblast(publish.Extractor):
|
|||
"frameStart": start,
|
||||
"frameEnd": end,
|
||||
"fps": fps,
|
||||
"preview": True,
|
||||
"tags": tags,
|
||||
"camera_name": camera
|
||||
}
|
||||
|
|
|
|||
|
|
@ -38,8 +38,9 @@ class CelactionPrelaunchHook(PreLaunchHook):
|
|||
)
|
||||
|
||||
path_to_cli = os.path.join(CELACTION_SCRIPTS_DIR, "publish_cli.py")
|
||||
subproces_args = get_openpype_execute_args("run", path_to_cli)
|
||||
openpype_executable = subproces_args.pop(0)
|
||||
subprocess_args = get_openpype_execute_args("run", path_to_cli)
|
||||
openpype_executable = subprocess_args.pop(0)
|
||||
workfile_settings = self.get_workfile_settings()
|
||||
|
||||
winreg.SetValueEx(
|
||||
hKey,
|
||||
|
|
@ -49,20 +50,34 @@ class CelactionPrelaunchHook(PreLaunchHook):
|
|||
openpype_executable
|
||||
)
|
||||
|
||||
parameters = subproces_args + [
|
||||
"--currentFile", "*SCENE*",
|
||||
"--chunk", "*CHUNK*",
|
||||
"--frameStart", "*START*",
|
||||
"--frameEnd", "*END*",
|
||||
"--resolutionWidth", "*X*",
|
||||
"--resolutionHeight", "*Y*"
|
||||
# add required arguments for workfile path
|
||||
parameters = subprocess_args + [
|
||||
"--currentFile", "*SCENE*"
|
||||
]
|
||||
|
||||
# Add custom parameters from workfile settings
|
||||
if "render_chunk" in workfile_settings["submission_overrides"]:
|
||||
parameters += [
|
||||
"--chunk", "*CHUNK*"
|
||||
]
|
||||
if "resolution" in workfile_settings["submission_overrides"]:
|
||||
parameters += [
|
||||
"--resolutionWidth", "*X*",
|
||||
"--resolutionHeight", "*Y*"
|
||||
]
|
||||
if "frame_range" in workfile_settings["submission_overrides"]:
|
||||
parameters += [
|
||||
"--frameStart", "*START*",
|
||||
"--frameEnd", "*END*"
|
||||
]
|
||||
|
||||
winreg.SetValueEx(
|
||||
hKey, "SubmitParametersTitle", 0, winreg.REG_SZ,
|
||||
subprocess.list2cmdline(parameters)
|
||||
)
|
||||
|
||||
self.log.debug(f"__ parameters: \"{parameters}\"")
|
||||
|
||||
# setting resolution parameters
|
||||
path_submit = "\\".join([
|
||||
path_user_settings, "Dialogs", "SubmitOutput"
|
||||
|
|
@ -135,3 +150,6 @@ class CelactionPrelaunchHook(PreLaunchHook):
|
|||
self.log.info(f"Workfile to open: \"{workfile_path}\"")
|
||||
|
||||
return workfile_path
|
||||
|
||||
def get_workfile_settings(self):
|
||||
return self.data["project_settings"]["celaction"]["workfile"]
|
||||
|
|
|
|||
|
|
@ -39,7 +39,7 @@ class CollectCelactionCliKwargs(pyblish.api.Collector):
|
|||
passing_kwargs[key] = value
|
||||
|
||||
if missing_kwargs:
|
||||
raise RuntimeError("Missing arguments {}".format(
|
||||
self.log.debug("Missing arguments {}".format(
|
||||
", ".join(
|
||||
[f'"{key}"' for key in missing_kwargs]
|
||||
)
|
||||
|
|
|
|||
|
|
@ -773,7 +773,7 @@ class MediaInfoFile(object):
|
|||
if logger:
|
||||
self.log = logger
|
||||
|
||||
# test if `dl_get_media_info` paht exists
|
||||
# test if `dl_get_media_info` path exists
|
||||
self._validate_media_script_path()
|
||||
|
||||
# derivate other feed variables
|
||||
|
|
@ -993,7 +993,7 @@ class MediaInfoFile(object):
|
|||
|
||||
def _validate_media_script_path(self):
|
||||
if not os.path.isfile(self.MEDIA_SCRIPT_PATH):
|
||||
raise IOError("Media Scirpt does not exist: `{}`".format(
|
||||
raise IOError("Media Script does not exist: `{}`".format(
|
||||
self.MEDIA_SCRIPT_PATH))
|
||||
|
||||
def _generate_media_info_file(self, fpath, feed_ext, feed_dir):
|
||||
|
|
|
|||
|
|
@ -38,7 +38,7 @@ def install():
|
|||
pyblish.register_plugin_path(PUBLISH_PATH)
|
||||
register_loader_plugin_path(LOAD_PATH)
|
||||
register_creator_plugin_path(CREATE_PATH)
|
||||
log.info("OpenPype Flame plug-ins registred ...")
|
||||
log.info("OpenPype Flame plug-ins registered ...")
|
||||
|
||||
# register callback for switching publishable
|
||||
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
|
||||
|
|
|
|||
|
|
@ -157,7 +157,7 @@ class CreatorWidget(QtWidgets.QDialog):
|
|||
# convert label text to normal capitalized text with spaces
|
||||
label_text = self.camel_case_split(text)
|
||||
|
||||
# assign the new text to lable widget
|
||||
# assign the new text to label widget
|
||||
label = QtWidgets.QLabel(label_text)
|
||||
label.setObjectName("LineLabel")
|
||||
|
||||
|
|
@ -345,8 +345,8 @@ class PublishableClip:
|
|||
"track": "sequence",
|
||||
}
|
||||
|
||||
# parents search patern
|
||||
parents_search_patern = r"\{([a-z]*?)\}"
|
||||
# parents search pattern
|
||||
parents_search_pattern = r"\{([a-z]*?)\}"
|
||||
|
||||
# default templates for non-ui use
|
||||
rename_default = False
|
||||
|
|
@ -445,7 +445,7 @@ class PublishableClip:
|
|||
return self.current_segment
|
||||
|
||||
def _populate_segment_default_data(self):
|
||||
""" Populate default formating data from segment. """
|
||||
""" Populate default formatting data from segment. """
|
||||
|
||||
self.current_segment_default_data = {
|
||||
"_folder_": "shots",
|
||||
|
|
@ -538,7 +538,7 @@ class PublishableClip:
|
|||
if not self.index_from_segment:
|
||||
self.count_steps *= self.rename_index
|
||||
|
||||
hierarchy_formating_data = {}
|
||||
hierarchy_formatting_data = {}
|
||||
hierarchy_data = deepcopy(self.hierarchy_data)
|
||||
_data = self.current_segment_default_data.copy()
|
||||
if self.ui_inputs:
|
||||
|
|
@ -552,7 +552,7 @@ class PublishableClip:
|
|||
# mark review layer
|
||||
if self.review_track and (
|
||||
self.review_track not in self.review_track_default):
|
||||
# if review layer is defined and not the same as defalut
|
||||
# if review layer is defined and not the same as default
|
||||
self.review_layer = self.review_track
|
||||
|
||||
# shot num calculate
|
||||
|
|
@ -578,13 +578,13 @@ class PublishableClip:
|
|||
|
||||
# fill up pythonic expressions in hierarchy data
|
||||
for k, _v in hierarchy_data.items():
|
||||
hierarchy_formating_data[k] = _v["value"].format(**_data)
|
||||
hierarchy_formatting_data[k] = _v["value"].format(**_data)
|
||||
else:
|
||||
# if no gui mode then just pass default data
|
||||
hierarchy_formating_data = hierarchy_data
|
||||
hierarchy_formatting_data = hierarchy_data
|
||||
|
||||
tag_hierarchy_data = self._solve_tag_hierarchy_data(
|
||||
hierarchy_formating_data
|
||||
hierarchy_formatting_data
|
||||
)
|
||||
|
||||
tag_hierarchy_data.update({"heroTrack": True})
|
||||
|
|
@ -615,27 +615,27 @@ class PublishableClip:
|
|||
# in case track name and subset name is the same then add
|
||||
if self.subset_name == self.track_name:
|
||||
_hero_data["subset"] = self.subset
|
||||
# assing data to return hierarchy data to tag
|
||||
# assign data to return hierarchy data to tag
|
||||
tag_hierarchy_data = _hero_data
|
||||
break
|
||||
|
||||
# add data to return data dict
|
||||
self.marker_data.update(tag_hierarchy_data)
|
||||
|
||||
def _solve_tag_hierarchy_data(self, hierarchy_formating_data):
|
||||
def _solve_tag_hierarchy_data(self, hierarchy_formatting_data):
|
||||
""" Solve marker data from hierarchy data and templates. """
|
||||
# fill up clip name and hierarchy keys
|
||||
hierarchy_filled = self.hierarchy.format(**hierarchy_formating_data)
|
||||
clip_name_filled = self.clip_name.format(**hierarchy_formating_data)
|
||||
hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data)
|
||||
clip_name_filled = self.clip_name.format(**hierarchy_formatting_data)
|
||||
|
||||
# remove shot from hierarchy data: is not needed anymore
|
||||
hierarchy_formating_data.pop("shot")
|
||||
hierarchy_formatting_data.pop("shot")
|
||||
|
||||
return {
|
||||
"newClipName": clip_name_filled,
|
||||
"hierarchy": hierarchy_filled,
|
||||
"parents": self.parents,
|
||||
"hierarchyData": hierarchy_formating_data,
|
||||
"hierarchyData": hierarchy_formatting_data,
|
||||
"subset": self.subset,
|
||||
"family": self.subset_family,
|
||||
"families": [self.family]
|
||||
|
|
@ -650,17 +650,17 @@ class PublishableClip:
|
|||
type
|
||||
)
|
||||
|
||||
# first collect formating data to use for formating template
|
||||
formating_data = {}
|
||||
# first collect formatting data to use for formatting template
|
||||
formatting_data = {}
|
||||
for _k, _v in self.hierarchy_data.items():
|
||||
value = _v["value"].format(
|
||||
**self.current_segment_default_data)
|
||||
formating_data[_k] = value
|
||||
formatting_data[_k] = value
|
||||
|
||||
return {
|
||||
"entity_type": entity_type,
|
||||
"entity_name": template.format(
|
||||
**formating_data
|
||||
**formatting_data
|
||||
)
|
||||
}
|
||||
|
||||
|
|
@ -668,9 +668,9 @@ class PublishableClip:
|
|||
""" Create parents and return it in list. """
|
||||
self.parents = []
|
||||
|
||||
patern = re.compile(self.parents_search_patern)
|
||||
pattern = re.compile(self.parents_search_pattern)
|
||||
|
||||
par_split = [(patern.findall(t).pop(), t)
|
||||
par_split = [(pattern.findall(t).pop(), t)
|
||||
for t in self.hierarchy.split("/")]
|
||||
|
||||
for type, template in par_split:
|
||||
|
|
@ -902,22 +902,22 @@ class OpenClipSolver(flib.MediaInfoFile):
|
|||
):
|
||||
return
|
||||
|
||||
formating_data = self._update_formating_data(
|
||||
formatting_data = self._update_formatting_data(
|
||||
layerName=layer_name,
|
||||
layerUID=layer_uid
|
||||
)
|
||||
name_obj.text = StringTemplate(
|
||||
self.layer_rename_template
|
||||
).format(formating_data)
|
||||
).format(formatting_data)
|
||||
|
||||
def _update_formating_data(self, **kwargs):
|
||||
""" Updating formating data for layer rename
|
||||
def _update_formatting_data(self, **kwargs):
|
||||
""" Updating formatting data for layer rename
|
||||
|
||||
Attributes:
|
||||
key=value (optional): will be included to formating data
|
||||
key=value (optional): will be included to formatting data
|
||||
as {key: value}
|
||||
Returns:
|
||||
dict: anatomy context data for formating
|
||||
dict: anatomy context data for formatting
|
||||
"""
|
||||
self.log.debug(">> self.clip_data: {}".format(self.clip_data))
|
||||
clip_name_obj = self.clip_data.find("name")
|
||||
|
|
|
|||
|
|
@ -203,7 +203,7 @@ class WireTapCom(object):
|
|||
list: all available volumes in server
|
||||
|
||||
Raises:
|
||||
AttributeError: unable to get any volumes childs from server
|
||||
AttributeError: unable to get any volumes children from server
|
||||
"""
|
||||
root = WireTapNodeHandle(self._server, "/volumes")
|
||||
children_num = WireTapInt(0)
|
||||
|
|
|
|||
|
|
@ -108,7 +108,7 @@ def _sync_utility_scripts(env=None):
|
|||
shutil.copy2(src, dst)
|
||||
except (PermissionError, FileExistsError) as msg:
|
||||
log.warning(
|
||||
"Not able to coppy to: `{}`, Problem with: `{}`".format(
|
||||
"Not able to copy to: `{}`, Problem with: `{}`".format(
|
||||
dst,
|
||||
msg
|
||||
)
|
||||
|
|
|
|||
|
|
@ -153,7 +153,7 @@ class FlamePrelaunch(PreLaunchHook):
|
|||
def _add_pythonpath(self):
|
||||
pythonpath = self.launch_context.env.get("PYTHONPATH")
|
||||
|
||||
# separate it explicity by `;` that is what we use in settings
|
||||
# separate it explicitly by `;` that is what we use in settings
|
||||
new_pythonpath = self.flame_pythonpath.split(os.pathsep)
|
||||
new_pythonpath += pythonpath.split(os.pathsep)
|
||||
|
||||
|
|
|
|||
|
|
@ -209,7 +209,7 @@ class CreateShotClip(opfapi.Creator):
|
|||
"type": "QComboBox",
|
||||
"label": "Subset Name",
|
||||
"target": "ui",
|
||||
"toolTip": "chose subset name patern, if [ track name ] is selected, name of track layer will be used", # noqa
|
||||
"toolTip": "chose subset name pattern, if [ track name ] is selected, name of track layer will be used", # noqa
|
||||
"order": 0},
|
||||
"subsetFamily": {
|
||||
"value": ["plate", "take"],
|
||||
|
|
|
|||
|
|
@ -61,9 +61,9 @@ class LoadClip(opfapi.ClipLoader):
|
|||
self.layer_rename_template = self.layer_rename_template.replace(
|
||||
"output", "representation")
|
||||
|
||||
formating_data = deepcopy(context["representation"]["context"])
|
||||
formatting_data = deepcopy(context["representation"]["context"])
|
||||
clip_name = StringTemplate(self.clip_name_template).format(
|
||||
formating_data)
|
||||
formatting_data)
|
||||
|
||||
# convert colorspace with ocio to flame mapping
|
||||
# in imageio flame section
|
||||
|
|
@ -88,7 +88,7 @@ class LoadClip(opfapi.ClipLoader):
|
|||
"version": "v{:0>3}".format(version_name),
|
||||
"layer_rename_template": self.layer_rename_template,
|
||||
"layer_rename_patterns": self.layer_rename_patterns,
|
||||
"context_data": formating_data
|
||||
"context_data": formatting_data
|
||||
}
|
||||
self.log.debug(pformat(
|
||||
loading_context
|
||||
|
|
|
|||
|
|
@ -58,11 +58,11 @@ class LoadClipBatch(opfapi.ClipLoader):
|
|||
self.layer_rename_template = self.layer_rename_template.replace(
|
||||
"output", "representation")
|
||||
|
||||
formating_data = deepcopy(context["representation"]["context"])
|
||||
formating_data["batch"] = self.batch.name.get_value()
|
||||
formatting_data = deepcopy(context["representation"]["context"])
|
||||
formatting_data["batch"] = self.batch.name.get_value()
|
||||
|
||||
clip_name = StringTemplate(self.clip_name_template).format(
|
||||
formating_data)
|
||||
formatting_data)
|
||||
|
||||
# convert colorspace with ocio to flame mapping
|
||||
# in imageio flame section
|
||||
|
|
@ -88,7 +88,7 @@ class LoadClipBatch(opfapi.ClipLoader):
|
|||
"version": "v{:0>3}".format(version_name),
|
||||
"layer_rename_template": self.layer_rename_template,
|
||||
"layer_rename_patterns": self.layer_rename_patterns,
|
||||
"context_data": formating_data
|
||||
"context_data": formatting_data
|
||||
}
|
||||
self.log.debug(pformat(
|
||||
loading_context
|
||||
|
|
|
|||
|
|
@ -203,7 +203,7 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
|
|||
self._get_xml_preset_attrs(
|
||||
attributes, split)
|
||||
|
||||
# add xml overides resolution to instance data
|
||||
# add xml overrides resolution to instance data
|
||||
xml_overrides = attributes["xml_overrides"]
|
||||
if xml_overrides.get("width"):
|
||||
attributes.update({
|
||||
|
|
@ -284,7 +284,7 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
|
|||
self.log.debug("__ head: `{}`".format(head))
|
||||
self.log.debug("__ tail: `{}`".format(tail))
|
||||
|
||||
# HACK: it is here to serve for versions bellow 2021.1
|
||||
# HACK: it is here to serve for versions below 2021.1
|
||||
if not any([head, tail]):
|
||||
retimed_attributes = get_media_range_with_retimes(
|
||||
otio_clip, handle_start, handle_end)
|
||||
|
|
|
|||
|
|
@ -227,7 +227,7 @@ class ExtractSubsetResources(publish.Extractor):
|
|||
self.hide_others(
|
||||
exporting_clip, segment_name, s_track_name)
|
||||
|
||||
# change name patern
|
||||
# change name pattern
|
||||
name_patern_xml = (
|
||||
"<segment name>_<shot name>_{}.").format(
|
||||
unique_name)
|
||||
|
|
@ -358,7 +358,7 @@ class ExtractSubsetResources(publish.Extractor):
|
|||
representation_data["stagingDir"] = n_stage_dir
|
||||
files = n_files
|
||||
|
||||
# add files to represetation but add
|
||||
# add files to representation but add
|
||||
# imagesequence as list
|
||||
if (
|
||||
# first check if path in files is not mov extension
|
||||
|
|
|
|||
|
|
@ -50,7 +50,7 @@ class IntegrateBatchGroup(pyblish.api.InstancePlugin):
|
|||
self._load_clip_to_context(instance, bgroup)
|
||||
|
||||
def _add_nodes_to_batch_with_links(self, instance, task_data, batch_group):
|
||||
# get write file node properties > OrederDict because order does mater
|
||||
# get write file node properties > OrderedDict because order does matter
|
||||
write_pref_data = self._get_write_prefs(instance, task_data)
|
||||
|
||||
batch_nodes = [
|
||||
|
|
|
|||
|
|
@ -6,12 +6,13 @@ from openpype.pipeline.publish import get_errored_instances_from_context
|
|||
|
||||
|
||||
class SelectInvalidAction(pyblish.api.Action):
|
||||
"""Select invalid nodes in Maya when plug-in failed.
|
||||
"""Select invalid nodes in Fusion when plug-in failed.
|
||||
|
||||
To retrieve the invalid nodes this assumes a static `get_invalid()`
|
||||
method is available on the plugin.
|
||||
|
||||
"""
|
||||
|
||||
label = "Select invalid"
|
||||
on = "failed" # This action is only available on a failed plug-in
|
||||
icon = "search" # Icon from Awesome Icon
|
||||
|
|
@ -31,8 +32,10 @@ class SelectInvalidAction(pyblish.api.Action):
|
|||
if isinstance(invalid_nodes, (list, tuple)):
|
||||
invalid.extend(invalid_nodes)
|
||||
else:
|
||||
self.log.warning("Plug-in returned to be invalid, "
|
||||
"but has no selectable nodes.")
|
||||
self.log.warning(
|
||||
"Plug-in returned to be invalid, "
|
||||
"but has no selectable nodes."
|
||||
)
|
||||
|
||||
if not invalid:
|
||||
# Assume relevant comp is current comp and clear selection
|
||||
|
|
@ -51,4 +54,6 @@ class SelectInvalidAction(pyblish.api.Action):
|
|||
for tool in invalid:
|
||||
flow.Select(tool, True)
|
||||
names.add(tool.Name)
|
||||
self.log.info("Selecting invalid tools: %s" % ", ".join(sorted(names)))
|
||||
self.log.info(
|
||||
"Selecting invalid tools: %s" % ", ".join(sorted(names))
|
||||
)
|
||||
|
|
|
|||
|
|
@ -6,7 +6,6 @@ from openpype.tools.utils import host_tools
|
|||
from openpype.style import load_stylesheet
|
||||
from openpype.lib import register_event_callback
|
||||
from openpype.hosts.fusion.scripts import (
|
||||
set_rendermode,
|
||||
duplicate_with_inputs,
|
||||
)
|
||||
from openpype.hosts.fusion.api.lib import (
|
||||
|
|
@ -60,7 +59,6 @@ class OpenPypeMenu(QtWidgets.QWidget):
|
|||
publish_btn = QtWidgets.QPushButton("Publish...", self)
|
||||
manager_btn = QtWidgets.QPushButton("Manage...", self)
|
||||
libload_btn = QtWidgets.QPushButton("Library...", self)
|
||||
rendermode_btn = QtWidgets.QPushButton("Set render mode...", self)
|
||||
set_framerange_btn = QtWidgets.QPushButton("Set Frame Range", self)
|
||||
set_resolution_btn = QtWidgets.QPushButton("Set Resolution", self)
|
||||
duplicate_with_inputs_btn = QtWidgets.QPushButton(
|
||||
|
|
@ -91,7 +89,6 @@ class OpenPypeMenu(QtWidgets.QWidget):
|
|||
|
||||
layout.addWidget(set_framerange_btn)
|
||||
layout.addWidget(set_resolution_btn)
|
||||
layout.addWidget(rendermode_btn)
|
||||
|
||||
layout.addSpacing(20)
|
||||
|
||||
|
|
@ -108,7 +105,6 @@ class OpenPypeMenu(QtWidgets.QWidget):
|
|||
load_btn.clicked.connect(self.on_load_clicked)
|
||||
manager_btn.clicked.connect(self.on_manager_clicked)
|
||||
libload_btn.clicked.connect(self.on_libload_clicked)
|
||||
rendermode_btn.clicked.connect(self.on_rendermode_clicked)
|
||||
duplicate_with_inputs_btn.clicked.connect(
|
||||
self.on_duplicate_with_inputs_clicked
|
||||
)
|
||||
|
|
@ -162,15 +158,6 @@ class OpenPypeMenu(QtWidgets.QWidget):
|
|||
def on_libload_clicked(self):
|
||||
host_tools.show_library_loader()
|
||||
|
||||
def on_rendermode_clicked(self):
|
||||
if self.render_mode_widget is None:
|
||||
window = set_rendermode.SetRenderMode()
|
||||
window.setStyleSheet(load_stylesheet())
|
||||
window.show()
|
||||
self.render_mode_widget = window
|
||||
else:
|
||||
self.render_mode_widget.show()
|
||||
|
||||
def on_duplicate_with_inputs_clicked(self):
|
||||
duplicate_with_inputs.duplicate_with_input_connections()
|
||||
|
||||
|
|
|
|||
|
|
@ -4,29 +4,34 @@ import qtawesome
|
|||
|
||||
from openpype.hosts.fusion.api import (
|
||||
get_current_comp,
|
||||
comp_lock_and_undo_chunk
|
||||
comp_lock_and_undo_chunk,
|
||||
)
|
||||
|
||||
from openpype.lib import BoolDef
|
||||
from openpype.lib import (
|
||||
BoolDef,
|
||||
EnumDef,
|
||||
)
|
||||
from openpype.pipeline import (
|
||||
legacy_io,
|
||||
Creator,
|
||||
CreatedInstance
|
||||
CreatedInstance,
|
||||
)
|
||||
from openpype.client import (
|
||||
get_asset_by_name,
|
||||
)
|
||||
from openpype.client import get_asset_by_name
|
||||
|
||||
|
||||
class CreateSaver(Creator):
|
||||
identifier = "io.openpype.creators.fusion.saver"
|
||||
name = "saver"
|
||||
label = "Saver"
|
||||
label = "Render (saver)"
|
||||
name = "render"
|
||||
family = "render"
|
||||
default_variants = ["Main"]
|
||||
|
||||
default_variants = ["Main", "Mask"]
|
||||
description = "Fusion Saver to generate image sequence"
|
||||
|
||||
def create(self, subset_name, instance_data, pre_create_data):
|
||||
instance_attributes = ["reviewable"]
|
||||
|
||||
def create(self, subset_name, instance_data, pre_create_data):
|
||||
# TODO: Add pre_create attributes to choose file format?
|
||||
file_format = "OpenEXRFormat"
|
||||
|
||||
|
|
@ -58,7 +63,8 @@ class CreateSaver(Creator):
|
|||
family=self.family,
|
||||
subset_name=subset_name,
|
||||
data=instance_data,
|
||||
creator=self)
|
||||
creator=self,
|
||||
)
|
||||
|
||||
# Insert the transient data
|
||||
instance.transient_data["tool"] = saver
|
||||
|
|
@ -68,11 +74,9 @@ class CreateSaver(Creator):
|
|||
return instance
|
||||
|
||||
def collect_instances(self):
|
||||
|
||||
comp = get_current_comp()
|
||||
tools = comp.GetToolList(False, "Saver").values()
|
||||
for tool in tools:
|
||||
|
||||
data = self.get_managed_tool_data(tool)
|
||||
if not data:
|
||||
data = self._collect_unmanaged_saver(tool)
|
||||
|
|
@ -90,7 +94,6 @@ class CreateSaver(Creator):
|
|||
|
||||
def update_instances(self, update_list):
|
||||
for created_inst, _changes in update_list:
|
||||
|
||||
new_data = created_inst.data_to_store()
|
||||
tool = created_inst.transient_data["tool"]
|
||||
self._update_tool_with_data(tool, new_data)
|
||||
|
|
@ -139,7 +142,6 @@ class CreateSaver(Creator):
|
|||
tool.SetAttrs({"TOOLS_Name": subset})
|
||||
|
||||
def _collect_unmanaged_saver(self, tool):
|
||||
|
||||
# TODO: this should not be done this way - this should actually
|
||||
# get the data as stored on the tool explicitly (however)
|
||||
# that would disallow any 'regular saver' to be collected
|
||||
|
|
@ -153,8 +155,7 @@ class CreateSaver(Creator):
|
|||
asset = legacy_io.Session["AVALON_ASSET"]
|
||||
task = legacy_io.Session["AVALON_TASK"]
|
||||
|
||||
asset_doc = get_asset_by_name(project_name=project,
|
||||
asset_name=asset)
|
||||
asset_doc = get_asset_by_name(project_name=project, asset_name=asset)
|
||||
|
||||
path = tool["Clip"][comp.TIME_UNDEFINED]
|
||||
fname = os.path.basename(path)
|
||||
|
|
@ -178,21 +179,20 @@ class CreateSaver(Creator):
|
|||
"variant": variant,
|
||||
"active": not passthrough,
|
||||
"family": self.family,
|
||||
|
||||
# Unique identifier for instance and this creator
|
||||
"id": "pyblish.avalon.instance",
|
||||
"creator_identifier": self.identifier
|
||||
"creator_identifier": self.identifier,
|
||||
}
|
||||
|
||||
def get_managed_tool_data(self, tool):
|
||||
"""Return data of the tool if it matches creator identifier"""
|
||||
data = tool.GetData('openpype')
|
||||
data = tool.GetData("openpype")
|
||||
if not isinstance(data, dict):
|
||||
return
|
||||
|
||||
required = {
|
||||
"id": "pyblish.avalon.instance",
|
||||
"creator_identifier": self.identifier
|
||||
"creator_identifier": self.identifier,
|
||||
}
|
||||
for key, value in required.items():
|
||||
if key not in data or data[key] != value:
|
||||
|
|
@ -205,11 +205,40 @@ class CreateSaver(Creator):
|
|||
|
||||
return data
|
||||
|
||||
def get_instance_attr_defs(self):
|
||||
return [
|
||||
BoolDef(
|
||||
"review",
|
||||
default=True,
|
||||
label="Review"
|
||||
)
|
||||
def get_pre_create_attr_defs(self):
|
||||
"""Settings for create page"""
|
||||
attr_defs = [
|
||||
self._get_render_target_enum(),
|
||||
self._get_reviewable_bool(),
|
||||
]
|
||||
return attr_defs
|
||||
|
||||
def get_instance_attr_defs(self):
|
||||
"""Settings for publish page"""
|
||||
attr_defs = [
|
||||
self._get_render_target_enum(),
|
||||
self._get_reviewable_bool(),
|
||||
]
|
||||
return attr_defs
|
||||
|
||||
# These functions below should be moved to another file
|
||||
# so it can be used by other plugins. plugin.py ?
|
||||
|
||||
def _get_render_target_enum(self):
|
||||
rendering_targets = {
|
||||
"local": "Local machine rendering",
|
||||
"frames": "Use existing frames",
|
||||
}
|
||||
if "farm_rendering" in self.instance_attributes:
|
||||
rendering_targets["farm"] = "Farm rendering"
|
||||
|
||||
return EnumDef(
|
||||
"render_target", items=rendering_targets, label="Render target"
|
||||
)
|
||||
|
||||
def _get_reviewable_bool(self):
|
||||
return BoolDef(
|
||||
"review",
|
||||
default=("reviewable" in self.instance_attributes),
|
||||
label="Review",
|
||||
)
|
||||
|
|
|
|||
|
|
@ -72,8 +72,7 @@ class FusionSetFrameRangeWithHandlesLoader(load.LoaderPlugin):
|
|||
return
|
||||
|
||||
# Include handles
|
||||
handles = version_data.get("handles", 0)
|
||||
start -= handles
|
||||
end += handles
|
||||
start -= version_data.get("handleStart", 0)
|
||||
end += version_data.get("handleEnd", 0)
|
||||
|
||||
lib.update_frame_range(start, end)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,50 @@
|
|||
import pyblish.api
|
||||
from openpype.pipeline import publish
|
||||
import os
|
||||
|
||||
|
||||
class CollectFusionExpectedFrames(
|
||||
pyblish.api.InstancePlugin, publish.ColormanagedPyblishPluginMixin
|
||||
):
|
||||
"""Collect all frames needed to publish expected frames"""
|
||||
|
||||
order = pyblish.api.CollectorOrder + 0.5
|
||||
label = "Collect Expected Frames"
|
||||
hosts = ["fusion"]
|
||||
families = ["render"]
|
||||
|
||||
def process(self, instance):
|
||||
context = instance.context
|
||||
|
||||
frame_start = context.data["frameStartHandle"]
|
||||
frame_end = context.data["frameEndHandle"]
|
||||
path = instance.data["path"]
|
||||
output_dir = instance.data["outputDir"]
|
||||
|
||||
basename = os.path.basename(path)
|
||||
head, ext = os.path.splitext(basename)
|
||||
files = [
|
||||
f"{head}{str(frame).zfill(4)}{ext}"
|
||||
for frame in range(frame_start, frame_end + 1)
|
||||
]
|
||||
repre = {
|
||||
"name": ext[1:],
|
||||
"ext": ext[1:],
|
||||
"frameStart": f"%0{len(str(frame_end))}d" % frame_start,
|
||||
"files": files,
|
||||
"stagingDir": output_dir,
|
||||
}
|
||||
|
||||
self.set_representation_colorspace(
|
||||
representation=repre,
|
||||
context=context,
|
||||
)
|
||||
|
||||
# review representation
|
||||
if instance.data.get("review", False):
|
||||
repre["tags"] = ["review"]
|
||||
|
||||
# add the repre to the instance
|
||||
if "representations" not in instance.data:
|
||||
instance.data["representations"] = []
|
||||
instance.data["representations"].append(repre)
|
||||
|
|
@ -1,44 +0,0 @@
|
|||
import pyblish.api
|
||||
|
||||
|
||||
class CollectFusionRenderMode(pyblish.api.InstancePlugin):
|
||||
"""Collect current comp's render Mode
|
||||
|
||||
Options:
|
||||
local
|
||||
farm
|
||||
|
||||
Note that this value is set for each comp separately. When you save the
|
||||
comp this information will be stored in that file. If for some reason the
|
||||
available tool does not visualize which render mode is set for the
|
||||
current comp, please run the following line in the console (Py2)
|
||||
|
||||
comp.GetData("openpype.rendermode")
|
||||
|
||||
This will return the name of the current render mode as seen above under
|
||||
Options.
|
||||
|
||||
"""
|
||||
|
||||
order = pyblish.api.CollectorOrder + 0.4
|
||||
label = "Collect Render Mode"
|
||||
hosts = ["fusion"]
|
||||
families = ["render"]
|
||||
|
||||
def process(self, instance):
|
||||
"""Collect all image sequence tools"""
|
||||
options = ["local", "farm"]
|
||||
|
||||
comp = instance.context.data.get("currentComp")
|
||||
if not comp:
|
||||
raise RuntimeError("No comp previously collected, unable to "
|
||||
"retrieve Fusion version.")
|
||||
|
||||
rendermode = comp.GetData("openpype.rendermode") or "local"
|
||||
assert rendermode in options, "Must be supported render mode"
|
||||
|
||||
self.log.info("Render mode: {0}".format(rendermode))
|
||||
|
||||
# Append family
|
||||
family = "render.{0}".format(rendermode)
|
||||
instance.data["families"].append(family)
|
||||
25
openpype/hosts/fusion/plugins/publish/collect_renders.py
Normal file
25
openpype/hosts/fusion/plugins/publish/collect_renders.py
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
import pyblish.api
|
||||
|
||||
|
||||
class CollectFusionRenders(pyblish.api.InstancePlugin):
|
||||
"""Collect current saver node's render Mode
|
||||
|
||||
Options:
|
||||
local (Render locally)
|
||||
frames (Use existing frames)
|
||||
|
||||
"""
|
||||
|
||||
order = pyblish.api.CollectorOrder + 0.4
|
||||
label = "Collect Renders"
|
||||
hosts = ["fusion"]
|
||||
families = ["render"]
|
||||
|
||||
def process(self, instance):
|
||||
render_target = instance.data["render_target"]
|
||||
family = instance.data["family"]
|
||||
|
||||
# add targeted family to families
|
||||
instance.data["families"].append(
|
||||
"{}.{}".format(family, render_target)
|
||||
)
|
||||
109
openpype/hosts/fusion/plugins/publish/extract_render_local.py
Normal file
109
openpype/hosts/fusion/plugins/publish/extract_render_local.py
Normal file
|
|
@ -0,0 +1,109 @@
|
|||
import logging
|
||||
import contextlib
|
||||
import pyblish.api
|
||||
from openpype.hosts.fusion.api import comp_lock_and_undo_chunk
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def enabled_savers(comp, savers):
|
||||
"""Enable only the `savers` in Comp during the context.
|
||||
|
||||
Any Saver tool in the passed composition that is not in the savers list
|
||||
will be set to passthrough during the context.
|
||||
|
||||
Args:
|
||||
comp (object): Fusion composition object.
|
||||
savers (list): List of Saver tool objects.
|
||||
|
||||
"""
|
||||
passthrough_key = "TOOLB_PassThrough"
|
||||
original_states = {}
|
||||
enabled_save_names = {saver.Name for saver in savers}
|
||||
try:
|
||||
all_savers = comp.GetToolList(False, "Saver").values()
|
||||
for saver in all_savers:
|
||||
original_state = saver.GetAttrs()[passthrough_key]
|
||||
original_states[saver] = original_state
|
||||
|
||||
# The passthrough state we want to set (passthrough != enabled)
|
||||
state = saver.Name not in enabled_save_names
|
||||
if state != original_state:
|
||||
saver.SetAttrs({passthrough_key: state})
|
||||
yield
|
||||
finally:
|
||||
for saver, original_state in original_states.items():
|
||||
saver.SetAttrs({"TOOLB_PassThrough": original_state})
|
||||
|
||||
|
||||
class FusionRenderLocal(pyblish.api.InstancePlugin):
|
||||
"""Render the current Fusion composition locally."""
|
||||
|
||||
order = pyblish.api.ExtractorOrder - 0.2
|
||||
label = "Render Local"
|
||||
hosts = ["fusion"]
|
||||
families = ["render.local"]
|
||||
|
||||
def process(self, instance):
|
||||
context = instance.context
|
||||
|
||||
# Start render
|
||||
self.render_once(context)
|
||||
|
||||
# Log render status
|
||||
self.log.info(
|
||||
"Rendered '{nm}' for asset '{ast}' under the task '{tsk}'".format(
|
||||
nm=instance.data["name"],
|
||||
ast=instance.data["asset"],
|
||||
tsk=instance.data["task"],
|
||||
)
|
||||
)
|
||||
|
||||
def render_once(self, context):
|
||||
"""Render context comp only once, even with more render instances"""
|
||||
|
||||
# This plug-in assumes all render nodes get rendered at the same time
|
||||
# to speed up the rendering. The check below makes sure that we only
|
||||
# execute the rendering once and not for each instance.
|
||||
key = f"__hasRun{self.__class__.__name__}"
|
||||
|
||||
savers_to_render = [
|
||||
# Get the saver tool from the instance
|
||||
instance[0] for instance in context if
|
||||
# Only active instances
|
||||
instance.data.get("publish", True) and
|
||||
# Only render.local instances
|
||||
"render.local" in instance.data["families"]
|
||||
]
|
||||
|
||||
if key not in context.data:
|
||||
# We initialize as false to indicate it wasn't successful yet
|
||||
# so we can keep track of whether Fusion succeeded
|
||||
context.data[key] = False
|
||||
|
||||
current_comp = context.data["currentComp"]
|
||||
frame_start = context.data["frameStartHandle"]
|
||||
frame_end = context.data["frameEndHandle"]
|
||||
|
||||
self.log.info("Starting Fusion render")
|
||||
self.log.info(f"Start frame: {frame_start}")
|
||||
self.log.info(f"End frame: {frame_end}")
|
||||
saver_names = ", ".join(saver.Name for saver in savers_to_render)
|
||||
self.log.info(f"Rendering tools: {saver_names}")
|
||||
|
||||
with comp_lock_and_undo_chunk(current_comp):
|
||||
with enabled_savers(current_comp, savers_to_render):
|
||||
result = current_comp.Render(
|
||||
{
|
||||
"Start": frame_start,
|
||||
"End": frame_end,
|
||||
"Wait": True,
|
||||
}
|
||||
)
|
||||
|
||||
context.data[key] = bool(result)
|
||||
|
||||
if context.data[key] is False:
|
||||
raise RuntimeError("Comp render failed")
|
||||
|
|
@ -1,100 +0,0 @@
|
|||
import os
|
||||
import pyblish.api
|
||||
from openpype.pipeline import publish
|
||||
from openpype.hosts.fusion.api import comp_lock_and_undo_chunk
|
||||
|
||||
|
||||
class Fusionlocal(pyblish.api.InstancePlugin,
|
||||
publish.ColormanagedPyblishPluginMixin):
|
||||
"""Render the current Fusion composition locally.
|
||||
|
||||
Extract the result of savers by starting a comp render
|
||||
This will run the local render of Fusion.
|
||||
|
||||
"""
|
||||
|
||||
order = pyblish.api.ExtractorOrder - 0.1
|
||||
label = "Render Local"
|
||||
hosts = ["fusion"]
|
||||
families = ["render.local"]
|
||||
|
||||
def process(self, instance):
|
||||
context = instance.context
|
||||
|
||||
# Start render
|
||||
self.render_once(context)
|
||||
|
||||
# Log render status
|
||||
self.log.info(
|
||||
"Rendered '{nm}' for asset '{ast}' under the task '{tsk}'".format(
|
||||
nm=instance.data["name"],
|
||||
ast=instance.data["asset"],
|
||||
tsk=instance.data["task"],
|
||||
)
|
||||
)
|
||||
|
||||
frame_start = context.data["frameStartHandle"]
|
||||
frame_end = context.data["frameEndHandle"]
|
||||
path = instance.data["path"]
|
||||
output_dir = instance.data["outputDir"]
|
||||
|
||||
basename = os.path.basename(path)
|
||||
head, ext = os.path.splitext(basename)
|
||||
files = [
|
||||
f"{head}{str(frame).zfill(4)}{ext}"
|
||||
for frame in range(frame_start, frame_end + 1)
|
||||
]
|
||||
repre = {
|
||||
"name": ext[1:],
|
||||
"ext": ext[1:],
|
||||
"frameStart": f"%0{len(str(frame_end))}d" % frame_start,
|
||||
"files": files,
|
||||
"stagingDir": output_dir,
|
||||
}
|
||||
|
||||
self.set_representation_colorspace(
|
||||
representation=repre,
|
||||
context=context,
|
||||
)
|
||||
|
||||
if "representations" not in instance.data:
|
||||
instance.data["representations"] = []
|
||||
instance.data["representations"].append(repre)
|
||||
|
||||
# review representation
|
||||
if instance.data.get("review", False):
|
||||
repre["tags"] = ["review", "ftrackreview"]
|
||||
|
||||
def render_once(self, context):
|
||||
"""Render context comp only once, even with more render instances"""
|
||||
|
||||
# This plug-in assumes all render nodes get rendered at the same time
|
||||
# to speed up the rendering. The check below makes sure that we only
|
||||
# execute the rendering once and not for each instance.
|
||||
key = f"__hasRun{self.__class__.__name__}"
|
||||
if key not in context.data:
|
||||
# We initialize as false to indicate it wasn't successful yet
|
||||
# so we can keep track of whether Fusion succeeded
|
||||
context.data[key] = False
|
||||
|
||||
current_comp = context.data["currentComp"]
|
||||
frame_start = context.data["frameStartHandle"]
|
||||
frame_end = context.data["frameEndHandle"]
|
||||
|
||||
self.log.info("Starting Fusion render")
|
||||
self.log.info(f"Start frame: {frame_start}")
|
||||
self.log.info(f"End frame: {frame_end}")
|
||||
|
||||
with comp_lock_and_undo_chunk(current_comp):
|
||||
result = current_comp.Render(
|
||||
{
|
||||
"Start": frame_start,
|
||||
"End": frame_end,
|
||||
"Wait": True,
|
||||
}
|
||||
)
|
||||
|
||||
context.data[key] = bool(result)
|
||||
|
||||
if context.data[key] is False:
|
||||
raise RuntimeError("Comp render failed")
|
||||
|
|
@ -14,22 +14,19 @@ class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
|
|||
"""
|
||||
|
||||
order = pyblish.api.ValidatorOrder
|
||||
actions = [RepairAction]
|
||||
label = "Validate Create Folder Checked"
|
||||
families = ["render"]
|
||||
hosts = ["fusion"]
|
||||
actions = [SelectInvalidAction]
|
||||
actions = [RepairAction, SelectInvalidAction]
|
||||
|
||||
@classmethod
|
||||
def get_invalid(cls, instance):
|
||||
active = instance.data.get("active", instance.data.get("publish"))
|
||||
if not active:
|
||||
return []
|
||||
|
||||
tool = instance[0]
|
||||
create_dir = tool.GetInput("CreateDir")
|
||||
if create_dir == 0.0:
|
||||
cls.log.error("%s has Create Folder turned off" % instance[0].Name)
|
||||
cls.log.error(
|
||||
"%s has Create Folder turned off" % instance[0].Name
|
||||
)
|
||||
return [tool]
|
||||
|
||||
def process(self, instance):
|
||||
|
|
@ -37,7 +34,8 @@ class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
|
|||
if invalid:
|
||||
raise PublishValidationError(
|
||||
"Found Saver with Create Folder During Render checked off",
|
||||
title=self.label)
|
||||
title=self.label,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def repair(cls, instance):
|
||||
|
|
|
|||
|
|
@ -0,0 +1,78 @@
|
|||
import os
|
||||
import pyblish.api
|
||||
|
||||
from openpype.pipeline.publish import RepairAction
|
||||
from openpype.pipeline import PublishValidationError
|
||||
|
||||
from openpype.hosts.fusion.api.action import SelectInvalidAction
|
||||
|
||||
|
||||
class ValidateLocalFramesExistence(pyblish.api.InstancePlugin):
|
||||
"""Checks if files for savers that's set
|
||||
to publish expected frames exists
|
||||
"""
|
||||
|
||||
order = pyblish.api.ValidatorOrder
|
||||
label = "Validate Expected Frames Exists"
|
||||
families = ["render"]
|
||||
hosts = ["fusion"]
|
||||
actions = [RepairAction, SelectInvalidAction]
|
||||
|
||||
@classmethod
|
||||
def get_invalid(cls, instance, non_existing_frames=None):
|
||||
if non_existing_frames is None:
|
||||
non_existing_frames = []
|
||||
|
||||
if instance.data.get("render_target") == "frames":
|
||||
tool = instance[0]
|
||||
|
||||
frame_start = instance.data["frameStart"]
|
||||
frame_end = instance.data["frameEnd"]
|
||||
path = instance.data["path"]
|
||||
output_dir = instance.data["outputDir"]
|
||||
|
||||
basename = os.path.basename(path)
|
||||
head, ext = os.path.splitext(basename)
|
||||
files = [
|
||||
f"{head}{str(frame).zfill(4)}{ext}"
|
||||
for frame in range(frame_start, frame_end + 1)
|
||||
]
|
||||
|
||||
for file in files:
|
||||
if not os.path.exists(os.path.join(output_dir, file)):
|
||||
cls.log.error(
|
||||
f"Missing file: {os.path.join(output_dir, file)}"
|
||||
)
|
||||
non_existing_frames.append(file)
|
||||
|
||||
if len(non_existing_frames) > 0:
|
||||
cls.log.error(f"Some of {tool.Name}'s files does not exist")
|
||||
return [tool]
|
||||
|
||||
def process(self, instance):
|
||||
non_existing_frames = []
|
||||
invalid = self.get_invalid(instance, non_existing_frames)
|
||||
if invalid:
|
||||
raise PublishValidationError(
|
||||
"{} is set to publish existing frames but "
|
||||
"some frames are missing. "
|
||||
"The missing file(s) are:\n\n{}".format(
|
||||
invalid[0].Name,
|
||||
"\n\n".join(non_existing_frames),
|
||||
),
|
||||
title=self.label,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def repair(cls, instance):
|
||||
invalid = cls.get_invalid(instance)
|
||||
if invalid:
|
||||
tool = invalid[0]
|
||||
|
||||
# Change render target to local to render locally
|
||||
tool.SetData("openpype.creator_attributes.render_target", "local")
|
||||
|
||||
cls.log.info(
|
||||
f"Reload the publisher and {tool.Name} "
|
||||
"will be set to render locally"
|
||||
)
|
||||
|
|
@ -1,112 +0,0 @@
|
|||
from qtpy import QtWidgets
|
||||
import qtawesome
|
||||
from openpype.hosts.fusion.api import get_current_comp
|
||||
|
||||
|
||||
_help = {"local": "Render the comp on your own machine and publish "
|
||||
"it from that the destination folder",
|
||||
"farm": "Submit a Fusion render job to a Render farm to use all other"
|
||||
" computers and add a publish job"}
|
||||
|
||||
|
||||
class SetRenderMode(QtWidgets.QWidget):
|
||||
|
||||
def __init__(self, parent=None):
|
||||
QtWidgets.QWidget.__init__(self, parent)
|
||||
|
||||
self._comp = get_current_comp()
|
||||
self._comp_name = self._get_comp_name()
|
||||
|
||||
self.setWindowTitle("Set Render Mode")
|
||||
self.setFixedSize(300, 175)
|
||||
|
||||
layout = QtWidgets.QVBoxLayout()
|
||||
|
||||
# region comp info
|
||||
comp_info_layout = QtWidgets.QHBoxLayout()
|
||||
|
||||
update_btn = QtWidgets.QPushButton(qtawesome.icon("fa.refresh",
|
||||
color="white"), "")
|
||||
update_btn.setFixedWidth(25)
|
||||
update_btn.setFixedHeight(25)
|
||||
|
||||
comp_information = QtWidgets.QLineEdit()
|
||||
comp_information.setEnabled(False)
|
||||
|
||||
comp_info_layout.addWidget(comp_information)
|
||||
comp_info_layout.addWidget(update_btn)
|
||||
# endregion comp info
|
||||
|
||||
# region modes
|
||||
mode_options = QtWidgets.QComboBox()
|
||||
mode_options.addItems(_help.keys())
|
||||
|
||||
mode_information = QtWidgets.QTextEdit()
|
||||
mode_information.setReadOnly(True)
|
||||
# endregion modes
|
||||
|
||||
accept_btn = QtWidgets.QPushButton("Accept")
|
||||
|
||||
layout.addLayout(comp_info_layout)
|
||||
layout.addWidget(mode_options)
|
||||
layout.addWidget(mode_information)
|
||||
layout.addWidget(accept_btn)
|
||||
|
||||
self.setLayout(layout)
|
||||
|
||||
self.comp_information = comp_information
|
||||
self.update_btn = update_btn
|
||||
|
||||
self.mode_options = mode_options
|
||||
self.mode_information = mode_information
|
||||
|
||||
self.accept_btn = accept_btn
|
||||
|
||||
self.connections()
|
||||
self.update()
|
||||
|
||||
# Force updated render mode help text
|
||||
self._update_rendermode_info()
|
||||
|
||||
def connections(self):
|
||||
"""Build connections between code and buttons"""
|
||||
|
||||
self.update_btn.clicked.connect(self.update)
|
||||
self.accept_btn.clicked.connect(self._set_comp_rendermode)
|
||||
self.mode_options.currentIndexChanged.connect(
|
||||
self._update_rendermode_info)
|
||||
|
||||
def update(self):
|
||||
"""Update all information in the UI"""
|
||||
|
||||
self._comp = get_current_comp()
|
||||
self._comp_name = self._get_comp_name()
|
||||
self.comp_information.setText(self._comp_name)
|
||||
|
||||
# Update current comp settings
|
||||
mode = self._get_comp_rendermode()
|
||||
index = self.mode_options.findText(mode)
|
||||
self.mode_options.setCurrentIndex(index)
|
||||
|
||||
def _update_rendermode_info(self):
|
||||
rendermode = self.mode_options.currentText()
|
||||
self.mode_information.setText(_help[rendermode])
|
||||
|
||||
def _get_comp_name(self):
|
||||
return self._comp.GetAttrs("COMPS_Name")
|
||||
|
||||
def _get_comp_rendermode(self):
|
||||
return self._comp.GetData("openpype.rendermode") or "local"
|
||||
|
||||
def _set_comp_rendermode(self):
|
||||
rendermode = self.mode_options.currentText()
|
||||
self._comp.SetData("openpype.rendermode", rendermode)
|
||||
|
||||
self._comp.Print("Updated render mode to '%s'\n" % rendermode)
|
||||
self.hide()
|
||||
|
||||
def _validation(self):
|
||||
ui_mode = self.mode_options.currentText()
|
||||
comp_mode = self._get_comp_rendermode()
|
||||
|
||||
return comp_mode == ui_mode
|
||||
|
|
@ -432,11 +432,11 @@ copy_files = """function copyFile(srcFilename, dstFilename)
|
|||
|
||||
import_files = """function %s_import_files()
|
||||
{
|
||||
var PNGTransparencyMode = 0; // Premultiplied wih Black
|
||||
var TGATransparencyMode = 0; // Premultiplied wih Black
|
||||
var SGITransparencyMode = 0; // Premultiplied wih Black
|
||||
var PNGTransparencyMode = 0; // Premultiplied with Black
|
||||
var TGATransparencyMode = 0; // Premultiplied with Black
|
||||
var SGITransparencyMode = 0; // Premultiplied with Black
|
||||
var LayeredPSDTransparencyMode = 1; // Straight
|
||||
var FlatPSDTransparencyMode = 2; // Premultiplied wih White
|
||||
var FlatPSDTransparencyMode = 2; // Premultiplied with White
|
||||
|
||||
function getUniqueColumnName( column_prefix )
|
||||
{
|
||||
|
|
|
|||
|
|
@ -142,10 +142,10 @@ function Client() {
|
|||
};
|
||||
|
||||
/**
|
||||
* Process recieved request. This will eval recieved function and produce
|
||||
* Process received request. This will eval received function and produce
|
||||
* results.
|
||||
* @function
|
||||
* @param {object} request - recieved request JSON
|
||||
* @param {object} request - received request JSON
|
||||
* @return {object} result of evaled function.
|
||||
*/
|
||||
self.processRequest = function(request) {
|
||||
|
|
@ -245,7 +245,7 @@ function Client() {
|
|||
var request = JSON.parse(to_parse);
|
||||
var mid = request.message_id;
|
||||
// self.logDebug('[' + mid + '] - Request: ' + '\n' + JSON.stringify(request));
|
||||
self.logDebug('[' + mid + '] Recieved.');
|
||||
self.logDebug('[' + mid + '] Received.');
|
||||
|
||||
request.result = self.processRequest(request);
|
||||
self.logDebug('[' + mid + '] Processing done.');
|
||||
|
|
@ -286,8 +286,8 @@ function Client() {
|
|||
/** Harmony 21.1 doesn't have QDataStream anymore.
|
||||
|
||||
This means we aren't able to write bytes into QByteArray so we had to
|
||||
modify how content lenght is sent do the server.
|
||||
Content lenght is sent as string of 8 char convertible into integer
|
||||
modify how content length is sent to the server.
|
||||
Content length is sent as string of 8 char convertible into integer
|
||||
(instead of 0x00000001[4 bytes] > "000000001"[8 bytes]) */
|
||||
var codec_name = new QByteArray().append("UTF-8");
|
||||
|
||||
|
|
@ -476,6 +476,25 @@ function start() {
|
|||
action.triggered.connect(self.onSubsetManage);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set scene settings from DB to the scene
|
||||
*/
|
||||
self.onSetSceneSettings = function() {
|
||||
app.avalonClient.send(
|
||||
{
|
||||
"module": "openpype.hosts.harmony.api",
|
||||
"method": "ensure_scene_settings",
|
||||
"args": []
|
||||
},
|
||||
false
|
||||
);
|
||||
};
|
||||
// add Set Scene Settings
|
||||
if (app.avalonMenu == null) {
|
||||
action = menu.addAction('Set Scene Settings...');
|
||||
action.triggered.connect(self.onSetSceneSettings);
|
||||
}
|
||||
|
||||
/**
|
||||
* Show Experimental dialog
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -394,7 +394,7 @@ def get_scene_data():
|
|||
"function": "AvalonHarmony.getSceneData"
|
||||
})["result"]
|
||||
except json.decoder.JSONDecodeError:
|
||||
# Means no sceen metadata has been made before.
|
||||
# Means no scene metadata has been made before.
|
||||
return {}
|
||||
except KeyError:
|
||||
# Means no existing scene metadata has been made.
|
||||
|
|
@ -465,7 +465,7 @@ def imprint(node_id, data, remove=False):
|
|||
Example:
|
||||
>>> from openpype.hosts.harmony.api import lib
|
||||
>>> node = "Top/Display"
|
||||
>>> data = {"str": "someting", "int": 1, "float": 0.32, "bool": True}
|
||||
>>> data = {"str": "something", "int": 1, "float": 0.32, "bool": True}
|
||||
>>> lib.imprint(layer, data)
|
||||
"""
|
||||
scene_data = get_scene_data()
|
||||
|
|
@ -550,7 +550,7 @@ def save_scene():
|
|||
method prevents this double request and safely saves the scene.
|
||||
|
||||
"""
|
||||
# Need to turn off the backgound watcher else the communication with
|
||||
# Need to turn off the background watcher else the communication with
|
||||
# the server gets spammed with two requests at the same time.
|
||||
scene_path = send(
|
||||
{"function": "AvalonHarmony.saveScene"})["result"]
|
||||
|
|
|
|||
|
|
@ -142,7 +142,7 @@ def application_launch(event):
|
|||
harmony.send({"script": script})
|
||||
inject_avalon_js()
|
||||
|
||||
ensure_scene_settings()
|
||||
# ensure_scene_settings()
|
||||
check_inventory()
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -61,7 +61,7 @@ class Server(threading.Thread):
"module": (str),  # Module of method.
"method" (str),  # Name of method in module.
"args" (list),  # Arguments to pass to method.
-"kwargs" (dict),  # Keywork arguments to pass to method.
+"kwargs" (dict),  # Keyword arguments to pass to method.
"reply" (bool),  # Optional wait for method completion.
}
"""
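As a rough illustration of the request format documented in this docstring, a payload asking the integration to run a method could look like the dictionary below. The module and method names are taken from the menu action added earlier in this changeset; nothing here is copied from the server implementation itself:

```python
# Hypothetical request payload matching the keys documented above.
request = {
    "module": "openpype.hosts.harmony.api",  # Module of method.
    "method": "ensure_scene_settings",       # Name of method in module.
    "args": [],                              # Arguments to pass to method.
    "kwargs": {},                            # Keyword arguments to pass to method.
    "reply": True,                           # Optional wait for method completion.
}
```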
@ -25,8 +25,9 @@ class ExtractRender(pyblish.api.InstancePlugin):
application_path = instance.context.data.get("applicationPath")
scene_path = instance.context.data.get("scenePath")
frame_rate = instance.context.data.get("frameRate")
-frame_start = instance.context.data.get("frameStart")
-frame_end = instance.context.data.get("frameEnd")
+# real value from timeline
+frame_start = instance.context.data.get("frameStartHandle")
+frame_end = instance.context.data.get("frameEndHandle")
audio_path = instance.context.data.get("audioPath")

if audio_path and os.path.exists(audio_path):
@ -55,9 +56,13 @@ class ExtractRender(pyblish.api.InstancePlugin):

# Execute rendering. Ignoring error cause Harmony returns error code
# always.
-self.log.info(f"running [ {application_path} -batch {scene_path}")
+
+args = [application_path, "-batch",
+"-frames", str(frame_start), str(frame_end),
+"-scene", scene_path]
+self.log.info(f"running [ {application_path} {' '.join(args)}")
proc = subprocess.Popen(
-[application_path, "-batch", scene_path],
+args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE
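With hypothetical values filled in, the argument list handed to `Popen` takes roughly this shape; the paths and frame numbers below are made up for illustration only:

```python
# Example of the resulting batch render command line (values are assumptions).
args = ["/opt/toonboom/HarmonyPremium", "-batch",
        "-frames", "991", "1110",
        "-scene", "/projects/show/scene.xstage"]
# i.e. HarmonyPremium -batch -frames 991 1110 -scene /projects/show/scene.xstage
```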
@ -60,7 +60,8 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
# which is available on 'context.data["assetEntity"]'
# - the same approach can be used in 'ValidateSceneSettingsRepair'
expected_settings = harmony.get_asset_settings()
-self.log.info("scene settings from DB:".format(expected_settings))
+self.log.info("scene settings from DB:{}".format(expected_settings))
+expected_settings.pop("entityType")  # not useful for the validation

expected_settings = _update_frames(dict.copy(expected_settings))
expected_settings["frameEndHandle"] = expected_settings["frameEnd"] +\
@ -68,21 +69,32 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):

if (any(re.search(pattern, os.getenv('AVALON_TASK'))
for pattern in self.skip_resolution_check)):
self.log.info("Skipping resolution check because of "
"task name and pattern {}".format(
self.skip_resolution_check))
expected_settings.pop("resolutionWidth")
expected_settings.pop("resolutionHeight")

entity_type = expected_settings.get("entityType")
if (any(re.search(pattern, entity_type)
if (any(re.search(pattern, os.getenv('AVALON_TASK'))
for pattern in self.skip_timelines_check)):
self.log.info("Skipping frames check because of "
"task name and pattern {}".format(
self.skip_timelines_check))
expected_settings.pop('frameStart', None)
expected_settings.pop('frameEnd', None)

expected_settings.pop("entityType")  # not useful after the check
expected_settings.pop('frameStartHandle', None)
expected_settings.pop('frameEndHandle', None)

asset_name = instance.context.data['anatomyData']['asset']
if any(re.search(pattern, asset_name)
for pattern in self.frame_check_filter):
expected_settings.pop("frameEnd")
self.log.info("Skipping frames check because of "
"task name and pattern {}".format(
self.frame_check_filter))
expected_settings.pop('frameStart', None)
expected_settings.pop('frameEnd', None)
expected_settings.pop('frameStartHandle', None)
expected_settings.pop('frameEndHandle', None)

# handle case where ftrack uses only two decimal places
# 23.976023976023978 vs. 23.98
@ -99,6 +111,7 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
"frameEnd": instance.context.data["frameEnd"],
"handleStart": instance.context.data.get("handleStart"),
"handleEnd": instance.context.data.get("handleEnd"),
"frameStartHandle": instance.context.data.get("frameStartHandle"),
"frameEndHandle": instance.context.data.get("frameEndHandle"),
"resolutionWidth": instance.context.data.get("resolutionWidth"),
"resolutionHeight": instance.context.data.get("resolutionHeight"),
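The handle-inclusive keys above follow the common OpenPype convention of padding the working frame range by the handles. A small sketch of that relationship, stated as an assumption rather than code taken from this plugin:

```python
frame_start, frame_end = 1001, 1100
handle_start, handle_end = 10, 10

# Handle-inclusive range, as carried by the frameStartHandle/frameEndHandle keys.
frame_start_handle = frame_start - handle_start  # 991
frame_end_handle = frame_end + handle_end        # 1110
```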
@ -6,7 +6,7 @@ Ever tried to make a simple script for toonboom Harmony, then got stumped by the

Toonboom Harmony is a very powerful software, with hundreds of functions and tools, and it unlocks a great amount of possibilities for animation studios around the globe. And... being the product of the hard work of a small team forced to prioritise, it can also be a bit rustic at times!

-We are users at heart, animators and riggers, who just want to interact with the software as simply as possible. Simplicity is at the heart of the design of openHarmony. But we also are developpers, and we made the library for people like us who can't resist tweaking the software and bend it in all possible ways, and are looking for powerful functions to help them do it.
+We are users at heart, animators and riggers, who just want to interact with the software as simply as possible. Simplicity is at the heart of the design of openHarmony. But we also are developers, and we made the library for people like us who can't resist tweaking the software and bend it in all possible ways, and are looking for powerful functions to help them do it.

This library's aim is to create a more direct way to interact with Toonboom through scripts, by providing a more intuitive way to access its elements, and help with the cumbersome and repetitive tasks as well as help unlock untapped potential in its many available systems. So we can go from having to do things like this:
@ -78,7 +78,7 @@ All you have to do is call :
```javascript
include("openHarmony.js");
```
-at the beggining of your script.
+at the beginning of your script.

You can ask your users to download their copy of the library and store it alongside, or bundle it as you wish as long as you include the license file provided on this repository.

@ -129,7 +129,7 @@ Check that the environment variable `LIB_OPENHARMONY_PATH` is set correctly to t
## How to add openHarmony to vscode intellisense for autocompletion

Although not fully supported, you can get most of the autocompletion features to work by adding the following lines to a `jsconfig.json` file placed at the root of your working folder.
-The paths need to be relative which means the openHarmony source code must be placed directly in your developping environnement.
+The paths need to be relative which means the openHarmony source code must be placed directly in your development environment.

For example, if your working folder contains the openHarmony source in a folder called `OpenHarmony` and your working scripts in a folder called `myScripts`, place the `jsconfig.json` file at the root of the folder and add these lines to the file:

@ -4,7 +4,7 @@
// openHarmony Library
//
//
-// Developped by Mathieu Chaptel, Chris Fourney
+// Developed by Mathieu Chaptel, Chris Fourney
//
//
// This library is an open source implementation of a Document Object Model

@ -16,7 +16,7 @@
// and by hiding the heavy lifting required by the official API.
//
// This library is provided as is and is a work in progress. As such, not every
-// function has been implemented or is garanteed to work. Feel free to contribute
+// function has been implemented or is guaranteed to work. Feel free to contribute
// improvements to its official github. If you do make sure you follow the provided
// template and naming conventions and document your new methods properly.
//
@ -78,7 +78,7 @@
* $.log("hello"); // prints out a message to the MessageLog.
* var myPoint = new $.oPoint(0,0,0); // create a new class instance from an openHarmony class.
*
-* // function members of the $ objects get published to the global scope, which means $ can be ommited
+* // function members of the $ objects get published to the global scope, which means $ can be omitted
*
* log("hello");
* var myPoint = new oPoint(0,0,0); // This is all valid
@ -118,7 +118,7 @@ Object.defineProperty( $, "directory", {
|
|||
|
||||
|
||||
/**
|
||||
* Wether Harmony is run with the interface or simply from command line
|
||||
* Whether Harmony is run with the interface or simply from command line
|
||||
*/
|
||||
Object.defineProperty( $, "batchMode", {
|
||||
get: function(){
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney
|
||||
// Developed by Mathieu Chaptel, Chris Fourney
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
@ -67,7 +67,7 @@
|
|||
* @hideconstructor
|
||||
* @namespace
|
||||
* @example
|
||||
* // To check wether an action is available, call the synthax:
|
||||
* // To check whether an action is available, call the synthax:
|
||||
* Action.validate (<actionName>, <responder>);
|
||||
*
|
||||
* // To launch an action, call the synthax:
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney
|
||||
// Developed by Mathieu Chaptel, Chris Fourney
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
@ -409,7 +409,7 @@ $.oApp.prototype.getToolByName = function(toolName){
|
|||
|
||||
|
||||
/**
|
||||
* returns the list of stencils useable by the specified tool
|
||||
* returns the list of stencils usable by the specified tool
|
||||
* @param {$.oTool} tool the tool object we want valid stencils for
|
||||
* @return {$.oStencil[]} the list of stencils compatible with the specified tool
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library v0.01
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney...
|
||||
// Developed by Mathieu Chaptel, Chris Fourney...
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
@ -338,7 +338,7 @@ Object.defineProperty($.oAttribute.prototype, "useSeparate", {
|
|||
* Returns the default value of the attribute for most keywords
|
||||
* @name $.oAttribute#defaultValue
|
||||
* @type {bool}
|
||||
* @todo switch the implentation to types?
|
||||
* @todo switch the implementation to types?
|
||||
* @example
|
||||
* // to reset an attribute to its default value:
|
||||
* // (mostly used for position/angle/skew parameters of pegs and drawing nodes)
|
||||
|
|
@ -449,7 +449,7 @@ $.oAttribute.prototype.getLinkedColumns = function(){
|
|||
|
||||
/**
|
||||
* Recursively sets an attribute to the same value as another. Both must have the same keyword.
|
||||
* @param {bool} [duplicateColumns=false] In the case that the attribute has a column, wether to duplicate the column before linking
|
||||
* @param {bool} [duplicateColumns=false] In the case that the attribute has a column, whether to duplicate the column before linking
|
||||
* @private
|
||||
*/
|
||||
$.oAttribute.prototype.setToAttributeValue = function(attributeToCopy, duplicateColumns){
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney
|
||||
// Developed by Mathieu Chaptel, Chris Fourney
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney
|
||||
// Developed by Mathieu Chaptel, Chris Fourney
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
@ -158,7 +158,7 @@ $.oColorValue.prototype.fromColorString = function (hexString){
|
|||
|
||||
|
||||
/**
|
||||
* Uses a color integer (used in backdrops) and parses the INT; applies the RGBA components of the INT to thos oColorValue
|
||||
* Uses a color integer (used in backdrops) and parses the INT; applies the RGBA components of the INT to the oColorValue
|
||||
* @param { int } colorInt 24 bit-shifted integer containing RGBA values
|
||||
*/
|
||||
$.oColorValue.prototype.parseColorFromInt = function(colorInt){
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney
|
||||
// Developed by Mathieu Chaptel, Chris Fourney
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney
|
||||
// Developed by Mathieu Chaptel, Chris Fourney
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@
|
|||
// openHarmony Library
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney
|
||||
// Developed by Mathieu Chaptel, Chris Fourney
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -17,7 +17,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
@ -250,7 +250,7 @@ $.oDialog.prototype.prompt = function( labelText, title, prefilledText){
|
|||
/**
|
||||
* Prompts with a file selector window
|
||||
* @param {string} [text="Select a file:"] The title of the confirmation dialog.
|
||||
* @param {string} [filter="*"] The filter for the file type and/or file name that can be selected. Accepts wildcard charater "*".
|
||||
* @param {string} [filter="*"] The filter for the file type and/or file name that can be selected. Accepts wildcard character "*".
|
||||
* @param {string} [getExisting=true] Whether to select an existing file or a save location
|
||||
* @param {string} [acceptMultiple=false] Whether or not selecting more than one file is ok. Is ignored if getExisting is falses.
|
||||
* @param {string} [startDirectory] The directory showed at the opening of the dialog.
|
||||
|
|
@ -327,14 +327,14 @@ $.oDialog.prototype.browseForFolder = function(text, startDirectory){
|
|||
* @constructor
|
||||
* @classdesc An simple progress dialog to display the progress of a task.
|
||||
* To react to the user clicking the cancel button, connect a function to $.oProgressDialog.canceled() signal.
|
||||
* When $.batchmode is true, the progress will be outputed as a "Progress : value/range" string to the Harmony stdout.
|
||||
* When $.batchmode is true, the progress will be outputted as a "Progress : value/range" string to the Harmony stdout.
|
||||
* @param {string} [labelText] The text displayed above the progress bar.
|
||||
* @param {string} [range=100] The maximum value that represents a full progress bar.
|
||||
* @param {string} [title] The title of the dialog
|
||||
* @param {bool} [show=false] Whether to immediately show the dialog.
|
||||
*
|
||||
* @property {bool} wasCanceled Whether the progress bar was cancelled.
|
||||
* @property {$.oSignal} canceled A Signal emited when the dialog is canceled. Can be connected to a callback.
|
||||
* @property {$.oSignal} canceled A Signal emitted when the dialog is canceled. Can be connected to a callback.
|
||||
*/
|
||||
$.oProgressDialog = function( labelText, range, title, show ){
|
||||
if (typeof title === 'undefined') var title = "Progress";
|
||||
|
|
@ -608,7 +608,7 @@ $.oPieMenu = function( name, widgets, show, minAngle, maxAngle, radius, position
|
|||
this.maxAngle = maxAngle;
|
||||
this.globalCenter = position;
|
||||
|
||||
// how wide outisde the icons is the slice drawn
|
||||
// how wide outside the icons is the slice drawn
|
||||
this._circleMargin = 30;
|
||||
|
||||
// set these values before calling show() to customize the menu appearance
|
||||
|
|
@ -974,7 +974,7 @@ $.oPieMenu.prototype.getMenuRadius = function(){
|
|||
var _minRadius = UiLoader.dpiScale(30);
|
||||
var _speed = 10; // the higher the value, the slower the progression
|
||||
|
||||
// hyperbolic tangent function to determin the radius
|
||||
// hyperbolic tangent function to determine the radius
|
||||
var exp = Math.exp(2*itemsNumber/_speed);
|
||||
var _radius = ((exp-1)/(exp+1))*_maxRadius+_minRadius;
|
||||
|
||||
|
|
@ -1383,7 +1383,7 @@ $.oActionButton.prototype.activate = function(){
|
|||
* This class is a subclass of QPushButton and all the methods from that class are available to modify this button.
|
||||
* @param {string} paletteName The name of the palette that contains the color
|
||||
* @param {string} colorName The name of the color (if more than one is present, will pick the first match)
|
||||
* @param {bool} showName Wether to display the name of the color on the button
|
||||
* @param {bool} showName Whether to display the name of the color on the button
|
||||
* @param {QWidget} parent The parent QWidget for the button. Automatically set during initialisation of the menu.
|
||||
*
|
||||
*/
|
||||
|
|
@ -1437,7 +1437,7 @@ $.oColorButton.prototype.activate = function(){
|
|||
* @name $.oScriptButton
|
||||
* @constructor
|
||||
* @classdescription This subclass of QPushButton provides an easy way to create a button for a widget that will launch a function from another script file.<br>
|
||||
* The buttons created this way automatically load the icon named after the script if it finds one named like the funtion in a script-icons folder next to the script file.<br>
|
||||
* The buttons created this way automatically load the icon named after the script if it finds one named like the function in a script-icons folder next to the script file.<br>
|
||||
* It will also automatically set the callback to lanch the function from the script.<br>
|
||||
* This class is a subclass of QPushButton and all the methods from that class are available to modify this button.
|
||||
* @param {string} scriptFile The path to the script file that will be launched
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney
|
||||
// Developed by Mathieu Chaptel, Chris Fourney
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
@ -426,7 +426,7 @@ Object.defineProperty($.oDrawing.prototype, 'drawingData', {
|
|||
/**
|
||||
* Import a given file into an existing drawing.
|
||||
* @param {$.oFile} file The path to the file
|
||||
* @param {bool} [convertToTvg=false] Wether to convert the bitmap to the tvg format (this doesn't vectorise the drawing)
|
||||
* @param {bool} [convertToTvg=false] Whether to convert the bitmap to the tvg format (this doesn't vectorise the drawing)
|
||||
*
|
||||
* @return { $.oFile } the oFile object pointing to the drawing file after being it has been imported into the element folder.
|
||||
*/
|
||||
|
|
@ -878,8 +878,8 @@ $.oArtLayer.prototype.drawCircle = function(center, radius, lineStyle, fillStyle
|
|||
* @param {$.oVertex[]} path an array of $.oVertex objects that describe a path.
|
||||
* @param {$.oLineStyle} [lineStyle] the line style to draw with. (By default, will use the current stencil selection)
|
||||
* @param {$.oFillStyle} [fillStyle] the fill information for the path. (By default, will use the current palette selection)
|
||||
* @param {bool} [polygon] Wether bezier handles should be created for the points in the path (ignores "onCurve" properties of oVertex from path)
|
||||
* @param {bool} [createUnderneath] Wether the new shape will appear on top or underneath the contents of the layer. (not working yet)
|
||||
* @param {bool} [polygon] Whether bezier handles should be created for the points in the path (ignores "onCurve" properties of oVertex from path)
|
||||
* @param {bool} [createUnderneath] Whether the new shape will appear on top or underneath the contents of the layer. (not working yet)
|
||||
*/
|
||||
$.oArtLayer.prototype.drawShape = function(path, lineStyle, fillStyle, polygon, createUnderneath){
|
||||
if (typeof fillStyle === 'undefined') var fillStyle = new this.$.oFillStyle();
|
||||
|
|
@ -959,7 +959,7 @@ $.oArtLayer.prototype.drawContour = function(path, fillStyle){
|
|||
* @param {float} width the width of the rectangle.
|
||||
* @param {float} height the height of the rectangle.
|
||||
* @param {$.oLineStyle} lineStyle a line style to use for the rectangle stroke.
|
||||
* @param {$.oFillStyle} fillStyle a fill style to use for the rectange fill.
|
||||
* @param {$.oFillStyle} fillStyle a fill style to use for the rectangle fill.
|
||||
* @returns {$.oShape} the shape containing the added stroke.
|
||||
*/
|
||||
$.oArtLayer.prototype.drawRectangle = function(x, y, width, height, lineStyle, fillStyle){
|
||||
|
|
@ -1514,7 +1514,7 @@ Object.defineProperty($.oStroke.prototype, "path", {
|
|||
|
||||
|
||||
/**
|
||||
* The oVertex that are on the stroke (Bezier handles exluded.)
|
||||
* The oVertex that are on the stroke (Bezier handles excluded.)
|
||||
* The first is repeated at the last position when the stroke is closed.
|
||||
* @name $.oStroke#points
|
||||
* @type {$.oVertex[]}
|
||||
|
|
@ -1583,7 +1583,7 @@ Object.defineProperty($.oStroke.prototype, "style", {
|
|||
|
||||
|
||||
/**
|
||||
* wether the stroke is a closed shape.
|
||||
* whether the stroke is a closed shape.
|
||||
* @name $.oStroke#closed
|
||||
* @type {bool}
|
||||
*/
|
||||
|
|
@ -1919,7 +1919,7 @@ $.oContour.prototype.toString = function(){
|
|||
* @constructor
|
||||
* @classdesc
|
||||
* The $.oVertex class represents a single control point on a stroke. This class is used to get the index of the point in the stroke path sequence, as well as its position as a float along the stroke's length.
|
||||
* The onCurve property describes wether this control point is a bezier handle or a point on the curve.
|
||||
* The onCurve property describes whether this control point is a bezier handle or a point on the curve.
|
||||
*
|
||||
* @param {$.oStroke} stroke the stroke that this vertex belongs to
|
||||
* @param {float} x the x coordinate of the vertex, in drawing space
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library v0.01
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney...
|
||||
// Developed by Mathieu Chaptel, Chris Fourney...
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney
|
||||
// Developed by Mathieu Chaptel, Chris Fourney
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
@ -509,7 +509,7 @@ Object.defineProperty($.oFile.prototype, 'fullName', {
|
|||
|
||||
|
||||
/**
|
||||
* The name of the file without extenstion.
|
||||
* The name of the file without extension.
|
||||
* @name $.oFile#name
|
||||
* @type {string}
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney
|
||||
// Developed by Mathieu Chaptel, Chris Fourney
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
@ -263,7 +263,7 @@ Object.defineProperty($.oFrame.prototype, 'duration', {
|
|||
return _sceneLength;
|
||||
}
|
||||
|
||||
// walk up the frames of the scene to the next keyFrame to determin duration
|
||||
// walk up the frames of the scene to the next keyFrame to determine duration
|
||||
var _frames = this.column.frames
|
||||
for (var i=this.frameNumber+1; i<_sceneLength; i++){
|
||||
if (_frames[i].isKeyframe) return _frames[i].frameNumber - _startFrame;
|
||||
|
|
@ -426,7 +426,7 @@ Object.defineProperty($.oFrame.prototype, 'velocity', {
|
|||
* easeIn : a $.oPoint object representing the left handle for bezier columns, or a {point, ease} object for ease columns.
|
||||
* easeOut : a $.oPoint object representing the left handle for bezier columns, or a {point, ease} object for ease columns.
|
||||
* continuity : the type of bezier used by the point.
|
||||
* constant : wether the frame is interpolated or a held value.
|
||||
* constant : whether the frame is interpolated or a held value.
|
||||
* @name $.oFrame#ease
|
||||
* @type {oPoint/object}
|
||||
*/
|
||||
|
|
@ -520,7 +520,7 @@ Object.defineProperty($.oFrame.prototype, 'easeOut', {
|
|||
|
||||
|
||||
/**
|
||||
* Determines the frame's continuity setting. Can take the values "CORNER", (two independant bezier handles on each side), "SMOOTH"(handles are aligned) or "STRAIGHT" (no handles and in straight lines).
|
||||
* Determines the frame's continuity setting. Can take the values "CORNER", (two independent bezier handles on each side), "SMOOTH"(handles are aligned) or "STRAIGHT" (no handles and in straight lines).
|
||||
* @name $.oFrame#continuity
|
||||
* @type {string}
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library v0.01
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney...
|
||||
// Developed by Mathieu Chaptel, Chris Fourney...
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
@ -516,5 +516,5 @@ Object.defineProperty($.oList.prototype, 'toString', {
|
|||
|
||||
|
||||
|
||||
//Needs all filtering, limiting. mapping, pop, concat, join, ect
|
||||
//Needs all filtering, limiting. mapping, pop, concat, join, etc
|
||||
//Speed up by finessing the way it extends and tracks the enumerable properties.
|
||||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney
|
||||
// Developed by Mathieu Chaptel, Chris Fourney
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
@ -193,7 +193,7 @@ $.oPoint.prototype.pointSubtract = function( sub_pt ){
|
|||
/**
|
||||
* Subtracts the point to the coordinates of the current oPoint and returns a new oPoint with the result.
|
||||
* @param {$.oPoint} point The point to subtract to this point.
|
||||
* @returns {$.oPoint} a new independant oPoint.
|
||||
* @returns {$.oPoint} a new independent oPoint.
|
||||
*/
|
||||
$.oPoint.prototype.subtractPoint = function( point ){
|
||||
var x = this.x - point.x;
|
||||
|
|
@ -298,9 +298,9 @@ $.oPoint.prototype.convertToWorldspace = function(){
|
|||
|
||||
|
||||
/**
|
||||
* Linearily Interpolate between this (0.0) and the provided point (1.0)
|
||||
* Linearly Interpolate between this (0.0) and the provided point (1.0)
|
||||
* @param {$.oPoint} point The target point at 100%
|
||||
* @param {double} perc 0-1.0 value to linearily interp
|
||||
* @param {double} perc 0-1.0 value to linearly interp
|
||||
*
|
||||
* @return: { $.oPoint } The interpolated value.
|
||||
*/
|
||||
|
|
@ -410,9 +410,9 @@ $.oBox.prototype.include = function(box){
|
|||
|
||||
|
||||
/**
|
||||
* Checks wether the box contains another $.oBox.
|
||||
* Checks whether the box contains another $.oBox.
|
||||
* @param {$.oBox} box The $.oBox to check for.
|
||||
* @param {bool} [partial=false] wether to accept partially contained boxes.
|
||||
* @param {bool} [partial=false] whether to accept partially contained boxes.
|
||||
*/
|
||||
$.oBox.prototype.contains = function(box, partial){
|
||||
if (typeof partial === 'undefined') var partial = false;
|
||||
|
|
@ -537,7 +537,7 @@ $.oMatrix.prototype.toString = function(){
|
|||
* @classdesc The $.oVector is a replacement for the Vector3d objects of Harmony.
|
||||
* @param {float} x a x coordinate for this vector.
|
||||
* @param {float} y a y coordinate for this vector.
|
||||
* @param {float} [z=0] a z coordinate for this vector. If ommited, will be set to 0 and vector will be 2D.
|
||||
* @param {float} [z=0] a z coordinate for this vector. If omitted, will be set to 0 and vector will be 2D.
|
||||
*/
|
||||
$.oVector = function(x, y, z){
|
||||
if (typeof z === "undefined" || isNaN(z)) var z = 0;
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library v0.01
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney...
|
||||
// Developed by Mathieu Chaptel, Chris Fourney...
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library v0.01
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney...
|
||||
// Developed by Mathieu Chaptel, Chris Fourney...
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
@ -54,7 +54,7 @@
|
|||
|
||||
|
||||
/**
|
||||
* The $.oUtils helper class -- providing generic utilities. Doesn't need instanciation.
|
||||
* The $.oUtils helper class -- providing generic utilities. Doesn't need instantiation.
|
||||
* @classdesc $.oUtils utility Class
|
||||
*/
|
||||
$.oUtils = function(){
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library v0.01
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney...
|
||||
// Developed by Mathieu Chaptel, Chris Fourney...
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
@ -87,7 +87,7 @@ $.oNetwork = function( ){
|
|||
* @param {function} callback_func Providing a callback function prevents blocking, and will respond on this function. The callback function is in form func( results ){}
|
||||
* @param {bool} use_json In the event of a JSON api, this will return an object converted from the returned JSON.
|
||||
*
|
||||
* @return: {string/object} The resulting object/string from the query -- otherwise a bool as false when an error occured..
|
||||
* @return: {string/object} The resulting object/string from the query -- otherwise a bool as false when an error occurred..
|
||||
*/
|
||||
$.oNetwork.prototype.webQuery = function ( address, callback_func, use_json ){
|
||||
if (typeof callback_func === 'undefined') var callback_func = false;
|
||||
|
|
@ -272,7 +272,7 @@ $.oNetwork.prototype.webQuery = function ( address, callback_func, use_json ){
|
|||
* @param {function} path The local file path to save the download.
|
||||
* @param {bool} replace Replace the file if it exists.
|
||||
*
|
||||
* @return: {string/object} The resulting object/string from the query -- otherwise a bool as false when an error occured..
|
||||
* @return: {string/object} The resulting object/string from the query -- otherwise a bool as false when an error occurred..
|
||||
*/
|
||||
$.oNetwork.prototype.downloadSingle = function ( address, path, replace ){
|
||||
if (typeof replace === 'undefined') var replace = false;
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney
|
||||
// Developed by Mathieu Chaptel, Chris Fourney
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
@ -562,7 +562,7 @@ Object.defineProperty($.oNode.prototype, 'height', {
|
|||
|
||||
|
||||
/**
|
||||
* The list of oNodeLinks objects descibing the connections to the inport of this node, in order of inport.
|
||||
* The list of oNodeLinks objects describing the connections to the inport of this node, in order of inport.
|
||||
* @name $.oNode#inLinks
|
||||
* @readonly
|
||||
* @deprecated returns $.oNodeLink instances but $.oLink is preferred. Use oNode.getInLinks() instead.
|
||||
|
|
@ -658,7 +658,7 @@ Object.defineProperty($.oNode.prototype, 'outPorts', {
|
|||
|
||||
|
||||
/**
|
||||
* The list of oNodeLinks objects descibing the connections to the outports of this node, in order of outport.
|
||||
* The list of oNodeLinks objects describing the connections to the outports of this node, in order of outport.
|
||||
* @name $.oNode#outLinks
|
||||
* @readonly
|
||||
* @type {$.oNodeLink[]}
|
||||
|
|
@ -1666,7 +1666,7 @@ $.oNode.prototype.refreshAttributes = function( ){
|
|||
* It represents peg nodes in the scene.
|
||||
* @constructor
|
||||
* @augments $.oNode
|
||||
* @classdesc Peg Moudle Class
|
||||
* @classdesc Peg Module Class
|
||||
* @param {string} path Path to the node in the network.
|
||||
* @param {oScene} oSceneObject Access to the oScene object of the DOM.
|
||||
*/
|
||||
|
|
@ -1886,7 +1886,7 @@ $.oDrawingNode.prototype.getDrawingAtFrame = function(frameNumber){
|
|||
|
||||
|
||||
/**
|
||||
* Gets the list of palettes containing colors used by a drawing node. This only gets palettes with the first occurence of the colors.
|
||||
* Gets the list of palettes containing colors used by a drawing node. This only gets palettes with the first occurrence of the colors.
|
||||
* @return {$.oPalette[]} The palettes that contain the color IDs used by the drawings of the node.
|
||||
*/
|
||||
$.oDrawingNode.prototype.getUsedPalettes = function(){
|
||||
|
|
@ -1968,7 +1968,7 @@ $.oDrawingNode.prototype.unlinkPalette = function(oPaletteObject){
|
|||
* Duplicates a node by creating an independent copy.
|
||||
* @param {string} [newName] The new name for the duplicated node.
|
||||
* @param {oPoint} [newPosition] The new position for the duplicated node.
|
||||
* @param {bool} [duplicateElement] Wether to also duplicate the element.
|
||||
* @param {bool} [duplicateElement] Whether to also duplicate the element.
|
||||
*/
|
||||
$.oDrawingNode.prototype.duplicate = function(newName, newPosition, duplicateElement){
|
||||
if (typeof newPosition === 'undefined') var newPosition = this.nodePosition;
|
||||
|
|
@ -2464,7 +2464,7 @@ $.oGroupNode.prototype.getNodeByName = function(name){
|
|||
* Returns all the nodes of a certain type in the group.
|
||||
* Pass a value to recurse to look into the groups as well.
|
||||
* @param {string} typeName The type of the nodes.
|
||||
* @param {bool} recurse Wether to look inside the groups.
|
||||
* @param {bool} recurse Whether to look inside the groups.
|
||||
*
|
||||
* @return {$.oNode[]} The nodes found.
|
||||
*/
|
||||
|
|
@ -2626,7 +2626,7 @@ $.oGroupNode.prototype.orderNodeView = function(recurse){
|
|||
*
|
||||
* peg.linkOutNode(drawingNode);
|
||||
*
|
||||
* //through all this we didn't specify nodePosition parameters so we'll sort evertything at once
|
||||
* //through all this we didn't specify nodePosition parameters so we'll sort everything at once
|
||||
*
|
||||
* sceneRoot.orderNodeView();
|
||||
*
|
||||
|
|
@ -3333,7 +3333,7 @@ $.oGroupNode.prototype.importImageAsTVG = function(path, alignment, nodePosition
|
|||
* imports an image sequence as a node into the current group.
|
||||
* @param {$.oFile[]} imagePaths a list of paths to the images to import (can pass a list of strings or $.oFile)
|
||||
* @param {number} [exposureLength=1] the number of frames each drawing should be exposed at. If set to 0/false, each drawing will use the numbering suffix of the file to set its frame.
|
||||
* @param {boolean} [convertToTvg=false] wether to convert the files to tvg during import
|
||||
* @param {boolean} [convertToTvg=false] whether to convert the files to tvg during import
|
||||
* @param {string} [alignment="ASIS"] the alignment to apply to the node
|
||||
* @param {$.oPoint} [nodePosition] the position of the node in the nodeview
|
||||
*
|
||||
|
|
@ -3346,7 +3346,7 @@ $.oGroupNode.prototype.importImageSequence = function(imagePaths, exposureLength
|
|||
|
||||
if (typeof extendScene === 'undefined') var extendScene = false;
|
||||
|
||||
// match anything but capture trailing numbers and separates punctuation preceeding it
|
||||
// match anything but capture trailing numbers and separates punctuation preceding it
|
||||
var numberingRe = /(.*?)([\W_]+)?(\d*)$/i;
|
||||
|
||||
// sanitize imagePaths
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library v0.01
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, Chris Fourney...
|
||||
// Developed by Mathieu Chaptel, Chris Fourney...
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
@ -174,7 +174,7 @@ Object.defineProperty($.oNodeLink.prototype, 'outNode', {
|
|||
return;
|
||||
}
|
||||
|
||||
this.apply(); // do we really want to apply everytime we set?
|
||||
this.apply(); // do we really want to apply every time we set?
|
||||
}
|
||||
});
|
||||
|
||||
|
|
@ -198,7 +198,7 @@ Object.defineProperty($.oNodeLink.prototype, 'inNode', {
|
|||
return;
|
||||
}
|
||||
|
||||
this.apply(); // do we really want to apply everytime we set?
|
||||
this.apply(); // do we really want to apply every time we set?
|
||||
}
|
||||
});
|
||||
|
||||
|
|
@ -222,7 +222,7 @@ Object.defineProperty($.oNodeLink.prototype, 'outPort', {
|
|||
return;
|
||||
}
|
||||
|
||||
this.apply(); // do we really want to apply everytime we set?
|
||||
this.apply(); // do we really want to apply every time we set?
|
||||
}
|
||||
});
|
||||
|
||||
|
|
@ -256,7 +256,7 @@ Object.defineProperty($.oNodeLink.prototype, 'inPort', {
|
|||
return;
|
||||
}
|
||||
|
||||
this.apply(); // do we really want to apply everytime we set?
|
||||
this.apply(); // do we really want to apply every time we set?
|
||||
}
|
||||
});
|
||||
|
||||
|
|
@ -983,7 +983,7 @@ $.oNodeLink.prototype.validate = function ( ) {
|
|||
* @return {bool} Whether the connection is a valid connection that exists currently in the node system.
|
||||
*/
|
||||
$.oNodeLink.prototype.validateUpwards = function( inport, outportProvided ) {
|
||||
//IN THE EVENT OUTNODE WASNT PROVIDED.
|
||||
//IN THE EVENT OUTNODE WASN'T PROVIDED.
|
||||
this.path = this.findInputPath( this._inNode, inport, [] );
|
||||
if( !this.path || this.path.length == 0 ){
|
||||
return false;
|
||||
|
|
@ -1173,7 +1173,7 @@ Object.defineProperty($.oLink.prototype, 'outPort', {
|
|||
|
||||
|
||||
/**
|
||||
* The index of the link comming out of the out-port.
|
||||
* The index of the link coming out of the out-port.
|
||||
* <br>In the event this value wasn't known by the link object but the link is actually connected, the correct value will be found.
|
||||
* @name $.oLink#outLink
|
||||
* @readonly
|
||||
|
|
@ -1323,7 +1323,7 @@ $.oLink.prototype.getValidLink = function(createOutPorts, createInPorts){
|
|||
|
||||
|
||||
/**
|
||||
* Attemps to connect a link. Will guess the ports if not provided.
|
||||
* Attempts to connect a link. Will guess the ports if not provided.
|
||||
* @return {bool}
|
||||
*/
|
||||
$.oLink.prototype.connect = function(){
|
||||
|
|
@ -1623,11 +1623,11 @@ $.oLinkPath.prototype.findExistingPath = function(){
|
|||
|
||||
|
||||
/**
|
||||
* Gets a link object from two nodes that can be succesfully connected. Provide port numbers if there are specific requirements to match. If a link already exists, it will be returned.
|
||||
* Gets a link object from two nodes that can be successfully connected. Provide port numbers if there are specific requirements to match. If a link already exists, it will be returned.
|
||||
* @param {$.oNode} start The node from which the link originates.
|
||||
* @param {$.oNode} end The node at which the link ends.
|
||||
* @param {int} [outPort] A prefered out-port for the link to use.
|
||||
* @param {int} [inPort] A prefered in-port for the link to use.
|
||||
* @param {int} [outPort] A preferred out-port for the link to use.
|
||||
* @param {int} [inPort] A preferred in-port for the link to use.
|
||||
*
|
||||
* @return {$.oLink} the valid $.oLink object. Returns null if no such link could be created (for example if the node's in-port is already linked)
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
// openHarmony Library v0.01
|
||||
//
|
||||
//
|
||||
// Developped by Mathieu Chaptel, ...
|
||||
// Developed by Mathieu Chaptel, ...
|
||||
//
|
||||
//
|
||||
// This library is an open source implementation of a Document Object Model
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
// and by hiding the heavy lifting required by the official API.
|
||||
//
|
||||
// This library is provided as is and is a work in progress. As such, not every
|
||||
// function has been implemented or is garanteed to work. Feel free to contribute
|
||||
// function has been implemented or is guaranteed to work. Feel free to contribute
|
||||
// improvements to its official github. If you do make sure you follow the provided
|
||||
// template and naming conventions and document your new methods properly.
|
||||
//
|
||||
|
|
@ -212,7 +212,7 @@ function openHarmony_toolInstaller(){
|
|||
|
||||
|
||||
//----------------------------------------------
|
||||
//-- GET THE FILE CONTENTS IN A DIRCTORY ON GIT
|
||||
//-- GET THE FILE CONTENTS IN A DIRECTORY ON GIT
|
||||
this.recurse_files = function( contents, arr_files ){
|
||||
with( context.$.global ){
|
||||
try{
|
||||
|
|
@ -501,7 +501,7 @@ function openHarmony_toolInstaller(){
|
|||
var download_item = item["download_url"];
|
||||
var query = $.network.webQuery( download_item, false, false );
|
||||
if( query ){
|
||||
//INSTALL TYPES ARE script, package, ect.
|
||||
//INSTALL TYPES ARE script, package, etc.
|
||||
|
||||
if( install_types[ m.install_cache[ item["url"] ] ] ){
|
||||
m.installLabel.text = install_types[ m.install_cache[ item["url"] ] ];
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
{
"name": "openharmony",
"version": "0.0.1",
-"description": "An Open Source Imlementation of a Document Object Model for the Toonboom Harmony scripting interface",
+"description": "An Open Source Implementation of a Document Object Model for the Toonboom Harmony scripting interface",
"main": "openHarmony.js",
"scripts": {
"test": "$",
@ -108,7 +108,7 @@ __all__ = [
"apply_colorspace_project",
"apply_colorspace_clips",
"get_sequence_pattern_and_padding",
-# depricated
+# deprecated
"get_track_item_pype_tag",
"set_track_item_pype_tag",
"get_track_item_pype_data",
@ -1221,7 +1221,7 @@ def set_track_color(track_item, color):

def check_inventory_versions(track_items=None):
"""
-Actual version color idetifier of Loaded containers
+Actual version color identifier of Loaded containers

Check all track items and filter only
Loader nodes for its version. It will get all versions from database
@ -1249,10 +1249,10 @@ def check_inventory_versions(track_items=None):
project_name = legacy_io.active_project()
filter_result = filter_containers(containers, project_name)
for container in filter_result.latest:
-set_track_color(container["_item"], clip_color)
+set_track_color(container["_item"], clip_color_last)

for container in filter_result.outdated:
-set_track_color(container["_item"], clip_color_last)
+set_track_color(container["_item"], clip_color)


def selection_changed_timeline(event):
@ -193,8 +193,8 @@ def parse_container(item, validate=True):
return
# convert the data to list and validate them
for _, obj_data in _data.items():
-cotnainer = data_to_container(item, obj_data)
-return_list.append(cotnainer)
+container = data_to_container(item, obj_data)
+return_list.append(container)
return return_list
else:
_data = lib.get_trackitem_openpype_data(item)
@ -411,7 +411,7 @@ class ClipLoader:
self.with_handles = options.get("handles") or bool(
options.get("handles") is True)
# try to get value from options or evaluate key value for `load_how`
-self.sequencial_load = options.get("sequencially") or bool(
+self.sequencial_load = options.get("sequentially") or bool(
"Sequentially in order" in options.get("load_how", ""))
# try to get value from options or evaluate key value for `load_to`
self.new_sequence = options.get("newSequence") or bool(
@ -836,7 +836,7 @@ class PublishClip:
# increasing steps by index of rename iteration
self.count_steps *= self.rename_index

-hierarchy_formating_data = {}
+hierarchy_formatting_data = {}
hierarchy_data = deepcopy(self.hierarchy_data)
_data = self.track_item_default_data.copy()
if self.ui_inputs:
@ -871,13 +871,13 @@ class PublishClip:

# fill up pythonic expresisons in hierarchy data
for k, _v in hierarchy_data.items():
-hierarchy_formating_data[k] = _v["value"].format(**_data)
+hierarchy_formatting_data[k] = _v["value"].format(**_data)
else:
# if no gui mode then just pass default data
-hierarchy_formating_data = hierarchy_data
+hierarchy_formatting_data = hierarchy_data

tag_hierarchy_data = self._solve_tag_hierarchy_data(
-hierarchy_formating_data
+hierarchy_formatting_data
)

tag_hierarchy_data.update({"heroTrack": True})
@ -905,20 +905,20 @@ class PublishClip:
# add data to return data dict
self.tag_data.update(tag_hierarchy_data)

-def _solve_tag_hierarchy_data(self, hierarchy_formating_data):
+def _solve_tag_hierarchy_data(self, hierarchy_formatting_data):
""" Solve tag data from hierarchy data and templates. """
# fill up clip name and hierarchy keys
-hierarchy_filled = self.hierarchy.format(**hierarchy_formating_data)
-clip_name_filled = self.clip_name.format(**hierarchy_formating_data)
+hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data)
+clip_name_filled = self.clip_name.format(**hierarchy_formatting_data)

# remove shot from hierarchy data: is not needed anymore
-hierarchy_formating_data.pop("shot")
+hierarchy_formatting_data.pop("shot")

return {
"newClipName": clip_name_filled,
"hierarchy": hierarchy_filled,
"parents": self.parents,
-"hierarchyData": hierarchy_formating_data,
+"hierarchyData": hierarchy_formatting_data,
"subset": self.subset,
"family": self.subset_family,
"families": [self.data["family"]]
@@ -934,16 +934,16 @@ class PublishClip:
)

# first collect formatting data to use for formatting template
formating_data = {}
formatting_data = {}
for _k, _v in self.hierarchy_data.items():
value = _v["value"].format(
**self.track_item_default_data)
formating_data[_k] = value
formatting_data[_k] = value

return {
"entity_type": entity_type,
"entity_name": template.format(
**formating_data
**formatting_data
)
}
@@ -479,23 +479,13 @@ def reset_framerange():

frame_start = asset_data.get("frameStart")
frame_end = asset_data.get("frameEnd")
# Backwards compatibility
if frame_start is None or frame_end is None:
frame_start = asset_data.get("edit_in")
frame_end = asset_data.get("edit_out")

if frame_start is None or frame_end is None:
log.warning("No edit information found for %s" % asset_name)
return

handles = asset_data.get("handles") or 0
handle_start = asset_data.get("handleStart")
if handle_start is None:
handle_start = handles

handle_end = asset_data.get("handleEnd")
if handle_end is None:
handle_end = handles
handle_start = asset_data.get("handleStart", 0)
handle_end = asset_data.get("handleEnd", 0)

frame_start -= int(handle_start)
frame_end += int(handle_end)
@@ -144,13 +144,10 @@ class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):

"""
obj_network = hou.node("/obj")
op_ctx = obj_network.createNode("null", node_name="OpenPypeContext")

# A null in houdini by default comes with content inside to visualize
# the null. However since we explicitly want to hide the node lets
# remove the content and disable the display flag of the node
for node in op_ctx.children():
node.destroy()
op_ctx = obj_network.createNode("subnet",
node_name="OpenPypeContext",
run_init_scripts=False,
load_contents=False)

op_ctx.moveToGoodPosition()
op_ctx.setBuiltExplicitly(False)
@@ -60,7 +60,7 @@ class Creator(LegacyCreator):

def process(self):
instance = super(CreateEpicNode, self, process()
# Set paramaters for Alembic node
# Set parameters for Alembic node
instance.setParms(
{"sop_path": "$HIP/%s.abc" % self.nodes[0]}
)
@@ -69,7 +69,7 @@ def generate_shelves():

mandatory_attributes = {'label', 'script'}
for tool_definition in shelf_definition.get('tools_list'):
# We verify that the name and script attibutes of the tool
# We verify that the name and script attributes of the tool
# are set
if not all(
tool_definition[key] for key in mandatory_attributes
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
"""Convertor for legacy Houdini subsets."""
"""Converter for legacy Houdini subsets."""
from openpype.pipeline.create.creator_plugins import SubsetConvertorPlugin
from openpype.hosts.houdini.api.lib import imprint

@@ -7,7 +7,7 @@ from openpype.hosts.houdini.api.lib import imprint
class HoudiniLegacyConvertor(SubsetConvertorPlugin):
"""Find and convert any legacy subsets in the scene.

This Convertor will find all legacy subsets in the scene and will
This Converter will find all legacy subsets in the scene and will
transform them to the current system. Since the old subsets doesn't
retain any information about their original creators, the only mapping
we can do is based on their families.
@@ -1,7 +1,6 @@
import os
import hou

from openpype.pipeline import legacy_io
import pyblish.api


@@ -11,7 +10,7 @@ class CollectHoudiniCurrentFile(pyblish.api.InstancePlugin):
order = pyblish.api.CollectorOrder - 0.01
label = "Houdini Current File"
hosts = ["houdini"]
family = ["workfile"]
families = ["workfile"]

def process(self, instance):
"""Inject the current working file"""

@@ -21,7 +20,7 @@ class CollectHoudiniCurrentFile(pyblish.api.InstancePlugin):
# By default, Houdini will even point a new scene to a path.
# However if the file is not saved at all and does not exist,
# we assume the user never set it.
filepath = ""
current_file = ""

elif os.path.basename(current_file) == "untitled.hip":
# Due to even a new file being called 'untitled.hip' we are unable
@@ -6,6 +6,11 @@ from pymxs import runtime as rt
from typing import Union
import contextlib

from openpype.pipeline.context_tools import (
get_current_project_asset,
get_current_project
)


JSON_PREFIX = "JSON::"
@@ -157,6 +162,105 @@ def get_multipass_setting(project_setting=None):
["multipass"])


def set_scene_resolution(width: int, height: int):
"""Set the render resolution

Args:
width(int): value of the width
height(int): value of the height

Returns:
None

"""
rt.renderWidth = width
rt.renderHeight = height


def reset_scene_resolution():
"""Apply the scene resolution from the project definition

scene resolution can be overwritten by an asset if the asset.data contains
any information regarding scene resolution .
Returns:
None
"""
data = ["data.resolutionWidth", "data.resolutionHeight"]
project_resolution = get_current_project(fields=data)
project_resolution_data = project_resolution["data"]
asset_resolution = get_current_project_asset(fields=data)
asset_resolution_data = asset_resolution["data"]
# Set project resolution
project_width = int(project_resolution_data.get("resolutionWidth", 1920))
project_height = int(project_resolution_data.get("resolutionHeight", 1080))
width = int(asset_resolution_data.get("resolutionWidth", project_width))
height = int(asset_resolution_data.get("resolutionHeight", project_height))

set_scene_resolution(width, height)


def get_frame_range() -> dict:
"""Get the current assets frame range and handles.

Returns:
dict: with frame start, frame end, handle start, handle end.
"""
# Set frame start/end
asset = get_current_project_asset()
frame_start = asset["data"].get("frameStart")
frame_end = asset["data"].get("frameEnd")

if frame_start is None or frame_end is None:
return

handle_start = asset["data"].get("handleStart", 0)
handle_end = asset["data"].get("handleEnd", 0)
return {
"frameStart": frame_start,
"frameEnd": frame_end,
"handleStart": handle_start,
"handleEnd": handle_end
}


def reset_frame_range(fps: bool = True):
"""Set frame range to current asset.
This is part of 3dsmax documentation:

animationRange: A System Global variable which lets you get and
set an Interval value that defines the start and end frames
of the Active Time Segment.
frameRate: A System Global variable which lets you get
and set an Integer value that defines the current
scene frame rate in frames-per-second.
"""
if fps:
data_fps = get_current_project(fields=["data.fps"])
fps_number = float(data_fps["data"]["fps"])
rt.frameRate = fps_number
frame_range = get_frame_range()
frame_start = frame_range["frameStart"] - int(frame_range["handleStart"])
frame_end = frame_range["frameEnd"] + int(frame_range["handleEnd"])
frange_cmd = f"animationRange = interval {frame_start} {frame_end}"
rt.execute(frange_cmd)


def set_context_setting():
"""Apply the project settings from the project definition

Settings can be overwritten by an asset if the asset.data contains
any information regarding those settings.

Examples of settings:
frame range
resolution

Returns:
None
"""
reset_scene_resolution()


def get_max_version():
"""
Args:
@@ -4,6 +4,7 @@ from qtpy import QtWidgets, QtCore
from pymxs import runtime as rt

from openpype.tools.utils import host_tools
from openpype.hosts.max.api import lib


class OpenPypeMenu(object):
@@ -107,6 +108,17 @@ class OpenPypeMenu(object):
workfiles_action = QtWidgets.QAction("Work Files...", openpype_menu)
workfiles_action.triggered.connect(self.workfiles_callback)
openpype_menu.addAction(workfiles_action)

openpype_menu.addSeparator()

res_action = QtWidgets.QAction("Set Resolution", openpype_menu)
res_action.triggered.connect(self.resolution_callback)
openpype_menu.addAction(res_action)

frame_action = QtWidgets.QAction("Set Frame Range", openpype_menu)
frame_action.triggered.connect(self.frame_range_callback)
openpype_menu.addAction(frame_action)

return openpype_menu

def load_callback(self):
@@ -128,3 +140,11 @@ class OpenPypeMenu(object):
def workfiles_callback(self):
"""Callback to show Workfiles tool."""
host_tools.show_workfiles(parent=self.main_widget)

def resolution_callback(self):
"""Callback to reset scene resolution"""
return lib.reset_scene_resolution()

def frame_range_callback(self):
"""Callback to reset frame range"""
return lib.reset_frame_range()
@@ -50,6 +50,11 @@ class MaxHost(HostBase, IWorkfileHost, ILoadHost, INewPublisher):

self._has_been_setup = True

def context_setting():
return lib.set_context_setting()
rt.callbacks.addScript(rt.Name('systemPostNew'),
context_setting)

def has_unsaved_changes(self):
# TODO: how to get it from 3dsmax?
return True
@@ -61,7 +61,7 @@ class CollectRender(pyblish.api.InstancePlugin):
"plugin": "3dsmax",
"frameStart": context.data['frameStart'],
"frameEnd": context.data['frameEnd'],
"version": version_int
"version": version_int,
}
self.log.info("data: {0}".format(data))
instance.data.update(data)
@@ -0,0 +1,19 @@
import pyblish.api
from openpype.lib import version_up
from pymxs import runtime as rt


class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
"""Increment current workfile version."""

order = pyblish.api.IntegratorOrder + 0.9
label = "Increment Workfile Version"
hosts = ["max"]
families = ["workfile"]

def process(self, context):
path = context.data["currentFile"]
filepath = version_up(path)

rt.saveMaxFile(filepath)
self.log.info("Incrementing file version")
@@ -69,7 +69,7 @@ def _resolution_from_document(doc):
resolution_width = doc["data"].get("resolution_width")
resolution_height = doc["data"].get("resolution_height")

# Make sure both width and heigh are set
# Make sure both width and height are set
if resolution_width is None or resolution_height is None:
cmds.warning(
"No resolution information found for \"{}\"".format(doc["name"])
@@ -1419,6 +1419,71 @@ def set_id(node, unique_id, overwrite=False):
cmds.setAttr(attr, unique_id, type="string")


def get_attribute(plug,
asString=False,
expandEnvironmentVariables=False,
**kwargs):
"""Maya getAttr with some fixes based on `pymel.core.general.getAttr()`.

Like Pymel getAttr this applies some changes to `maya.cmds.getAttr`
- maya pointlessly returned vector results as a tuple wrapped in a list
(ex. '[(1,2,3)]'). This command unpacks the vector for you.
- when getting a multi-attr, maya would raise an error, but this will
return a list of values for the multi-attr
- added support for getting message attributes by returning the
connections instead

Note that the asString + expandEnvironmentVariables argument naming
convention matches the `maya.cmds.getAttr` arguments so that it can
act as a direct replacement for it.

Args:
plug (str): Node's attribute plug as `node.attribute`
asString (bool): Return string value for enum attributes instead
of the index. Note that the return value can be dependent on the
UI language Maya is running in.
expandEnvironmentVariables (bool): Expand any environment variable and
(tilde characters on UNIX) found in string attributes which are
returned.

Kwargs:
Supports the keyword arguments of `maya.cmds.getAttr`

Returns:
object: The value of the maya attribute.

"""
attr_type = cmds.getAttr(plug, type=True)
if asString:
kwargs["asString"] = True
if expandEnvironmentVariables:
kwargs["expandEnvironmentVariables"] = True
try:
res = cmds.getAttr(plug, **kwargs)
except RuntimeError:
if attr_type == "message":
return cmds.listConnections(plug)

node, attr = plug.split(".", 1)
children = cmds.attributeQuery(attr, node=node, listChildren=True)
if children:
return [
get_attribute("{}.{}".format(node, child))
for child in children
]

raise

# Convert vector result wrapped in tuple
if isinstance(res, list) and len(res):
if isinstance(res[0], tuple) and len(res):
if attr_type in {'pointArray', 'vectorArray'}:
return res
return res[0]

return res


def set_attribute(attribute, value, node):
"""Adjust attributes based on the value from the attribute data
@@ -1933,6 +1998,12 @@ def remove_other_uv_sets(mesh):
cmds.removeMultiInstance(attr, b=True)


def get_node_parent(node):
"""Return full path name for parent of node"""
parents = cmds.listRelatives(node, parent=True, fullPath=True)
return parents[0] if parents else None


def get_id_from_sibling(node, history_only=True):
"""Return first node id in the history chain that matches this node.
@@ -1956,10 +2027,6 @@ def get_id_from_sibling(node, history_only=True):

"""

def _get_parent(node):
"""Return full path name for parent of node"""
return cmds.listRelatives(node, parent=True, fullPath=True)

node = cmds.ls(node, long=True)[0]

# Find all similar nodes in history
@@ -1971,8 +2038,8 @@ def get_id_from_sibling(node, history_only=True):
similar_nodes = [x for x in similar_nodes if x != node]

# The node *must be* under the same parent
parent = _get_parent(node)
similar_nodes = [i for i in similar_nodes if _get_parent(i) == parent]
parent = get_node_parent(node)
similar_nodes = [i for i in similar_nodes if get_node_parent(i) == parent]

# Check all of the remaining similar nodes and take the first one
# with an id and assume it's the original.
@@ -2125,23 +2192,13 @@ def get_frame_range():

frame_start = asset["data"].get("frameStart")
frame_end = asset["data"].get("frameEnd")
# Backwards compatibility
if frame_start is None or frame_end is None:
frame_start = asset["data"].get("edit_in")
frame_end = asset["data"].get("edit_out")

if frame_start is None or frame_end is None:
cmds.warning("No edit information found for %s" % asset_name)
return

handles = asset["data"].get("handles") or 0
handle_start = asset["data"].get("handleStart")
if handle_start is None:
handle_start = handles

handle_end = asset["data"].get("handleEnd")
if handle_end is None:
handle_end = handles
handle_start = asset["data"].get("handleStart") or 0
handle_end = asset["data"].get("handleEnd") or 0

return {
"frameStart": frame_start,
@@ -2530,8 +2587,8 @@ def load_capture_preset(data=None):
float(value[2]) / 255
]
disp_options[key] = value
else:
disp_options['displayGradient'] = True
elif key == "displayGradient":
disp_options[key] = value

options['display_options'] = disp_options
|
||||
|
|
@ -3228,38 +3285,78 @@ def set_colorspace():
|
|||
def parent_nodes(nodes, parent=None):
|
||||
# type: (list, str) -> list
|
||||
"""Context manager to un-parent provided nodes and return them back."""
|
||||
import pymel.core as pm # noqa
|
||||
|
||||
parent_node = None
|
||||
def _as_mdagpath(node):
|
||||
"""Return MDagPath for node path."""
|
||||
if not node:
|
||||
return
|
||||
sel = OpenMaya.MSelectionList()
|
||||
sel.add(node)
|
||||
return sel.getDagPath(0)
|
||||
|
||||
# We can only parent dag nodes so we ensure input contains only dag nodes
|
||||
nodes = cmds.ls(nodes, type="dagNode", long=True)
|
||||
if not nodes:
|
||||
# opt-out early
|
||||
yield
|
||||
return
|
||||
|
||||
parent_node_path = None
|
||||
delete_parent = False
|
||||
|
||||
if parent:
|
||||
if not cmds.objExists(parent):
|
||||
parent_node = pm.createNode("transform", n=parent, ss=False)
|
||||
parent_node = cmds.createNode("transform",
|
||||
name=parent,
|
||||
skipSelect=False)
|
||||
delete_parent = True
|
||||
else:
|
||||
parent_node = pm.PyNode(parent)
|
||||
parent_node = parent
|
||||
parent_node_path = cmds.ls(parent_node, long=True)[0]
|
||||
|
||||
# Store original parents
|
||||
node_parents = []
|
||||
for node in nodes:
|
||||
n = pm.PyNode(node)
|
||||
try:
|
||||
root = pm.listRelatives(n, parent=1)[0]
|
||||
except IndexError:
|
||||
root = None
|
||||
node_parents.append((n, root))
|
||||
node_parent = get_node_parent(node)
|
||||
node_parents.append((_as_mdagpath(node), _as_mdagpath(node_parent)))
|
||||
|
||||
try:
|
||||
for node in node_parents:
|
||||
if not parent:
|
||||
node[0].setParent(world=True)
|
||||
for node, node_parent in node_parents:
|
||||
node_parent_path = node_parent.fullPathName() if node_parent else None # noqa
|
||||
if node_parent_path == parent_node_path:
|
||||
# Already a child
|
||||
continue
|
||||
|
||||
if parent_node_path:
|
||||
cmds.parent(node.fullPathName(), parent_node_path)
|
||||
else:
|
||||
node[0].setParent(parent_node)
|
||||
cmds.parent(node.fullPathName(), world=True)
|
||||
|
||||
yield
|
||||
finally:
|
||||
for node in node_parents:
|
||||
if node[1]:
|
||||
node[0].setParent(node[1])
|
||||
# Reparent to original parents
|
||||
for node, original_parent in node_parents:
|
||||
node_path = node.fullPathName()
|
||||
if not node_path:
|
||||
# Node must have been deleted
|
||||
continue
|
||||
|
||||
node_parent_path = get_node_parent(node_path)
|
||||
|
||||
original_parent_path = None
|
||||
if original_parent:
|
||||
original_parent_path = original_parent.fullPathName()
|
||||
if not original_parent_path:
|
||||
# Original parent node must have been deleted
|
||||
continue
|
||||
|
||||
if node_parent_path != original_parent_path:
|
||||
if not original_parent_path:
|
||||
cmds.parent(node_path, world=True)
|
||||
else:
|
||||
cmds.parent(node_path, original_parent_path)
|
||||
|
||||
if delete_parent:
|
||||
pm.delete(parent_node)
|
||||
cmds.delete(parent_node_path)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
|
|
@@ -3727,3 +3824,43 @@ def len_flattened(components):
else:
n += 1
return n


def get_all_children(nodes):
"""Return all children of `nodes` including each instanced child.
Using maya.cmds.listRelatives(allDescendents=True) includes only the first
instance. As such, this function acts as an optimal replacement with a
focus on a fast query.

"""

sel = OpenMaya.MSelectionList()
traversed = set()
iterator = OpenMaya.MItDag(OpenMaya.MItDag.kDepthFirst)
for node in nodes:

if node in traversed:
# Ignore if already processed as a child
# before
continue

sel.clear()
sel.add(node)
dag = sel.getDagPath(0)

iterator.reset(dag)
# ignore self
iterator.next() # noqa: B305
while not iterator.isDone():

path = iterator.fullPathName()

if path in traversed:
iterator.prune()
iterator.next() # noqa: B305
continue

traversed.add(path)
iterator.next() # noqa: B305

return list(traversed)
@@ -339,7 +339,7 @@ class ARenderProducts:
aov_tokens = ["<aov>", "<renderpass>"]

def match_last(tokens, text):
"""regex match the last occurence from a list of tokens"""
"""regex match the last occurrence from a list of tokens"""
pattern = "(?:.*)({})".format("|".join(tokens))
return re.search(pattern, text, re.IGNORECASE)
|
||||
|
|
@ -857,6 +857,7 @@ class RenderProductsVray(ARenderProducts):
|
|||
if default_ext in {"exr (multichannel)", "exr (deep)"}:
|
||||
default_ext = "exr"
|
||||
|
||||
colorspace = lib.get_color_management_output_transform()
|
||||
products = []
|
||||
|
||||
# add beauty as default when not disabled
|
||||
|
|
@ -868,7 +869,7 @@ class RenderProductsVray(ARenderProducts):
|
|||
productName="",
|
||||
ext=default_ext,
|
||||
camera=camera,
|
||||
colorspace=lib.get_color_management_output_transform(),
|
||||
colorspace=colorspace,
|
||||
multipart=self.multipart
|
||||
)
|
||||
)
|
||||
|
|
@ -882,6 +883,7 @@ class RenderProductsVray(ARenderProducts):
|
|||
productName="Alpha",
|
||||
ext=default_ext,
|
||||
camera=camera,
|
||||
colorspace=colorspace,
|
||||
multipart=self.multipart
|
||||
)
|
||||
)
|
||||
|
|
@ -917,7 +919,8 @@ class RenderProductsVray(ARenderProducts):
|
|||
product = RenderProduct(productName=name,
|
||||
ext=default_ext,
|
||||
aov=aov,
|
||||
camera=camera)
|
||||
camera=camera,
|
||||
colorspace=colorspace)
|
||||
products.append(product)
|
||||
# Continue as we've processed this special case AOV
|
||||
continue
|
||||
|
|
@ -929,7 +932,7 @@ class RenderProductsVray(ARenderProducts):
|
|||
ext=default_ext,
|
||||
aov=aov,
|
||||
camera=camera,
|
||||
colorspace=lib.get_color_management_output_transform()
|
||||
colorspace=colorspace
|
||||
)
|
||||
products.append(product)
|
||||
|
||||
|
|
@ -1051,7 +1054,7 @@ class RenderProductsRedshift(ARenderProducts):
|
|||
def get_files(self, product):
|
||||
# When outputting AOVs we need to replace Redshift specific AOV tokens
|
||||
# with Maya render tokens for generating file sequences. We validate to
|
||||
# a specific AOV fileprefix so we only need to accout for one
|
||||
# a specific AOV fileprefix so we only need to account for one
|
||||
# replacement.
|
||||
if not product.multipart and product.driver:
|
||||
file_prefix = self._get_attr(product.driver + ".filePrefix")
|
||||
|
|
@ -1130,6 +1133,7 @@ class RenderProductsRedshift(ARenderProducts):
|
|||
products = []
|
||||
light_groups_enabled = False
|
||||
has_beauty_aov = False
|
||||
colorspace = lib.get_color_management_output_transform()
|
||||
for aov in aovs:
|
||||
enabled = self._get_attr(aov, "enabled")
|
||||
if not enabled:
|
||||
|
|
@ -1173,7 +1177,8 @@ class RenderProductsRedshift(ARenderProducts):
|
|||
ext=ext,
|
||||
multipart=False,
|
||||
camera=camera,
|
||||
driver=aov)
|
||||
driver=aov,
|
||||
colorspace=colorspace)
|
||||
products.append(product)
|
||||
|
||||
if light_groups:
|
||||
|
|
@ -1188,7 +1193,8 @@ class RenderProductsRedshift(ARenderProducts):
|
|||
ext=ext,
|
||||
multipart=False,
|
||||
camera=camera,
|
||||
driver=aov)
|
||||
driver=aov,
|
||||
colorspace=colorspace)
|
||||
products.append(product)
|
||||
|
||||
# When a Beauty AOV is added manually, it will be rendered as
|
||||
|
|
@ -1204,7 +1210,8 @@ class RenderProductsRedshift(ARenderProducts):
|
|||
RenderProduct(productName=beauty_name,
|
||||
ext=ext,
|
||||
multipart=self.multipart,
|
||||
camera=camera))
|
||||
camera=camera,
|
||||
colorspace=colorspace))
|
||||
|
||||
return products
|
||||
|
||||
|
|
@ -1236,6 +1243,8 @@ class RenderProductsRenderman(ARenderProducts):
|
|||
"""
|
||||
from rfm2.api.displays import get_displays # noqa
|
||||
|
||||
colorspace = lib.get_color_management_output_transform()
|
||||
|
||||
cameras = [
|
||||
self.sanitize_camera_name(c)
|
||||
for c in self.get_renderable_cameras()
|
||||
|
|
@ -1302,7 +1311,8 @@ class RenderProductsRenderman(ARenderProducts):
|
|||
productName=aov_name,
|
||||
ext=extensions,
|
||||
camera=camera,
|
||||
multipart=True
|
||||
multipart=True,
|
||||
colorspace=colorspace
|
||||
)
|
||||
|
||||
if has_cryptomatte and matte_enabled:
|
||||
|
|
@ -1311,7 +1321,8 @@ class RenderProductsRenderman(ARenderProducts):
|
|||
aov=cryptomatte_aov,
|
||||
ext=extensions,
|
||||
camera=camera,
|
||||
multipart=True
|
||||
multipart=True,
|
||||
colorspace=colorspace
|
||||
)
|
||||
else:
|
||||
# this code should handle the case where no multipart
|
||||
|
|
|
|||
|
|
@ -19,6 +19,8 @@ from maya.app.renderSetup.model.override import (
|
|||
UniqueOverride
|
||||
)
|
||||
|
||||
from openpype.hosts.maya.api.lib import get_attribute
|
||||
|
||||
EXACT_MATCH = 0
|
||||
PARENT_MATCH = 1
|
||||
CLIENT_MATCH = 2
|
||||
|
|
@ -96,9 +98,6 @@ def get_attr_in_layer(node_attr, layer):
|
|||
|
||||
"""
|
||||
|
||||
# Delay pymel import to here because it's slow to load
|
||||
import pymel.core as pm
|
||||
|
||||
def _layer_needs_update(layer):
|
||||
"""Return whether layer needs updating."""
|
||||
# Use `getattr` as e.g. DEFAULT_RENDER_LAYER does not have
|
||||
|
|
@ -125,7 +124,7 @@ def get_attr_in_layer(node_attr, layer):
|
|||
node = history_overrides[-1] if history_overrides else override
|
||||
node_attr_ = node + ".original"
|
||||
|
||||
return pm.getAttr(node_attr_, asString=True)
|
||||
return get_attribute(node_attr_, asString=True)
|
||||
|
||||
layer = get_rendersetup_layer(layer)
|
||||
rs = renderSetup.instance()
|
||||
|
|
@ -145,7 +144,7 @@ def get_attr_in_layer(node_attr, layer):
|
|||
# we will let it error out.
|
||||
rs.switchToLayer(current_layer)
|
||||
|
||||
return pm.getAttr(node_attr, asString=True)
|
||||
return get_attribute(node_attr, asString=True)
|
||||
|
||||
overrides = get_attr_overrides(node_attr, layer)
|
||||
default_layer_value = get_default_layer_value(node_attr)
|
||||
|
|
@ -156,7 +155,7 @@ def get_attr_in_layer(node_attr, layer):
|
|||
for match, layer_override, index in overrides:
|
||||
if isinstance(layer_override, AbsOverride):
|
||||
# Absolute override
|
||||
value = pm.getAttr(layer_override.name() + ".attrValue")
|
||||
value = get_attribute(layer_override.name() + ".attrValue")
|
||||
if match == EXACT_MATCH:
|
||||
# value = value
|
||||
pass
|
||||
|
|
@ -168,8 +167,8 @@ def get_attr_in_layer(node_attr, layer):
|
|||
elif isinstance(layer_override, RelOverride):
|
||||
# Relative override
|
||||
# Value = Original * Multiply + Offset
|
||||
multiply = pm.getAttr(layer_override.name() + ".multiply")
|
||||
offset = pm.getAttr(layer_override.name() + ".offset")
|
||||
multiply = get_attribute(layer_override.name() + ".multiply")
|
||||
offset = get_attribute(layer_override.name() + ".offset")
|
||||
|
||||
if match == EXACT_MATCH:
|
||||
value = value * multiply + offset
|
||||
|
|
|
|||
|
|
@@ -33,7 +33,7 @@ class MayaTemplateBuilder(AbstractTemplateBuilder):
get_template_preset implementation)

Returns:
bool: Wether the template was succesfully imported or not
bool: Whether the template was successfully imported or not
"""

if cmds.objExists(PLACEHOLDER_SET):

@@ -116,7 +116,7 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
placeholder_name_parts = placeholder_data["builder_type"].split("_")

pos = 1
# add famlily in any
# add family in any
placeholder_family = placeholder_data["family"]
if placeholder_family:
placeholder_name_parts.insert(pos, placeholder_family)
@@ -118,7 +118,7 @@ class ImportMayaLoader(load.LoaderPlugin):
"clean_import",
label="Clean import",
default=False,
help="Should all occurences of cbId be purged?"
help="Should all occurrences of cbId be purged?"
)
]
|
|
|
|||
|
|
@ -84,7 +84,7 @@ class ArnoldStandinLoader(load.LoaderPlugin):
|
|||
sequence = is_sequence(os.listdir(os.path.dirname(self.fname)))
|
||||
cmds.setAttr(standin_shape + ".useFrameExtension", sequence)
|
||||
|
||||
nodes = [root, standin]
|
||||
nodes = [root, standin, standin_shape]
|
||||
if operator is not None:
|
||||
nodes.append(operator)
|
||||
self[:] = nodes
|
||||
|
|
@ -180,10 +180,10 @@ class ArnoldStandinLoader(load.LoaderPlugin):
|
|||
proxy_basename, proxy_path = self._get_proxy_path(path)
|
||||
|
||||
# Whether there is proxy or so, we still update the string operator.
|
||||
# If no proxy exists, the string operator wont replace anything.
|
||||
# If no proxy exists, the string operator won't replace anything.
|
||||
cmds.setAttr(
|
||||
string_replace_operator + ".match",
|
||||
"resources/" + proxy_basename,
|
||||
proxy_basename,
|
||||
type="string"
|
||||
)
|
||||
cmds.setAttr(
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ from openpype.pipeline import (
|
|||
get_representation_path,
|
||||
)
|
||||
from openpype.hosts.maya.api.pipeline import containerise
|
||||
from openpype.hosts.maya.api.lib import unique_namespace
|
||||
from openpype.hosts.maya.api.lib import unique_namespace, get_container_members
|
||||
|
||||
|
||||
class AudioLoader(load.LoaderPlugin):
|
||||
|
|
@ -52,17 +52,15 @@ class AudioLoader(load.LoaderPlugin):
|
|||
)
|
||||
|
||||
def update(self, container, representation):
|
||||
import pymel.core as pm
|
||||
|
||||
audio_node = None
|
||||
for node in pm.PyNode(container["objectName"]).members():
|
||||
if node.nodeType() == "audio":
|
||||
audio_node = node
|
||||
members = get_container_members(container)
|
||||
audio_nodes = cmds.ls(members, type="audio")
|
||||
|
||||
assert audio_node is not None, "Audio node not found."
|
||||
assert audio_nodes is not None, "Audio node not found."
|
||||
audio_node = audio_nodes[0]
|
||||
|
||||
path = get_representation_path(representation)
|
||||
audio_node.filename.set(path)
|
||||
cmds.setAttr("{}.filename".format(audio_node), path, type="string")
|
||||
cmds.setAttr(
|
||||
container["objectName"] + ".representation",
|
||||
str(representation["_id"]),
|
||||
|
|
@ -80,8 +78,12 @@ class AudioLoader(load.LoaderPlugin):
|
|||
asset = get_asset_by_id(
|
||||
project_name, subset["parent"], fields=["parent"]
|
||||
)
|
||||
audio_node.sourceStart.set(1 - asset["data"]["frameStart"])
|
||||
audio_node.sourceEnd.set(asset["data"]["frameEnd"])
|
||||
|
||||
source_start = 1 - asset["data"]["frameStart"]
|
||||
source_end = asset["data"]["frameEnd"]
|
||||
|
||||
cmds.setAttr("{}.sourceStart".format(audio_node), source_start)
|
||||
cmds.setAttr("{}.sourceEnd".format(audio_node), source_end)
|
||||
|
||||
def switch(self, container, representation):
|
||||
self.update(container, representation)
|
||||
|
|
|
|||
|
|
@ -1,5 +1,9 @@
|
|||
import os
|
||||
|
||||
import maya.cmds as cmds
|
||||
|
||||
from openpype.hosts.maya.api.pipeline import containerise
|
||||
from openpype.hosts.maya.api.lib import unique_namespace
|
||||
from openpype.pipeline import (
|
||||
load,
|
||||
get_representation_path
|
||||
|
|
@ -11,19 +15,15 @@ class GpuCacheLoader(load.LoaderPlugin):
|
|||
"""Load Alembic as gpuCache"""
|
||||
|
||||
families = ["model", "animation", "proxyAbc", "pointcache"]
|
||||
representations = ["abc"]
|
||||
representations = ["abc", "gpu_cache"]
|
||||
|
||||
label = "Import Gpu Cache"
|
||||
label = "Load Gpu Cache"
|
||||
order = -5
|
||||
icon = "code-fork"
|
||||
color = "orange"
|
||||
|
||||
def load(self, context, name, namespace, data):
|
||||
|
||||
import maya.cmds as cmds
|
||||
from openpype.hosts.maya.api.pipeline import containerise
|
||||
from openpype.hosts.maya.api.lib import unique_namespace
|
||||
|
||||
asset = context['asset']['name']
|
||||
namespace = namespace or unique_namespace(
|
||||
asset + "_",
|
||||
|
|
@ -42,10 +42,9 @@ class GpuCacheLoader(load.LoaderPlugin):
|
|||
c = colors.get('model')
|
||||
if c is not None:
|
||||
cmds.setAttr(root + ".useOutlinerColor", 1)
|
||||
cmds.setAttr(root + ".outlinerColor",
|
||||
(float(c[0])/255),
|
||||
(float(c[1])/255),
|
||||
(float(c[2])/255)
|
||||
cmds.setAttr(
|
||||
root + ".outlinerColor",
|
||||
(float(c[0]) / 255), (float(c[1]) / 255), (float(c[2]) / 255)
|
||||
)
|
||||
|
||||
# Create transform with shape
|
||||
|
|
@ -74,9 +73,6 @@ class GpuCacheLoader(load.LoaderPlugin):
|
|||
loader=self.__class__.__name__)
|
||||
|
||||
def update(self, container, representation):
|
||||
|
||||
import maya.cmds as cmds
|
||||
|
||||
path = get_representation_path(representation)
|
||||
|
||||
# Update the cache
|
||||
|
|
@ -96,7 +92,6 @@ class GpuCacheLoader(load.LoaderPlugin):
|
|||
self.update(container, representation)
|
||||
|
||||
def remove(self, container):
|
||||
import maya.cmds as cmds
|
||||
members = cmds.sets(container['objectName'], query=True)
|
||||
cmds.lockNode(members, lock=False)
|
||||
cmds.delete([container['objectName']] + members)
|
||||
|
|
|
|||
|
|
@ -11,11 +11,26 @@ from openpype.pipeline import (
|
|||
get_representation_path
|
||||
)
|
||||
from openpype.hosts.maya.api.pipeline import containerise
|
||||
from openpype.hosts.maya.api.lib import unique_namespace
|
||||
from openpype.hosts.maya.api.lib import (
|
||||
unique_namespace,
|
||||
namespaced,
|
||||
pairwise,
|
||||
get_container_members
|
||||
)
|
||||
|
||||
from maya import cmds
|
||||
|
||||
|
||||
def disconnect_inputs(plug):
|
||||
overrides = cmds.listConnections(plug,
|
||||
source=True,
|
||||
destination=False,
|
||||
plugs=True,
|
||||
connections=True) or []
|
||||
for dest, src in pairwise(overrides):
|
||||
cmds.disconnectAttr(src, dest)
|
||||
|
||||
|
||||
class CameraWindow(QtWidgets.QDialog):
|
||||
|
||||
def __init__(self, cameras):
|
||||
|
|
@ -74,6 +89,7 @@ class CameraWindow(QtWidgets.QDialog):
|
|||
self.camera = None
|
||||
self.close()
|
||||
|
||||
|
||||
class ImagePlaneLoader(load.LoaderPlugin):
|
||||
"""Specific loader of plate for image planes on selected camera."""
|
||||
|
||||
|
|
@ -84,9 +100,7 @@ class ImagePlaneLoader(load.LoaderPlugin):
|
|||
color = "orange"
|
||||
|
||||
def load(self, context, name, namespace, data, options=None):
|
||||
import pymel.core as pm
|
||||
|
||||
new_nodes = []
|
||||
image_plane_depth = 1000
|
||||
asset = context['asset']['name']
|
||||
namespace = namespace or unique_namespace(
|
||||
|
|
@ -96,16 +110,20 @@ class ImagePlaneLoader(load.LoaderPlugin):
|
|||
)
|
||||
|
||||
# Get camera from user selection.
|
||||
camera = None
|
||||
# is_static_image_plane = None
|
||||
# is_in_all_views = None
|
||||
if data:
|
||||
camera = pm.PyNode(data.get("camera"))
|
||||
camera = data.get("camera") if data else None
|
||||
|
||||
if not camera:
|
||||
cameras = pm.ls(type="camera")
|
||||
camera_names = {x.getParent().name(): x for x in cameras}
|
||||
camera_names["Create new camera."] = "create_camera"
|
||||
cameras = cmds.ls(type="camera")
|
||||
|
||||
# Cameras by names
|
||||
camera_names = {}
|
||||
for camera in cameras:
|
||||
parent = cmds.listRelatives(camera, parent=True, path=True)[0]
|
||||
camera_names[parent] = camera
|
||||
|
||||
camera_names["Create new camera."] = "create-camera"
|
||||
window = CameraWindow(camera_names.keys())
|
||||
window.exec_()
|
||||
# Skip if no camera was selected (Dialog was closed)
|
||||
|
|
@ -113,43 +131,48 @@ class ImagePlaneLoader(load.LoaderPlugin):
|
|||
return
|
||||
camera = camera_names[window.camera]
|
||||
|
||||
if camera == "create_camera":
|
||||
camera = pm.createNode("camera")
|
||||
if camera == "create-camera":
|
||||
camera = cmds.createNode("camera")
|
||||
|
||||
if camera is None:
|
||||
return
|
||||
|
||||
try:
|
||||
camera.displayResolution.set(1)
|
||||
camera.farClipPlane.set(image_plane_depth * 10)
|
||||
cmds.setAttr("{}.displayResolution".format(camera), True)
|
||||
cmds.setAttr("{}.farClipPlane".format(camera),
|
||||
image_plane_depth * 10)
|
||||
except RuntimeError:
|
||||
pass
|
||||
|
||||
# Create image plane
|
||||
image_plane_transform, image_plane_shape = pm.imagePlane(
|
||||
fileName=context["representation"]["data"]["path"],
|
||||
camera=camera)
|
||||
image_plane_shape.depth.set(image_plane_depth)
|
||||
with namespaced(namespace):
|
||||
# Create inside the namespace
|
||||
image_plane_transform, image_plane_shape = cmds.imagePlane(
|
||||
fileName=context["representation"]["data"]["path"],
|
||||
camera=camera
|
||||
)
|
||||
start_frame = cmds.playbackOptions(query=True, min=True)
|
||||
end_frame = cmds.playbackOptions(query=True, max=True)
|
||||
|
||||
|
||||
start_frame = pm.playbackOptions(q=True, min=True)
|
||||
end_frame = pm.playbackOptions(q=True, max=True)
|
||||
|
||||
image_plane_shape.frameOffset.set(0)
|
||||
image_plane_shape.frameIn.set(start_frame)
|
||||
image_plane_shape.frameOut.set(end_frame)
|
||||
image_plane_shape.frameCache.set(end_frame)
|
||||
image_plane_shape.useFrameExtension.set(1)
|
||||
for attr, value in {
|
||||
"depth": image_plane_depth,
|
||||
"frameOffset": 0,
|
||||
"frameIn": start_frame,
|
||||
"frameOut": end_frame,
|
||||
"frameCache": end_frame,
|
||||
"useFrameExtension": True
|
||||
}.items():
|
||||
plug = "{}.{}".format(image_plane_shape, attr)
|
||||
cmds.setAttr(plug, value)
|
||||
|
||||
movie_representations = ["mov", "preview"]
|
||||
if context["representation"]["name"] in movie_representations:
|
||||
# Need to get "type" by string, because its a method as well.
|
||||
pm.Attribute(image_plane_shape + ".type").set(2)
|
||||
cmds.setAttr(image_plane_shape + ".type", 2)
|
||||
|
||||
# Ask user whether to use sequence or still image.
|
||||
if context["representation"]["name"] == "exr":
|
||||
# Ensure OpenEXRLoader plugin is loaded.
|
||||
pm.loadPlugin("OpenEXRLoader.mll", quiet=True)
|
||||
cmds.loadPlugin("OpenEXRLoader", quiet=True)
|
||||
|
||||
message = (
|
||||
"Hold image sequence on first frame?"
|
||||
|
|
@ -161,32 +184,18 @@ class ImagePlaneLoader(load.LoaderPlugin):
|
|||
None,
|
||||
"Frame Hold.",
|
||||
message,
|
||||
QtWidgets.QMessageBox.Ok,
|
||||
QtWidgets.QMessageBox.Cancel
|
||||
QtWidgets.QMessageBox.Yes,
|
||||
QtWidgets.QMessageBox.No
|
||||
)
|
||||
if reply == QtWidgets.QMessageBox.Ok:
|
||||
# find the input and output of frame extension
|
||||
expressions = image_plane_shape.frameExtension.inputs()
|
||||
frame_ext_output = image_plane_shape.frameExtension.outputs()
|
||||
if expressions:
|
||||
# the "time1" node is non-deletable attr
|
||||
# in Maya, use disconnectAttr instead
|
||||
pm.disconnectAttr(expressions, frame_ext_output)
|
||||
if reply == QtWidgets.QMessageBox.Yes:
|
||||
frame_extension_plug = "{}.frameExtension".format(image_plane_shape) # noqa
|
||||
|
||||
if not image_plane_shape.frameExtension.isFreeToChange():
|
||||
raise RuntimeError("Can't set frame extension for {}".format(image_plane_shape)) # noqa
|
||||
# get the node of time instead and set the time for it.
|
||||
image_plane_shape.frameExtension.set(start_frame)
|
||||
# Remove current frame expression
|
||||
disconnect_inputs(frame_extension_plug)
|
||||
|
||||
new_nodes.extend(
|
||||
[
|
||||
image_plane_transform.longName().split("|")[-1],
|
||||
image_plane_shape.longName().split("|")[-1]
|
||||
]
|
||||
)
|
||||
cmds.setAttr(frame_extension_plug, start_frame)
|
||||
|
||||
for node in new_nodes:
|
||||
pm.rename(node, "{}:{}".format(namespace, node))
|
||||
new_nodes = [image_plane_transform, image_plane_shape]
|
||||
|
||||
return containerise(
|
||||
name=name,
|
||||
|
|
@ -197,21 +206,19 @@ class ImagePlaneLoader(load.LoaderPlugin):
|
|||
)
|
||||
|
||||
def update(self, container, representation):
|
||||
import pymel.core as pm
|
||||
image_plane_shape = None
|
||||
for node in pm.PyNode(container["objectName"]).members():
|
||||
if node.nodeType() == "imagePlane":
|
||||
image_plane_shape = node
|
||||
|
||||
assert image_plane_shape is not None, "Image plane not found."
|
||||
members = get_container_members(container)
|
||||
image_planes = cmds.ls(members, type="imagePlane")
|
||||
assert image_planes, "Image plane not found."
|
||||
image_plane_shape = image_planes[0]
|
||||
|
||||
path = get_representation_path(representation)
|
||||
image_plane_shape.imageName.set(path)
|
||||
cmds.setAttr(
|
||||
container["objectName"] + ".representation",
|
||||
str(representation["_id"]),
|
||||
type="string"
|
||||
)
|
||||
cmds.setAttr("{}.imageName".format(image_plane_shape),
|
||||
path,
|
||||
type="string")
|
||||
cmds.setAttr("{}.representation".format(container["objectName"]),
|
||||
str(representation["_id"]),
|
||||
type="string")
|
||||
|
||||
# Set frame range.
|
||||
project_name = legacy_io.active_project()
|
||||
|
|
@ -227,10 +234,14 @@ class ImagePlaneLoader(load.LoaderPlugin):
|
|||
start_frame = asset["data"]["frameStart"]
|
||||
end_frame = asset["data"]["frameEnd"]
|
||||
|
||||
image_plane_shape.frameOffset.set(0)
|
||||
image_plane_shape.frameIn.set(start_frame)
|
||||
image_plane_shape.frameOut.set(end_frame)
|
||||
image_plane_shape.frameCache.set(end_frame)
|
||||
for attr, value in {
|
||||
"frameOffset": 0,
|
||||
"frameIn": start_frame,
|
||||
"frameOut": end_frame,
|
||||
"frameCache": end_frame
|
||||
}:
|
||||
plug = "{}.{}".format(image_plane_shape, attr)
|
||||
cmds.setAttr(plug, value)
|
||||
|
||||
def switch(self, container, representation):
|
||||
self.update(container, representation)
|
||||
|
|
|
|||
|
|
@ -1,11 +1,89 @@
|
|||
import os
|
||||
import difflib
|
||||
import contextlib
|
||||
from maya import cmds
|
||||
|
||||
from openpype.pipeline import registered_host
|
||||
from openpype.pipeline.create import CreateContext
|
||||
from openpype.settings import get_project_settings
|
||||
import openpype.hosts.maya.api.plugin
|
||||
from openpype.hosts.maya.api.lib import maintained_selection
|
||||
from openpype.hosts.maya.api.lib import (
|
||||
maintained_selection,
|
||||
get_container_members,
|
||||
parent_nodes
|
||||
)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def preserve_modelpanel_cameras(container, log=None):
|
||||
"""Preserve camera members of container in the modelPanels.
|
||||
|
||||
This is used to ensure a camera remains in the modelPanels after updating
|
||||
to a new version.
|
||||
|
||||
"""
|
||||
|
||||
# Get the modelPanels that used the old camera
|
||||
members = get_container_members(container)
|
||||
old_cameras = set(cmds.ls(members, type="camera", long=True))
|
||||
if not old_cameras:
|
||||
# No need to manage anything
|
||||
yield
|
||||
return
|
||||
|
||||
panel_cameras = {}
|
||||
for panel in cmds.getPanel(type="modelPanel"):
|
||||
cam = cmds.ls(cmds.modelPanel(panel, query=True, camera=True),
|
||||
long=True)
|
||||
|
||||
# Often but not always maya returns the transform from the
|
||||
# modelPanel as opposed to the camera shape, so we convert it
|
||||
# to explicitly be the camera shape
|
||||
if cmds.nodeType(cam) != "camera":
|
||||
cam = cmds.listRelatives(cam,
|
||||
children=True,
|
||||
fullPath=True,
|
||||
type="camera")[0]
|
||||
if cam in old_cameras:
|
||||
panel_cameras[panel] = cam
|
||||
|
||||
if not panel_cameras:
|
||||
# No need to manage anything
|
||||
yield
|
||||
return
|
||||
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
new_members = get_container_members(container)
|
||||
new_cameras = set(cmds.ls(new_members, type="camera", long=True))
|
||||
if not new_cameras:
|
||||
return
|
||||
|
||||
for panel, cam_name in panel_cameras.items():
|
||||
new_camera = None
|
||||
if cam_name in new_cameras:
|
||||
new_camera = cam_name
|
||||
elif len(new_cameras) == 1:
|
||||
new_camera = next(iter(new_cameras))
|
||||
else:
|
||||
# Multiple cameras in the updated container but not an exact
|
||||
# match detected by name. Find the closest match
|
||||
matches = difflib.get_close_matches(word=cam_name,
|
||||
possibilities=new_cameras,
|
||||
n=1)
|
||||
if matches:
|
||||
new_camera = matches[0] # best match
|
||||
if log:
|
||||
log.info("Camera in '{}' restored with "
|
||||
"closest match camera: {} (before: {})"
|
||||
.format(panel, new_camera, cam_name))
|
||||
|
||||
if not new_camera:
|
||||
# Unable to find the camera to re-apply in the modelpanel
|
||||
continue
|
||||
|
||||
cmds.modelPanel(panel, edit=True, camera=new_camera)
|
||||
|
||||
|
||||
class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
|
||||
|
|
@ -38,7 +116,6 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
|
|||
|
||||
def process_reference(self, context, name, namespace, options):
|
||||
import maya.cmds as cmds
|
||||
import pymel.core as pm
|
||||
|
||||
try:
|
||||
family = context["representation"]["context"]["family"]
|
||||
|
|
@ -65,7 +142,10 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
|
|||
|
||||
new_nodes = (list(set(nodes) - set(shapes)))
|
||||
|
||||
current_namespace = pm.namespaceInfo(currentNamespace=True)
|
||||
# if there are cameras, try to lock their transforms
|
||||
self._lock_camera_transforms(new_nodes)
|
||||
|
||||
current_namespace = cmds.namespaceInfo(currentNamespace=True)
|
||||
|
||||
if current_namespace != ":":
|
||||
group_name = current_namespace + ":" + group_name
|
||||
|
|
@ -75,37 +155,29 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
|
|||
self[:] = new_nodes
|
||||
|
||||
if attach_to_root:
|
||||
group_node = pm.PyNode(group_name)
|
||||
roots = set()
|
||||
roots = cmds.listRelatives(group_name,
|
||||
children=True,
|
||||
fullPath=True) or []
|
||||
|
||||
for node in new_nodes:
|
||||
try:
|
||||
roots.add(pm.PyNode(node).getAllParents()[-2])
|
||||
except: # noqa: E722
|
||||
pass
|
||||
if family not in {"layout", "setdress",
|
||||
"mayaAscii", "mayaScene"}:
|
||||
# QUESTION Why do we need to exclude these families?
|
||||
with parent_nodes(roots, parent=None):
|
||||
cmds.xform(group_name, zeroTransformPivots=True)
|
||||
|
||||
if family not in ["layout", "setdress",
|
||||
"mayaAscii", "mayaScene"]:
|
||||
for root in roots:
|
||||
root.setParent(world=True)
|
||||
|
||||
group_node.zeroTransformPivots()
|
||||
for root in roots:
|
||||
root.setParent(group_node)
|
||||
|
||||
cmds.setAttr(group_name + ".displayHandle", 1)
|
||||
cmds.setAttr("{}.displayHandle".format(group_name), 1)
|
||||
|
||||
settings = get_project_settings(os.environ['AVALON_PROJECT'])
|
||||
colors = settings['maya']['load']['colors']
|
||||
c = colors.get(family)
|
||||
if c is not None:
|
||||
group_node.useOutlinerColor.set(1)
|
||||
group_node.outlinerColor.set(
|
||||
(float(c[0]) / 255),
|
||||
(float(c[1]) / 255),
|
||||
(float(c[2]) / 255))
|
||||
cmds.setAttr("{}.useOutlinerColor".format(group_name), 1)
|
||||
cmds.setAttr("{}.outlinerColor".format(group_name),
|
||||
(float(c[0]) / 255),
|
||||
(float(c[1]) / 255),
|
||||
(float(c[2]) / 255))
|
||||
|
||||
cmds.setAttr(group_name + ".displayHandle", 1)
|
||||
cmds.setAttr("{}.displayHandle".format(group_name), 1)
|
||||
# get bounding box
|
||||
bbox = cmds.exactWorldBoundingBox(group_name)
|
||||
# get pivot position on world space
|
||||
|
|
@ -119,20 +191,30 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
|
|||
cy = cy + pivot[1]
|
||||
cz = cz + pivot[2]
|
||||
# set selection handle offset to center of bounding box
|
||||
cmds.setAttr(group_name + ".selectHandleX", cx)
|
||||
cmds.setAttr(group_name + ".selectHandleY", cy)
|
||||
cmds.setAttr(group_name + ".selectHandleZ", cz)
|
||||
cmds.setAttr("{}.selectHandleX".format(group_name), cx)
|
||||
cmds.setAttr("{}.selectHandleY".format(group_name), cy)
|
||||
cmds.setAttr("{}.selectHandleZ".format(group_name), cz)
|
||||
|
||||
if family == "rig":
|
||||
self._post_process_rig(name, namespace, context, options)
|
||||
else:
|
||||
if "translate" in options:
|
||||
cmds.setAttr(group_name + ".t", *options["translate"])
|
||||
cmds.setAttr("{}.translate".format(group_name),
|
||||
*options["translate"])
|
||||
return new_nodes
|
||||
|
||||
def switch(self, container, representation):
|
||||
self.update(container, representation)
|
||||
|
||||
def update(self, container, representation):
|
||||
with preserve_modelpanel_cameras(container, log=self.log):
|
||||
super(ReferenceLoader, self).update(container, representation)
|
||||
|
||||
# We also want to lock camera transforms on any new cameras in the
|
||||
# reference or for a camera which might have changed names.
|
||||
members = get_container_members(container)
|
||||
self._lock_camera_transforms(members)
|
||||
|
||||
def _post_process_rig(self, name, namespace, context, options):
|
||||
|
||||
output = next((node for node in self if
|
||||
|
|
@ -163,3 +245,18 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
|
|||
variant=namespace,
|
||||
pre_create_data={"use_selection": True}
|
||||
)
|
||||
|
||||
def _lock_camera_transforms(self, nodes):
|
||||
cameras = cmds.ls(nodes, type="camera")
|
||||
if not cameras:
|
||||
return
|
||||
|
||||
# Check the Maya version, lockTransform has been introduced since
|
||||
# Maya 2016.5 Ext 2
|
||||
version = int(cmds.about(version=True))
|
||||
if version >= 2016:
|
||||
for camera in cameras:
|
||||
cmds.camera(camera, edit=True, lockTransform=True)
|
||||
else:
|
||||
self.log.warning("This version of Maya does not support locking of"
|
||||
" transforms of cameras.")
|
||||
|
|
|
|||
|
|
@@ -1,6 +1,7 @@
from maya import cmds

import pyblish.api
from openpype.hosts.maya.api.lib import get_all_children


class CollectArnoldSceneSource(pyblish.api.InstancePlugin):

@@ -21,18 +22,21 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
self.log.warning("Skipped empty instance: \"%s\" " % objset)
continue
if objset.endswith("content_SET"):
instance.data["setMembers"] = cmds.ls(members, long=True)
self.log.debug("content members: {}".format(members))
members = cmds.ls(members, long=True)
children = get_all_children(members)
instance.data["contentMembers"] = children
self.log.debug("content members: {}".format(children))
elif objset.endswith("proxy_SET"):
instance.data["proxy"] = cmds.ls(members, long=True)
self.log.debug("proxy members: {}".format(members))
set_members = get_all_children(cmds.ls(members, long=True))
instance.data["proxy"] = set_members
self.log.debug("proxy members: {}".format(set_members))

# Use camera in object set if present else default to render globals
# camera.
cameras = cmds.ls(type="camera", long=True)
renderable = [c for c in cameras if cmds.getAttr("%s.renderable" % c)]
camera = renderable[0]
for node in instance.data["setMembers"]:
for node in instance.data["contentMembers"]:
camera_shapes = cmds.listRelatives(
node, shapes=True, type="camera"
)