mirror of
https://github.com/ynput/ayon-core.git
synced 2026-01-02 00:44:52 +01:00
Merge branch 'develop' into feature/blender-improved_asset_handling
This commit is contained in:
commit
a278938b5a
265 changed files with 8218 additions and 8294 deletions
23
.github/workflows/nightly_merge.yml
vendored
Normal file
23
.github/workflows/nightly_merge.yml
vendored
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
name: Nightly Merge
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '21 3 * * 3,6'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
develop-to-main:
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: 🚛 Checkout Code
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Merge development -> main
|
||||
uses: devmasx/merge-branch@v1.3.1
|
||||
with:
|
||||
type: now
|
||||
from_branch: develop
|
||||
target_branch: main
|
||||
github_token: ${{ secrets.TOKEN }}
|
||||
93
.github/workflows/prerelease.yml
vendored
Normal file
93
.github/workflows/prerelease.yml
vendored
Normal file
|
|
@ -0,0 +1,93 @@
|
|||
name: Nightly Prerelease
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
|
||||
|
||||
jobs:
|
||||
create_nightly:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: 🚛 Checkout Code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.7
|
||||
|
||||
- name: Install Python requirements
|
||||
run: pip install gitpython semver
|
||||
|
||||
- name: 🔎 Determine next version type
|
||||
id: version_type
|
||||
run: |
|
||||
TYPE=$(python ./tools/ci_tools.py --bump)
|
||||
|
||||
echo ::set-output name=type::$TYPE
|
||||
|
||||
- name: 💉 Inject new version into files
|
||||
id: version
|
||||
if: steps.version_type.outputs.type != 'skip'
|
||||
run: |
|
||||
RESULT=$(python ./tools/ci_tools.py --nightly)
|
||||
|
||||
echo ::set-output name=next_tag::$RESULT
|
||||
|
||||
- name: "✏️ Generate full changelog"
|
||||
if: steps.version_type.outputs.type != 'skip'
|
||||
id: generate-full-changelog
|
||||
uses: heinrichreimer/github-changelog-generator-action@v2.2
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
breakingLabel: '#### 💥 Breaking'
|
||||
enhancementLabel: '#### 🚀 Enhancements'
|
||||
bugsLabel: '#### 🐛 Bug fixes'
|
||||
deprecatedLabel: '#### ⚠️ Deprecations'
|
||||
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]}}'
|
||||
issues: false
|
||||
issuesWoLabels: false
|
||||
pullRequests: true
|
||||
prWoLabels: false
|
||||
author: false
|
||||
unreleased: true
|
||||
compareLink: true
|
||||
stripGeneratorNotice: true
|
||||
verbose: true
|
||||
unreleasedLabel: ${{ steps.version.outputs.next_tag }}
|
||||
excludeTagsRegex: "CI/.+"
|
||||
releaseBranch: "main"
|
||||
|
||||
- name: "🖨️ Print changelog to console"
|
||||
if: steps.version_type.outputs.type != 'skip'
|
||||
run: cat CHANGELOG.md
|
||||
|
||||
- name: 💾 Commit and Tag
|
||||
id: git_commit
|
||||
if: steps.version_type.outputs.type != 'skip'
|
||||
run: |
|
||||
git config user.email ${{ secrets.CI_EMAIL }}
|
||||
git config user.name ${{ secrets.CI_USER }}
|
||||
cd repos/avalon-core
|
||||
git checkout main
|
||||
git pull
|
||||
cd ../..
|
||||
git add .
|
||||
git commit -m "[Automated] Bump version"
|
||||
tag_name="CI/${{ steps.version.outputs.next_tag }}"
|
||||
git tag -a $tag_name -m "nightly build"
|
||||
git push
|
||||
git push origin $tag_name
|
||||
|
||||
- name: 🔨 Merge main back to develop
|
||||
uses: everlytic/branch-merge@1.1.0
|
||||
if: steps.version_type.outputs.type != 'skip'
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
source_ref: 'main'
|
||||
target_branch: 'develop'
|
||||
commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'
|
||||
99
.github/workflows/release.yml
vendored
Normal file
99
.github/workflows/release.yml
vendored
Normal file
|
|
@ -0,0 +1,99 @@
|
|||
name: Stable Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- '*[0-9].*[0-9].*[0-9]*'
|
||||
|
||||
jobs:
|
||||
create_release:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: 🚛 Checkout Code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.7
|
||||
- name: Install Python requirements
|
||||
run: pip install gitpython semver
|
||||
|
||||
- name: Set env
|
||||
run: |
|
||||
echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
|
||||
|
||||
git config user.email ${{ secrets.CI_EMAIL }}
|
||||
git config user.name ${{ secrets.CI_USER }}
|
||||
git fetch
|
||||
git checkout -b main origin/main
|
||||
git tag -d ${GITHUB_REF#refs/*/}
|
||||
git push origin --delete ${GITHUB_REF#refs/*/}
|
||||
echo PREVIOUS_VERSION=`git describe --tags --match="[0-9]*" --abbrev=0` >> $GITHUB_ENV
|
||||
|
||||
- name: 💉 Inject new version into files
|
||||
id: version
|
||||
if: steps.version_type.outputs.type != 'skip'
|
||||
run: |
|
||||
python ./tools/ci_tools.py --version ${{ env.RELEASE_VERSION }}
|
||||
|
||||
- name: "✏️ Generate full changelog"
|
||||
if: steps.version_type.outputs.type != 'skip'
|
||||
id: generate-full-changelog
|
||||
uses: heinrichreimer/github-changelog-generator-action@v2.2
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
breakingLabel: '#### 💥 Breaking'
|
||||
enhancementLabel: '#### 🚀 Enhancements'
|
||||
bugsLabel: '#### 🐛 Bug fixes'
|
||||
deprecatedLabel: '#### ⚠️ Deprecations'
|
||||
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]}}'
|
||||
issues: false
|
||||
issuesWoLabels: false
|
||||
pullRequests: true
|
||||
prWoLabels: false
|
||||
author: false
|
||||
unreleased: true
|
||||
compareLink: true
|
||||
stripGeneratorNotice: true
|
||||
verbose: true
|
||||
futureRelease: ${{ env.RELEASE_VERSION }}
|
||||
excludeTagsRegex: "CI/.+"
|
||||
releaseBranch: "main"
|
||||
|
||||
- name: "🖨️ Print changelog to console"
|
||||
run: echo ${{ steps.generate-last-changelog.outputs.changelog }}
|
||||
|
||||
- name: 💾 Commit and Tag
|
||||
id: git_commit
|
||||
if: steps.version_type.outputs.type != 'skip'
|
||||
run: |
|
||||
git add .
|
||||
git commit -m "[Automated] Release"
|
||||
tag_name="${{ env.RELEASE_VERSION }}"
|
||||
git push
|
||||
git tag -fa $tag_name -m "stable release"
|
||||
git push origin $tag_name
|
||||
|
||||
- name: "🚀 Github Release"
|
||||
uses: docker://antonyurchenko/git-release:latest
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
DRAFT_RELEASE: "false"
|
||||
PRE_RELEASE: "false"
|
||||
CHANGELOG_FILE: "CHANGELOG.md"
|
||||
ALLOW_EMPTY_CHANGELOG: "false"
|
||||
ALLOW_TAG_PREFIX: "true"
|
||||
|
||||
|
||||
- name: 🔨 Merge main back to develop
|
||||
uses: everlytic/branch-merge@1.1.0
|
||||
if: steps.version_type.outputs.type != 'skip'
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
source_ref: 'main'
|
||||
target_branch: 'develop'
|
||||
commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}'
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
pr-wo-labels=False
|
||||
exclude-labels=duplicate,question,invalid,wontfix,weekly-digest
|
||||
author=False
|
||||
unreleased=True
|
||||
since-tag=2.13.6
|
||||
release-branch=master
|
||||
enhancement-label=**Enhancements:**
|
||||
issues=False
|
||||
pulls=False
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
|
|
@ -36,6 +36,7 @@ Temporary Items
|
|||
# CX_Freeze
|
||||
###########
|
||||
/build
|
||||
/dist/
|
||||
|
||||
/vendor/bin/*
|
||||
/.venv
|
||||
|
|
|
|||
1
.gitmodules
vendored
1
.gitmodules
vendored
|
|
@ -1,7 +1,6 @@
|
|||
[submodule "repos/avalon-core"]
|
||||
path = repos/avalon-core
|
||||
url = https://github.com/pypeclub/avalon-core.git
|
||||
branch = develop
|
||||
[submodule "repos/avalon-unreal-integration"]
|
||||
path = repos/avalon-unreal-integration
|
||||
url = https://github.com/pypeclub/avalon-unreal-integration.git
|
||||
|
|
|
|||
444
CHANGELOG.md
444
CHANGELOG.md
|
|
@ -1,21 +1,352 @@
|
|||
# Changelog
|
||||
|
||||
## [2.18.0](https://github.com/pypeclub/openpype/tree/2.18.0) (2021-05-18)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.3...2.18.0)
|
||||
## [3.0.0](https://github.com/pypeclub/openpype/tree/3.0.0)
|
||||
|
||||
**Enhancements:**
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/2.18.1...3.0.0)
|
||||
|
||||
- Use SubsetLoader and multiple contexts for delete_old_versions [\#1484](ttps://github.com/pypeclub/OpenPype/pull/1484))
|
||||
- TVPaint: Increment workfile version on successfull publish. [\#1489](https://github.com/pypeclub/OpenPype/pull/1489)
|
||||
- Maya: Use of multiple deadline servers [\#1483](https://github.com/pypeclub/OpenPype/pull/1483)
|
||||
### Configuration
|
||||
- Studio Settings GUI: no more json configuration files.
|
||||
- OpenPype Modules can be turned on and off.
|
||||
- Easy to add Application versions.
|
||||
- Per Project Environment and plugin management.
|
||||
- Robust profile system for creating reviewables and burnins, with filtering based on Application, Task and data family.
|
||||
- Configurable publish plugins.
|
||||
- Options to make any validator or extractor, optional or disabled.
|
||||
- Color Management is now unified under anatomy settings.
|
||||
- Subset naming and grouping is fully configurable.
|
||||
- All project attributes can now be set directly in OpenPype settings.
|
||||
- Studio Setting can be locked to prevent unwanted artist changes.
|
||||
- You can now add per project and per task type templates for workfile initialization in most hosts.
|
||||
- Too many other individual configurable option to list in this changelog :)
|
||||
|
||||
### Local Settings
|
||||
- Local Settings GUI where users can change certain option on individual basis.
|
||||
- Application executables.
|
||||
- Project roots.
|
||||
- Project site sync settings.
|
||||
|
||||
### Build, Installation and Deployments
|
||||
- No requirements on artist machine.
|
||||
- Fully distributed workflow possible.
|
||||
- Self-contained installation.
|
||||
- Available on all three major platforms.
|
||||
- Automatic artist OpenPype updates.
|
||||
- Studio OpenPype repository for updates distribution.
|
||||
- Robust Build system.
|
||||
- Safe studio update versioning with staging and production options.
|
||||
- MacOS build generates .app and .dmg installer.
|
||||
- Windows build with installer creation script.
|
||||
|
||||
### Misc
|
||||
- System and diagnostic info tool in the tray.
|
||||
- Launching application from Launcher indicates activity.
|
||||
- All project roots are now named. Single root project are now achieved by having a single named root in the project anatomy.
|
||||
- Every project root is cast into environment variable as well, so it can be used in DCC instead of absolute path (depends on DCC support for env vars).
|
||||
- Basic support for task types, on top of task names.
|
||||
- Timer now change automatically when the context is switched inside running application.
|
||||
- 'Master" versions have been renamed to "Hero".
|
||||
- Extract Burnins now supports file sequences and color settings.
|
||||
- Extract Review support overscan cropping, better letterboxes and background colour fill.
|
||||
- Delivery tool for copying and renaming any published assets in bulk.
|
||||
- Harmony, Photoshop and After Effects now connect directly with OpenPype tray instead of spawning their own terminal.
|
||||
|
||||
### Project Manager GUI
|
||||
- Create Projects.
|
||||
- Create Shots and Assets.
|
||||
- Create Tasks and assign task types.
|
||||
- Fill required asset attributes.
|
||||
- Validations for duplicated or unsupported names.
|
||||
- Archive Assets.
|
||||
- Move Asset within hierarchy.
|
||||
|
||||
### Site Sync (beta)
|
||||
- Synchronization of published files between workstations and central storage.
|
||||
- Ability to add arbitrary storage providers to the Site Sync system.
|
||||
- Default setup includes Disk and Google Drive providers as examples.
|
||||
- Access to availability information from Loader and Scene Manager.
|
||||
- Sync queue GUI with filtering, error and status reporting.
|
||||
- Site sync can be configured on a per-project basis.
|
||||
- Bulk upload and download from the loader.
|
||||
|
||||
### Ftrack
|
||||
- Actions have customisable roles.
|
||||
- Settings on all actions are updated live and don't need openpype restart.
|
||||
- Ftrack module can now be turned off completely.
|
||||
- It is enough to specify ftrack server name and the URL will be formed correctly. So instead of mystudio.ftrackapp.com, it's possible to use simply: "mystudio".
|
||||
|
||||
### Editorial
|
||||
- Fully OTIO based editorial publishing.
|
||||
- Completely re-done Hiero publishing to be a lot simpler and faster.
|
||||
- Consistent conforming from Resolve, Hiero and Standalone Publisher.
|
||||
|
||||
### Backend
|
||||
- OpenPype and Avalon now always share the same database (in 2.x is was possible to split them).
|
||||
- Major codebase refactoring to allow for better CI, versioning and control of individual integrations.
|
||||
- OTIO is bundled with build.
|
||||
- OIIO is bundled with build.
|
||||
- FFMPEG is bundled with build.
|
||||
- Rest API and host WebSocket servers have been unified into a single local webserver.
|
||||
- Maya look assigner has been integrated into the main codebase.
|
||||
- Publish GUI has been integrated into the main codebase.
|
||||
- Studio and Project settings overrides are now stored in Mongo.
|
||||
- Too many other backend fixes and tweaks to list :), you can see full changelog on github for those.
|
||||
- OpenPype uses Poetry to manage it's virtual environment when running from code.
|
||||
- all applications can be marked as python 2 or 3 compatible to make the switch a bit easier.
|
||||
|
||||
|
||||
### Pull Requests since 3.0.0-rc.6
|
||||
|
||||
|
||||
**Implemented enhancements:**
|
||||
|
||||
- settings: task types enum entity [\#1605](https://github.com/pypeclub/OpenPype/issues/1605)
|
||||
- Settings: ignore keys in referenced schema [\#1600](https://github.com/pypeclub/OpenPype/issues/1600)
|
||||
- Maya: support for frame steps and frame lists [\#1585](https://github.com/pypeclub/OpenPype/issues/1585)
|
||||
- TVPaint: Publish workfile. [\#1548](https://github.com/pypeclub/OpenPype/issues/1548)
|
||||
- Loader: Current Asset Button [\#1448](https://github.com/pypeclub/OpenPype/issues/1448)
|
||||
- Hiero: publish with retiming [\#1377](https://github.com/pypeclub/OpenPype/issues/1377)
|
||||
- Ask user to restart after changing global environments in settings [\#910](https://github.com/pypeclub/OpenPype/issues/910)
|
||||
- add option to define paht to workfile template [\#895](https://github.com/pypeclub/OpenPype/issues/895)
|
||||
- Harmony: move server console to system tray [\#676](https://github.com/pypeclub/OpenPype/issues/676)
|
||||
- Standalone style [\#1630](https://github.com/pypeclub/OpenPype/pull/1630) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Faster hierarchical values push [\#1627](https://github.com/pypeclub/OpenPype/pull/1627) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Launcher tool style [\#1624](https://github.com/pypeclub/OpenPype/pull/1624) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Loader and Library loader enhancements [\#1623](https://github.com/pypeclub/OpenPype/pull/1623) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Tray style [\#1622](https://github.com/pypeclub/OpenPype/pull/1622) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Maya schemas cleanup [\#1610](https://github.com/pypeclub/OpenPype/pull/1610) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Settings: ignore keys in referenced schema [\#1608](https://github.com/pypeclub/OpenPype/pull/1608) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- settings: task types enum entity [\#1606](https://github.com/pypeclub/OpenPype/pull/1606) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Openpype style [\#1604](https://github.com/pypeclub/OpenPype/pull/1604) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- TVPaint: Publish workfile. [\#1597](https://github.com/pypeclub/OpenPype/pull/1597) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Nuke: add option to define path to workfile template [\#1571](https://github.com/pypeclub/OpenPype/pull/1571) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Crop overscan in Extract Review [\#1569](https://github.com/pypeclub/OpenPype/pull/1569) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Unreal and Blender: Material Workflow [\#1562](https://github.com/pypeclub/OpenPype/pull/1562) ([simonebarbieri](https://github.com/simonebarbieri))
|
||||
- Harmony: move server console to system tray [\#1560](https://github.com/pypeclub/OpenPype/pull/1560) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Ask user to restart after changing global environments in settings [\#1550](https://github.com/pypeclub/OpenPype/pull/1550) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Hiero: publish with retiming [\#1545](https://github.com/pypeclub/OpenPype/pull/1545) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- Use instance frame start instead of timeline. [\#1486](https://github.com/pypeclub/OpenPype/pull/1486)
|
||||
- Maya: Redshift - set proper start frame on proxy [\#1480](https://github.com/pypeclub/OpenPype/pull/1480)
|
||||
- Maya: wrong collection of playblasted frames [\#1517](https://github.com/pypeclub/OpenPype/pull/1517)
|
||||
- Existing subsets hints in creator [\#1502](https://github.com/pypeclub/OpenPype/pull/1502)
|
||||
- Library loader load asset documents on OpenPype start [\#1603](https://github.com/pypeclub/OpenPype/issues/1603)
|
||||
- Resolve: unable to load the same footage twice [\#1317](https://github.com/pypeclub/OpenPype/issues/1317)
|
||||
- Resolve: unable to load footage [\#1316](https://github.com/pypeclub/OpenPype/issues/1316)
|
||||
- Add required Python 2 modules [\#1291](https://github.com/pypeclub/OpenPype/issues/1291)
|
||||
- GUi scaling with hires displays [\#705](https://github.com/pypeclub/OpenPype/issues/705)
|
||||
- Maya: non unicode string in publish validation [\#673](https://github.com/pypeclub/OpenPype/issues/673)
|
||||
- Nuke: Rendered Frame validation is triggered by multiple collections [\#156](https://github.com/pypeclub/OpenPype/issues/156)
|
||||
- avalon-core debugging failing [\#80](https://github.com/pypeclub/OpenPype/issues/80)
|
||||
- Only check arnold shading group if arnold is used [\#72](https://github.com/pypeclub/OpenPype/issues/72)
|
||||
- Sync server Qt layout fix [\#1621](https://github.com/pypeclub/OpenPype/pull/1621) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Console Listener on Python 2 fix [\#1620](https://github.com/pypeclub/OpenPype/pull/1620) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Bug: Initialize blessed term only in console mode [\#1619](https://github.com/pypeclub/OpenPype/pull/1619) ([antirotor](https://github.com/antirotor))
|
||||
- Settings template skip paths support wrappers [\#1618](https://github.com/pypeclub/OpenPype/pull/1618) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Maya capture 'isolate\_view' fix + minor corrections [\#1617](https://github.com/pypeclub/OpenPype/pull/1617) ([2-REC](https://github.com/2-REC))
|
||||
- MacOs Fix launch of standalone publisher [\#1616](https://github.com/pypeclub/OpenPype/pull/1616) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- 'Delivery action' report fix + typos [\#1612](https://github.com/pypeclub/OpenPype/pull/1612) ([2-REC](https://github.com/2-REC))
|
||||
- List append fix in mutable dict settings [\#1599](https://github.com/pypeclub/OpenPype/pull/1599) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Documentation: Maya: fix review [\#1598](https://github.com/pypeclub/OpenPype/pull/1598) ([antirotor](https://github.com/antirotor))
|
||||
- Bugfix: Set certifi CA bundle for all platforms [\#1596](https://github.com/pypeclub/OpenPype/pull/1596) ([antirotor](https://github.com/antirotor))
|
||||
|
||||
**Merged pull requests:**
|
||||
|
||||
- Bump dns-packet from 1.3.1 to 1.3.4 in /website [\#1611](https://github.com/pypeclub/OpenPype/pull/1611) ([dependabot[bot]](https://github.com/apps/dependabot))
|
||||
- Maya: Render workflow fixes [\#1607](https://github.com/pypeclub/OpenPype/pull/1607) ([antirotor](https://github.com/antirotor))
|
||||
- Maya: support for frame steps and frame lists [\#1586](https://github.com/pypeclub/OpenPype/pull/1586) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- 3.0.0 - curated changelog [\#1284](https://github.com/pypeclub/OpenPype/pull/1284) ([mkolar](https://github.com/mkolar))
|
||||
|
||||
## [2.18.1](https://github.com/pypeclub/openpype/tree/2.18.1) (2021-06-03)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/2.18.0...2.18.1)
|
||||
|
||||
**Enhancements:**
|
||||
|
||||
- Faster hierarchical values push [\#1626](https://github.com/pypeclub/OpenPype/pull/1626)
|
||||
- Feature Delivery in library loader [\#1549](https://github.com/pypeclub/OpenPype/pull/1549)
|
||||
- Hiero: Initial frame publish support. [\#1172](https://github.com/pypeclub/OpenPype/pull/1172)
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- Maya capture 'isolate\_view' fix + minor corrections [\#1614](https://github.com/pypeclub/OpenPype/pull/1614)
|
||||
- 'Delivery action' report fix +typos [\#1613](https://github.com/pypeclub/OpenPype/pull/1613)
|
||||
- Delivery in LibraryLoader - fixed sequence issue [\#1590](https://github.com/pypeclub/OpenPype/pull/1590)
|
||||
- FFmpeg filters in quote marks [\#1588](https://github.com/pypeclub/OpenPype/pull/1588)
|
||||
- Ftrack delete action cause circular error [\#1581](https://github.com/pypeclub/OpenPype/pull/1581)
|
||||
- Fix Maya playblast. [\#1566](https://github.com/pypeclub/OpenPype/pull/1566)
|
||||
- More failsafes prevent errored runs. [\#1554](https://github.com/pypeclub/OpenPype/pull/1554)
|
||||
- Celaction publishing [\#1539](https://github.com/pypeclub/OpenPype/pull/1539)
|
||||
- celaction: app not starting [\#1533](https://github.com/pypeclub/OpenPype/pull/1533)
|
||||
|
||||
**Merged pull requests:**
|
||||
|
||||
- Maya: Render workflow fixes - 2.0 backport [\#1609](https://github.com/pypeclub/OpenPype/pull/1609)
|
||||
- Maya Hardware support [\#1553](https://github.com/pypeclub/OpenPype/pull/1553)
|
||||
|
||||
|
||||
## [CI/3.0.0-rc.6](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.6) (2021-05-27)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.5...CI/3.0.0-rc.6)
|
||||
|
||||
**Implemented enhancements:**
|
||||
|
||||
- Hiero: publish color and transformation soft-effects [\#1376](https://github.com/pypeclub/OpenPype/issues/1376)
|
||||
- Get rid of `AVALON\_HIERARCHY` and `hiearchy` key on asset [\#432](https://github.com/pypeclub/OpenPype/issues/432)
|
||||
- Sync to avalon do not store hierarchy key [\#1582](https://github.com/pypeclub/OpenPype/pull/1582) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Tools: launcher scripts for project manager [\#1557](https://github.com/pypeclub/OpenPype/pull/1557) ([antirotor](https://github.com/antirotor))
|
||||
- Simple tvpaint publish [\#1555](https://github.com/pypeclub/OpenPype/pull/1555) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Feature Delivery in library loader [\#1546](https://github.com/pypeclub/OpenPype/pull/1546) ([kalisp](https://github.com/kalisp))
|
||||
- Documentation: Dev and system build documentation [\#1543](https://github.com/pypeclub/OpenPype/pull/1543) ([antirotor](https://github.com/antirotor))
|
||||
- Color entity [\#1542](https://github.com/pypeclub/OpenPype/pull/1542) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Extract review bg color [\#1534](https://github.com/pypeclub/OpenPype/pull/1534) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- TVPaint loader settings [\#1530](https://github.com/pypeclub/OpenPype/pull/1530) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Blender can initialize differente user script paths [\#1528](https://github.com/pypeclub/OpenPype/pull/1528) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Blender and Unreal: Improved Animation Workflow [\#1514](https://github.com/pypeclub/OpenPype/pull/1514) ([simonebarbieri](https://github.com/simonebarbieri))
|
||||
- Hiero: publish color and transformation soft-effects [\#1511](https://github.com/pypeclub/OpenPype/pull/1511) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- OpenPype specific version issues [\#1583](https://github.com/pypeclub/OpenPype/issues/1583)
|
||||
- Ftrack login server can't work without stderr [\#1576](https://github.com/pypeclub/OpenPype/issues/1576)
|
||||
- Mac application launch [\#1575](https://github.com/pypeclub/OpenPype/issues/1575)
|
||||
- Settings are not propagated to Nuke write nodes [\#1538](https://github.com/pypeclub/OpenPype/issues/1538)
|
||||
- Subset names settings not applied for publishing [\#1537](https://github.com/pypeclub/OpenPype/issues/1537)
|
||||
- Nuke: callback at start not setting colorspace [\#1412](https://github.com/pypeclub/OpenPype/issues/1412)
|
||||
- Pype 3: Missing icon for Settings [\#1272](https://github.com/pypeclub/OpenPype/issues/1272)
|
||||
- Blender: cannot initialize Avalon if BLENDER\_USER\_SCRIPTS is already used [\#1050](https://github.com/pypeclub/OpenPype/issues/1050)
|
||||
- Ftrack delete action cause circular error [\#206](https://github.com/pypeclub/OpenPype/issues/206)
|
||||
- Build: stop cleaning of pyc files in build directory [\#1592](https://github.com/pypeclub/OpenPype/pull/1592) ([antirotor](https://github.com/antirotor))
|
||||
- Ftrack login server can't work without stderr [\#1591](https://github.com/pypeclub/OpenPype/pull/1591) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- FFmpeg filters in quote marks [\#1589](https://github.com/pypeclub/OpenPype/pull/1589) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- OpenPype specific version issues [\#1584](https://github.com/pypeclub/OpenPype/pull/1584) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Mac application launch [\#1580](https://github.com/pypeclub/OpenPype/pull/1580) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Ftrack delete action cause circular error [\#1579](https://github.com/pypeclub/OpenPype/pull/1579) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Hiero: publishing issues [\#1578](https://github.com/pypeclub/OpenPype/pull/1578) ([jezscha](https://github.com/jezscha))
|
||||
- Nuke: callback at start not setting colorspace [\#1561](https://github.com/pypeclub/OpenPype/pull/1561) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Bugfix PS subset and quick review [\#1541](https://github.com/pypeclub/OpenPype/pull/1541) ([kalisp](https://github.com/kalisp))
|
||||
- Settings are not propagated to Nuke write nodes [\#1540](https://github.com/pypeclub/OpenPype/pull/1540) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- OpenPype: Powershell scripts polishing [\#1536](https://github.com/pypeclub/OpenPype/pull/1536) ([antirotor](https://github.com/antirotor))
|
||||
- Host name collecting fix [\#1535](https://github.com/pypeclub/OpenPype/pull/1535) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Handle duplicated task names in project manager [\#1531](https://github.com/pypeclub/OpenPype/pull/1531) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Validate is file attribute in settings schema [\#1529](https://github.com/pypeclub/OpenPype/pull/1529) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
|
||||
**Merged pull requests:**
|
||||
|
||||
- Bump postcss from 8.2.8 to 8.3.0 in /website [\#1593](https://github.com/pypeclub/OpenPype/pull/1593) ([dependabot[bot]](https://github.com/apps/dependabot))
|
||||
- User installation documentation [\#1532](https://github.com/pypeclub/OpenPype/pull/1532) ([64qam](https://github.com/64qam))
|
||||
|
||||
## [CI/3.0.0-rc.5](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.5) (2021-05-19)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/2.18.0...CI/3.0.0-rc.5)
|
||||
|
||||
**Implemented enhancements:**
|
||||
|
||||
- OpenPype: Build - Add progress bars [\#1524](https://github.com/pypeclub/OpenPype/pull/1524) ([antirotor](https://github.com/antirotor))
|
||||
- Default environments per host imlementation [\#1522](https://github.com/pypeclub/OpenPype/pull/1522) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- OpenPype: use `semver` module for version resolution [\#1513](https://github.com/pypeclub/OpenPype/pull/1513) ([antirotor](https://github.com/antirotor))
|
||||
- Feature Aftereffects setting cleanup documentation [\#1510](https://github.com/pypeclub/OpenPype/pull/1510) ([kalisp](https://github.com/kalisp))
|
||||
- Feature Sync server settings enhancement [\#1501](https://github.com/pypeclub/OpenPype/pull/1501) ([kalisp](https://github.com/kalisp))
|
||||
- Project manager [\#1396](https://github.com/pypeclub/OpenPype/pull/1396) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- Unified schema definition [\#874](https://github.com/pypeclub/OpenPype/issues/874)
|
||||
- Maya: fix look assignment [\#1526](https://github.com/pypeclub/OpenPype/pull/1526) ([antirotor](https://github.com/antirotor))
|
||||
- Bugfix Sync server local site issues [\#1523](https://github.com/pypeclub/OpenPype/pull/1523) ([kalisp](https://github.com/kalisp))
|
||||
- Store as list dictionary check initial value with right type [\#1520](https://github.com/pypeclub/OpenPype/pull/1520) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Maya: wrong collection of playblasted frames [\#1515](https://github.com/pypeclub/OpenPype/pull/1515) ([mkolar](https://github.com/mkolar))
|
||||
- Convert pyblish logs to string at the moment of logging [\#1512](https://github.com/pypeclub/OpenPype/pull/1512) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- 3.0 | nuke: fixing start\_at with option gui [\#1509](https://github.com/pypeclub/OpenPype/pull/1509) ([jezscha](https://github.com/jezscha))
|
||||
- Tests: fix pype -\> openpype to make tests work again [\#1508](https://github.com/pypeclub/OpenPype/pull/1508) ([antirotor](https://github.com/antirotor))
|
||||
|
||||
**Merged pull requests:**
|
||||
|
||||
- OpenPype: disable submodule update with `--no-submodule-update` [\#1525](https://github.com/pypeclub/OpenPype/pull/1525) ([antirotor](https://github.com/antirotor))
|
||||
- Ftrack without autosync in Pype 3 [\#1519](https://github.com/pypeclub/OpenPype/pull/1519) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Feature Harmony setting cleanup documentation [\#1506](https://github.com/pypeclub/OpenPype/pull/1506) ([kalisp](https://github.com/kalisp))
|
||||
- Sync Server beginning of documentation [\#1471](https://github.com/pypeclub/OpenPype/pull/1471) ([kalisp](https://github.com/kalisp))
|
||||
- Blender: publish layout json [\#1348](https://github.com/pypeclub/OpenPype/pull/1348) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
|
||||
## [2.18.0](https://github.com/pypeclub/openpype/tree/2.18.0) (2021-05-18)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.4...2.18.0)
|
||||
|
||||
**Implemented enhancements:**
|
||||
|
||||
- Default environments per host imlementation [\#1405](https://github.com/pypeclub/OpenPype/issues/1405)
|
||||
- Blender: publish layout json [\#1346](https://github.com/pypeclub/OpenPype/issues/1346)
|
||||
- Ftrack without autosync in Pype 3 [\#1128](https://github.com/pypeclub/OpenPype/issues/1128)
|
||||
- Launcher: started action indicator [\#1102](https://github.com/pypeclub/OpenPype/issues/1102)
|
||||
- Launch arguments of applications [\#1094](https://github.com/pypeclub/OpenPype/issues/1094)
|
||||
- Publish: instance info [\#724](https://github.com/pypeclub/OpenPype/issues/724)
|
||||
- Review: ability to control review length [\#482](https://github.com/pypeclub/OpenPype/issues/482)
|
||||
- Colorized recognition of creator result [\#394](https://github.com/pypeclub/OpenPype/issues/394)
|
||||
- event assign user to started task [\#49](https://github.com/pypeclub/OpenPype/issues/49)
|
||||
- rebuild containers from reference in maya [\#55](https://github.com/pypeclub/OpenPype/issues/55)
|
||||
- nuke Load metadata [\#66](https://github.com/pypeclub/OpenPype/issues/66)
|
||||
- Maya: Safer handling of expected render output names [\#1496](https://github.com/pypeclub/OpenPype/pull/1496) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- TVPaint: Increment workfile version on successfull publish. [\#1489](https://github.com/pypeclub/OpenPype/pull/1489) ([tokejepsen](https://github.com/tokejepsen))
|
||||
- Use SubsetLoader and multiple contexts for delete\_old\_versions [\#1484](https://github.com/pypeclub/OpenPype/pull/1484) ([tokejepsen](https://github.com/tokejepsen))
|
||||
- Maya: Use of multiple deadline servers [\#1483](https://github.com/pypeclub/OpenPype/pull/1483) ([antirotor](https://github.com/antirotor))
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- Igniter version resolution doesn't consider it's own version [\#1505](https://github.com/pypeclub/OpenPype/issues/1505)
|
||||
- Maya: Safer handling of expected render output names [\#1159](https://github.com/pypeclub/OpenPype/issues/1159)
|
||||
- Harmony: Invalid render output from non-conventionally named instance [\#871](https://github.com/pypeclub/OpenPype/issues/871)
|
||||
- Existing subsets hints in creator [\#1503](https://github.com/pypeclub/OpenPype/pull/1503) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- nuke: space in node name breaking process [\#1494](https://github.com/pypeclub/OpenPype/pull/1494) ([jezscha](https://github.com/jezscha))
|
||||
- Maya: wrong collection of playblasted frames [\#1517](https://github.com/pypeclub/OpenPype/pull/1517) ([mkolar](https://github.com/mkolar))
|
||||
- Existing subsets hints in creator [\#1502](https://github.com/pypeclub/OpenPype/pull/1502) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Use instance frame start instead of timeline. [\#1486](https://github.com/pypeclub/OpenPype/pull/1486) ([tokejepsen](https://github.com/tokejepsen))
|
||||
- Maya: Redshift - set proper start frame on proxy [\#1480](https://github.com/pypeclub/OpenPype/pull/1480) ([antirotor](https://github.com/antirotor))
|
||||
|
||||
**Closed issues:**
|
||||
|
||||
- Nuke: wrong "star at" value on render load [\#1352](https://github.com/pypeclub/OpenPype/issues/1352)
|
||||
- DV Resolve - loading/updating - image video [\#915](https://github.com/pypeclub/OpenPype/issues/915)
|
||||
|
||||
**Merged pull requests:**
|
||||
|
||||
- nuke: fixing start\_at with option gui [\#1507](https://github.com/pypeclub/OpenPype/pull/1507) ([jezscha](https://github.com/jezscha))
|
||||
|
||||
## [CI/3.0.0-rc.4](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.4) (2021-05-12)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.3...CI/3.0.0-rc.4)
|
||||
|
||||
**Implemented enhancements:**
|
||||
|
||||
- Resolve: documentation [\#1490](https://github.com/pypeclub/OpenPype/issues/1490)
|
||||
- Hiero: audio to review [\#1378](https://github.com/pypeclub/OpenPype/issues/1378)
|
||||
- nks color clips after publish [\#44](https://github.com/pypeclub/OpenPype/issues/44)
|
||||
- Store data from modifiable dict as list [\#1504](https://github.com/pypeclub/OpenPype/pull/1504) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Use SubsetLoader and multiple contexts for delete\_old\_versions [\#1497](https://github.com/pypeclub/OpenPype/pull/1497) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Hiero: publish audio and add to review [\#1493](https://github.com/pypeclub/OpenPype/pull/1493) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Resolve: documentation [\#1491](https://github.com/pypeclub/OpenPype/pull/1491) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Change integratenew template profiles setting [\#1487](https://github.com/pypeclub/OpenPype/pull/1487) ([kalisp](https://github.com/kalisp))
|
||||
- Settings tool cleanup [\#1477](https://github.com/pypeclub/OpenPype/pull/1477) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Sorted Applications and Tools in Custom attribute [\#1476](https://github.com/pypeclub/OpenPype/pull/1476) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- PS - group all published instances [\#1416](https://github.com/pypeclub/OpenPype/pull/1416) ([kalisp](https://github.com/kalisp))
|
||||
- OpenPype: Support for Docker [\#1289](https://github.com/pypeclub/OpenPype/pull/1289) ([antirotor](https://github.com/antirotor))
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- Harmony: palettes publishing [\#1439](https://github.com/pypeclub/OpenPype/issues/1439)
|
||||
- Photoshop: validation for already created images [\#1435](https://github.com/pypeclub/OpenPype/issues/1435)
|
||||
- Nuke Extracts Thumbnail from frame out of shot range [\#963](https://github.com/pypeclub/OpenPype/issues/963)
|
||||
- Instance in same Context repairing [\#390](https://github.com/pypeclub/OpenPype/issues/390)
|
||||
- User Inactivity - Start timers sets wrong time [\#91](https://github.com/pypeclub/OpenPype/issues/91)
|
||||
- Use instance frame start instead of timeline [\#1499](https://github.com/pypeclub/OpenPype/pull/1499) ([mkolar](https://github.com/mkolar))
|
||||
- Various smaller fixes [\#1498](https://github.com/pypeclub/OpenPype/pull/1498) ([mkolar](https://github.com/mkolar))
|
||||
- nuke: space in node name breaking process [\#1495](https://github.com/pypeclub/OpenPype/pull/1495) ([jezscha](https://github.com/jezscha))
|
||||
- Codec determination in extract burnin [\#1492](https://github.com/pypeclub/OpenPype/pull/1492) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Undefined constant in subprocess module [\#1485](https://github.com/pypeclub/OpenPype/pull/1485) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- List entity catch add/remove item changes properly [\#1482](https://github.com/pypeclub/OpenPype/pull/1482) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Resolve: additional fixes of publishing workflow [\#1481](https://github.com/pypeclub/OpenPype/pull/1481) ([jezscha](https://github.com/jezscha))
|
||||
- Photoshop: validation for already created images [\#1436](https://github.com/pypeclub/OpenPype/pull/1436) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
|
||||
**Merged pull requests:**
|
||||
|
||||
- Maya: Support for looks on VRay Proxies [\#1443](https://github.com/pypeclub/OpenPype/pull/1443) ([antirotor](https://github.com/antirotor))
|
||||
|
||||
## [2.17.3](https://github.com/pypeclub/openpype/tree/2.17.3) (2021-05-06)
|
||||
|
||||
|
|
@ -23,15 +354,102 @@
|
|||
|
||||
**Fixed bugs:**
|
||||
|
||||
- Nuke: workfile version synced to db version always [\#1479](https://github.com/pypeclub/OpenPype/pull/1479)
|
||||
- Nuke: workfile version synced to db version always [\#1479](https://github.com/pypeclub/OpenPype/pull/1479) ([jezscha](https://github.com/jezscha))
|
||||
|
||||
## [CI/3.0.0-rc.3](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.3) (2021-05-05)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.2...CI/3.0.0-rc.3)
|
||||
|
||||
**Implemented enhancements:**
|
||||
|
||||
- Path entity with placeholder [\#1473](https://github.com/pypeclub/OpenPype/pull/1473) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Burnin custom font filepath [\#1472](https://github.com/pypeclub/OpenPype/pull/1472) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Poetry: Move to OpenPype [\#1449](https://github.com/pypeclub/OpenPype/pull/1449) ([antirotor](https://github.com/antirotor))
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- Mac SSL path needs to be relative to pype\_root [\#1469](https://github.com/pypeclub/OpenPype/issues/1469)
|
||||
- Resolve: fix loading clips to timeline [\#1421](https://github.com/pypeclub/OpenPype/issues/1421)
|
||||
- Wrong handling of slashes when loading on mac [\#1411](https://github.com/pypeclub/OpenPype/issues/1411)
|
||||
- Nuke openpype3 [\#1342](https://github.com/pypeclub/OpenPype/issues/1342)
|
||||
- Houdini launcher [\#1171](https://github.com/pypeclub/OpenPype/issues/1171)
|
||||
- Fix SyncServer get\_enabled\_projects should handle global state [\#1475](https://github.com/pypeclub/OpenPype/pull/1475) ([kalisp](https://github.com/kalisp))
|
||||
- Igniter buttons enable/disable fix [\#1474](https://github.com/pypeclub/OpenPype/pull/1474) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Mac SSL path needs to be relative to pype\_root [\#1470](https://github.com/pypeclub/OpenPype/pull/1470) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Resolve: 17 compatibility issues and load image sequences [\#1422](https://github.com/pypeclub/OpenPype/pull/1422) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
|
||||
## [CI/3.0.0-rc.2](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.2) (2021-05-04)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.2...CI/3.0.0-rc.2)
|
||||
|
||||
**Implemented enhancements:**
|
||||
|
||||
- Extract burnins with sequences [\#1467](https://github.com/pypeclub/OpenPype/pull/1467) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Extract burnins with color setting [\#1466](https://github.com/pypeclub/OpenPype/pull/1466) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- Fix groups check in Python 2 [\#1468](https://github.com/pypeclub/OpenPype/pull/1468) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
|
||||
## [2.17.2](https://github.com/pypeclub/openpype/tree/2.17.2) (2021-05-04)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.1...2.17.2)
|
||||
|
||||
**Enhancements:**
|
||||
**Implemented enhancements:**
|
||||
|
||||
- Forward/Backward compatible apps and tools with OpenPype 3 [\#1463](https://github.com/pypeclub/OpenPype/pull/1463)
|
||||
- Forward/Backward compatible apps and tools with OpenPype 3 [\#1463](https://github.com/pypeclub/OpenPype/pull/1463) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
|
||||
## [CI/3.0.0-rc.1](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.1) (2021-05-04)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.1...CI/3.0.0-rc.1)
|
||||
|
||||
**Implemented enhancements:**
|
||||
|
||||
- Only show studio settings to admins [\#1406](https://github.com/pypeclub/OpenPype/issues/1406)
|
||||
- Ftrack specific settings save warning messages [\#1458](https://github.com/pypeclub/OpenPype/pull/1458) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Faster settings actions [\#1446](https://github.com/pypeclub/OpenPype/pull/1446) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Feature/sync server priority [\#1444](https://github.com/pypeclub/OpenPype/pull/1444) ([kalisp](https://github.com/kalisp))
|
||||
- Faster settings UI loading [\#1442](https://github.com/pypeclub/OpenPype/pull/1442) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Igniter re-write [\#1441](https://github.com/pypeclub/OpenPype/pull/1441) ([mkolar](https://github.com/mkolar))
|
||||
- Wrap openpype build into installers [\#1419](https://github.com/pypeclub/OpenPype/pull/1419) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Extract review first documentation [\#1404](https://github.com/pypeclub/OpenPype/pull/1404) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Blender PySide2 install guide [\#1403](https://github.com/pypeclub/OpenPype/pull/1403) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Nuke: deadline submission with gpu [\#1394](https://github.com/pypeclub/OpenPype/pull/1394) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Igniter: Reverse item filter for OpenPype version [\#1349](https://github.com/pypeclub/OpenPype/pull/1349) ([antirotor](https://github.com/antirotor))
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- OpenPype Mongo URL definition [\#1450](https://github.com/pypeclub/OpenPype/issues/1450)
|
||||
- Various typos and smaller fixes [\#1464](https://github.com/pypeclub/OpenPype/pull/1464) ([mkolar](https://github.com/mkolar))
|
||||
- Validation of dynamic items in settings [\#1462](https://github.com/pypeclub/OpenPype/pull/1462) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- List can handle new items correctly [\#1459](https://github.com/pypeclub/OpenPype/pull/1459) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Settings actions process fix [\#1457](https://github.com/pypeclub/OpenPype/pull/1457) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Add to overrides actions fix [\#1456](https://github.com/pypeclub/OpenPype/pull/1456) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- OpenPype Mongo URL definition [\#1455](https://github.com/pypeclub/OpenPype/pull/1455) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Global settings save/load out of system settings [\#1447](https://github.com/pypeclub/OpenPype/pull/1447) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Keep metadata on remove overrides [\#1445](https://github.com/pypeclub/OpenPype/pull/1445) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Nuke: fixing undo for loaded mov and sequence [\#1432](https://github.com/pypeclub/OpenPype/pull/1432) ([jezscha](https://github.com/jezscha))
|
||||
- ExtractReview skip empty strings from settings [\#1431](https://github.com/pypeclub/OpenPype/pull/1431) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Bugfix Sync server tweaks [\#1430](https://github.com/pypeclub/OpenPype/pull/1430) ([kalisp](https://github.com/kalisp))
|
||||
- Hiero: missing thumbnail in review [\#1429](https://github.com/pypeclub/OpenPype/pull/1429) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Bugfix Maya in deadline for OpenPype [\#1428](https://github.com/pypeclub/OpenPype/pull/1428) ([kalisp](https://github.com/kalisp))
|
||||
- AE - validation for duration was 1 frame shorter [\#1427](https://github.com/pypeclub/OpenPype/pull/1427) ([kalisp](https://github.com/kalisp))
|
||||
- Houdini menu filename [\#1418](https://github.com/pypeclub/OpenPype/pull/1418) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Fix Avalon plugins attribute overrides [\#1413](https://github.com/pypeclub/OpenPype/pull/1413) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Nuke: submit to Deadline fails [\#1409](https://github.com/pypeclub/OpenPype/pull/1409) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Validate MongoDB Url on start [\#1407](https://github.com/pypeclub/OpenPype/pull/1407) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Nuke: fix set colorspace with new settings [\#1386](https://github.com/pypeclub/OpenPype/pull/1386) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- MacOs build and install issues [\#1380](https://github.com/pypeclub/OpenPype/pull/1380) ([mkolar](https://github.com/mkolar))
|
||||
|
||||
**Closed issues:**
|
||||
|
||||
- test [\#1452](https://github.com/pypeclub/OpenPype/issues/1452)
|
||||
|
||||
**Merged pull requests:**
|
||||
|
||||
- TVPaint frame range definition [\#1425](https://github.com/pypeclub/OpenPype/pull/1425) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Only show studio settings to admins [\#1420](https://github.com/pypeclub/OpenPype/pull/1420) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- TVPaint documentation [\#1305](https://github.com/pypeclub/OpenPype/pull/1305) ([64qam](https://github.com/64qam))
|
||||
|
||||
## [2.17.1](https://github.com/pypeclub/openpype/tree/2.17.1) (2021-04-30)
|
||||
|
||||
|
|
|
|||
|
|
@ -1,5 +1,7 @@
|
|||
## How to contribute to Pype
|
||||
|
||||
We are always happy for any contributions for OpenPype improvements. Before making a PR and starting working on an issue, please read these simple guidelines.
|
||||
|
||||
#### **Did you find a bug?**
|
||||
|
||||
1. Check in the issues and our [bug triage[(https://github.com/pypeclub/pype/projects/2) to make sure it wasn't reported already.
|
||||
|
|
@ -13,11 +15,11 @@
|
|||
- Open a new GitHub pull request with the patch.
|
||||
- Ensure the PR description clearly describes the problem and solution. Include the relevant issue number if applicable.
|
||||
|
||||
|
||||
#### **Do you intend to add a new feature or change an existing one?**
|
||||
|
||||
- Open a new thread in the [github discussions](https://github.com/pypeclub/pype/discussions/new)
|
||||
- Do not open issue untill the suggestion is discussed. We will convert accepted suggestions into backlog and point them to the relevant discussion thread to keep the context.
|
||||
- Do not open issue until the suggestion is discussed. We will convert accepted suggestions into backlog and point them to the relevant discussion thread to keep the context.
|
||||
- If you are already working on a new feature and you'd like it eventually merged to the main codebase, please consider making a DRAFT PR as soon as possible. This makes it a lot easier to give feedback, discuss the code and functionalit, plus it prevents multiple people tackling the same problem independently.
|
||||
|
||||
#### **Do you have questions about the source code?**
|
||||
|
||||
|
|
@ -41,13 +43,11 @@ A few important notes about 2.x and 3.x development:
|
|||
- Please keep the corresponding 2 and 3 PR names the same so they can be easily identified from the PR list page.
|
||||
- Each 2.x PR should be labeled with `2.x-dev` label.
|
||||
|
||||
Inside each PR, put a link to the corresponding PR
|
||||
Inside each PR, put a link to the corresponding PR for the other version
|
||||
|
||||
Of course if you want to contribute, feel free to make a PR to only 2.x/develop or develop, based on what you are using. While reviewing the PRs, we might convert the code to corresponding PR for the other release ourselves.
|
||||
|
||||
We might also change the target of you PR to and intermediate branch, rather than `develop` if we feel it requires some extra work on our end. That way, we preserve all your commits so you don't loos out on the contribution credits.
|
||||
|
||||
|
||||
We might also change the target of you PR to and intermediate branch, rather than `develop` if we feel it requires some extra work on our end. That way, we preserve all your commits so you don't loose out on the contribution credits.
|
||||
|
||||
|
||||
If a PR is targeted at 2.x release it must be labelled with 2x-dev label in Github.
|
||||
|
|
|
|||
514
HISTORY.md
514
HISTORY.md
|
|
@ -1,3 +1,514 @@
|
|||
# Changelog
|
||||
|
||||
|
||||
## [3.0.0](https://github.com/pypeclub/openpype/tree/3.0.0)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.6...3.0.0)
|
||||
|
||||
### Configuration
|
||||
- Studio Settings GUI: no more json configuration files.
|
||||
- OpenPype Modules can be turned on and off.
|
||||
- Easy to add Application versions.
|
||||
- Per Project Environment and plugin management.
|
||||
- Robust profile system for creating reviewables and burnins, with filtering based on Application, Task and data family.
|
||||
- Configurable publish plugins.
|
||||
- Options to make any validator or extractor, optional or disabled.
|
||||
- Color Management is now unified under anatomy settings.
|
||||
- Subset naming and grouping is fully configurable.
|
||||
- All project attributes can now be set directly in OpenPype settings.
|
||||
- Studio Setting can be locked to prevent unwanted artist changes.
|
||||
- You can now add per project and per task type templates for workfile initialization in most hosts.
|
||||
- Too many other individual configurable option to list in this changelog :)
|
||||
|
||||
### Local Settings
|
||||
- Local Settings GUI where users can change certain option on individual basis.
|
||||
- Application executables.
|
||||
- Project roots.
|
||||
- Project site sync settings.
|
||||
|
||||
### Build, Installation and Deployments
|
||||
- No requirements on artist machine.
|
||||
- Fully distributed workflow possible.
|
||||
- Self-contained installation.
|
||||
- Available on all three major platforms.
|
||||
- Automatic artist OpenPype updates.
|
||||
- Studio OpenPype repository for updates distribution.
|
||||
- Robust Build system.
|
||||
- Safe studio update versioning with staging and production options.
|
||||
- MacOS build generates .app and .dmg installer.
|
||||
- Windows build with installer creation script.
|
||||
|
||||
### Misc
|
||||
- System and diagnostic info tool in the tray.
|
||||
- Launching application from Launcher indicates activity.
|
||||
- All project roots are now named. Single root project are now achieved by having a single named root in the project anatomy.
|
||||
- Every project root is cast into environment variable as well, so it can be used in DCC instead of absolute path (depends on DCC support for env vars).
|
||||
- Basic support for task types, on top of task names.
|
||||
- Timer now change automatically when the context is switched inside running application.
|
||||
- 'Master" versions have been renamed to "Hero".
|
||||
- Extract Burnins now supports file sequences and color settings.
|
||||
- Extract Review support overscan cropping, better letterboxes and background colour fill.
|
||||
- Delivery tool for copying and renaming any published assets in bulk.
|
||||
- Harmony, Photoshop and After Effects now connect directly with OpenPype tray instead of spawning their own terminal.
|
||||
|
||||
### Project Manager GUI
|
||||
- Create Projects.
|
||||
- Create Shots and Assets.
|
||||
- Create Tasks and assign task types.
|
||||
- Fill required asset attributes.
|
||||
- Validations for duplicated or unsupported names.
|
||||
- Archive Assets.
|
||||
- Move Asset within hierarchy.
|
||||
|
||||
### Site Sync (beta)
|
||||
- Synchronization of published files between workstations and central storage.
|
||||
- Ability to add arbitrary storage providers to the Site Sync system.
|
||||
- Default setup includes Disk and Google Drive providers as examples.
|
||||
- Access to availability information from Loader and Scene Manager.
|
||||
- Sync queue GUI with filtering, error and status reporting.
|
||||
- Site sync can be configured on a per-project basis.
|
||||
- Bulk upload and download from the loader.
|
||||
|
||||
### Ftrack
|
||||
- Actions have customisable roles.
|
||||
- Settings on all actions are updated live and don't need openpype restart.
|
||||
- Ftrack module can now be turned off completely.
|
||||
- It is enough to specify ftrack server name and the URL will be formed correctly. So instead of mystudio.ftrackapp.com, it's possible to use simply: "mystudio".
|
||||
|
||||
### Editorial
|
||||
- Fully OTIO based editorial publishing.
|
||||
- Completely re-done Hiero publishing to be a lot simpler and faster.
|
||||
- Consistent conforming from Resolve, Hiero and Standalone Publisher.
|
||||
|
||||
### Backend
|
||||
- OpenPype and Avalon now always share the same database (in 2.x is was possible to split them).
|
||||
- Major codebase refactoring to allow for better CI, versioning and control of individual integrations.
|
||||
- OTIO is bundled with build.
|
||||
- OIIO is bundled with build.
|
||||
- FFMPEG is bundled with build.
|
||||
- Rest API and host WebSocket servers have been unified into a single local webserver.
|
||||
- Maya look assigner has been integrated into the main codebase.
|
||||
- Publish GUI has been integrated into the main codebase.
|
||||
- Studio and Project settings overrides are now stored in Mongo.
|
||||
- Too many other backend fixes and tweaks to list here :); see the full changelog on GitHub for those.
|
||||
- OpenPype uses Poetry to manage its virtual environment when running from code.
|
||||
- All applications can be marked as Python 2 or 3 compatible to make the switch a bit easier.
|
||||
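The unified local webserver item above can be illustrated with a small sketch that serves a REST endpoint and a WebSocket endpoint from one process. It uses `aiohttp` purely as an example framework with hypothetical routes; it is not the actual OpenPype server code:

```python
from aiohttp import web, WSMsgType


async def health(request):
    """Simple REST endpoint."""
    return web.json_response({"status": "ok"})


async def websocket_handler(request):
    """Persistent WebSocket connection, e.g. for a host integration."""
    ws = web.WebSocketResponse()
    await ws.prepare(request)
    async for msg in ws:
        if msg.type == WSMsgType.TEXT:
            await ws.send_str("echo: " + msg.data)
    return ws


app = web.Application()
app.add_routes([
    web.get("/api/health", health),      # REST route
    web.get("/ws", websocket_handler),   # WebSocket route on the same server
])

if __name__ == "__main__":
    web.run_app(app, host="127.0.0.1", port=8079)
```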
|
||||
|
||||
### Pull Requests since 3.0.0-rc.6
|
||||
|
||||
|
||||
**Implemented enhancements:**
|
||||
|
||||
- settings: task types enum entity [\#1605](https://github.com/pypeclub/OpenPype/issues/1605)
|
||||
- Settings: ignore keys in referenced schema [\#1600](https://github.com/pypeclub/OpenPype/issues/1600)
|
||||
- Maya: support for frame steps and frame lists [\#1585](https://github.com/pypeclub/OpenPype/issues/1585)
|
||||
- TVPaint: Publish workfile. [\#1548](https://github.com/pypeclub/OpenPype/issues/1548)
|
||||
- Loader: Current Asset Button [\#1448](https://github.com/pypeclub/OpenPype/issues/1448)
|
||||
- Hiero: publish with retiming [\#1377](https://github.com/pypeclub/OpenPype/issues/1377)
|
||||
- Ask user to restart after changing global environments in settings [\#910](https://github.com/pypeclub/OpenPype/issues/910)
|
||||
- add option to define path to workfile template [\#895](https://github.com/pypeclub/OpenPype/issues/895)
|
||||
- Harmony: move server console to system tray [\#676](https://github.com/pypeclub/OpenPype/issues/676)
|
||||
- Standalone style [\#1630](https://github.com/pypeclub/OpenPype/pull/1630) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Faster hierarchical values push [\#1627](https://github.com/pypeclub/OpenPype/pull/1627) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Launcher tool style [\#1624](https://github.com/pypeclub/OpenPype/pull/1624) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Loader and Library loader enhancements [\#1623](https://github.com/pypeclub/OpenPype/pull/1623) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Tray style [\#1622](https://github.com/pypeclub/OpenPype/pull/1622) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Maya schemas cleanup [\#1610](https://github.com/pypeclub/OpenPype/pull/1610) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Settings: ignore keys in referenced schema [\#1608](https://github.com/pypeclub/OpenPype/pull/1608) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- settings: task types enum entity [\#1606](https://github.com/pypeclub/OpenPype/pull/1606) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Openpype style [\#1604](https://github.com/pypeclub/OpenPype/pull/1604) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- TVPaint: Publish workfile. [\#1597](https://github.com/pypeclub/OpenPype/pull/1597) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Nuke: add option to define path to workfile template [\#1571](https://github.com/pypeclub/OpenPype/pull/1571) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Crop overscan in Extract Review [\#1569](https://github.com/pypeclub/OpenPype/pull/1569) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Unreal and Blender: Material Workflow [\#1562](https://github.com/pypeclub/OpenPype/pull/1562) ([simonebarbieri](https://github.com/simonebarbieri))
|
||||
- Harmony: move server console to system tray [\#1560](https://github.com/pypeclub/OpenPype/pull/1560) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Ask user to restart after changing global environments in settings [\#1550](https://github.com/pypeclub/OpenPype/pull/1550) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Hiero: publish with retiming [\#1545](https://github.com/pypeclub/OpenPype/pull/1545) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- Library loader load asset documents on OpenPype start [\#1603](https://github.com/pypeclub/OpenPype/issues/1603)
|
||||
- Resolve: unable to load the same footage twice [\#1317](https://github.com/pypeclub/OpenPype/issues/1317)
|
||||
- Resolve: unable to load footage [\#1316](https://github.com/pypeclub/OpenPype/issues/1316)
|
||||
- Add required Python 2 modules [\#1291](https://github.com/pypeclub/OpenPype/issues/1291)
|
||||
- GUi scaling with hires displays [\#705](https://github.com/pypeclub/OpenPype/issues/705)
|
||||
- Maya: non unicode string in publish validation [\#673](https://github.com/pypeclub/OpenPype/issues/673)
|
||||
- Nuke: Rendered Frame validation is triggered by multiple collections [\#156](https://github.com/pypeclub/OpenPype/issues/156)
|
||||
- avalon-core debugging failing [\#80](https://github.com/pypeclub/OpenPype/issues/80)
|
||||
- Only check arnold shading group if arnold is used [\#72](https://github.com/pypeclub/OpenPype/issues/72)
|
||||
- Sync server Qt layout fix [\#1621](https://github.com/pypeclub/OpenPype/pull/1621) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Console Listener on Python 2 fix [\#1620](https://github.com/pypeclub/OpenPype/pull/1620) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Bug: Initialize blessed term only in console mode [\#1619](https://github.com/pypeclub/OpenPype/pull/1619) ([antirotor](https://github.com/antirotor))
|
||||
- Settings template skip paths support wrappers [\#1618](https://github.com/pypeclub/OpenPype/pull/1618) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Maya capture 'isolate\_view' fix + minor corrections [\#1617](https://github.com/pypeclub/OpenPype/pull/1617) ([2-REC](https://github.com/2-REC))
|
||||
- MacOs Fix launch of standalone publisher [\#1616](https://github.com/pypeclub/OpenPype/pull/1616) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- 'Delivery action' report fix + typos [\#1612](https://github.com/pypeclub/OpenPype/pull/1612) ([2-REC](https://github.com/2-REC))
|
||||
- List append fix in mutable dict settings [\#1599](https://github.com/pypeclub/OpenPype/pull/1599) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Documentation: Maya: fix review [\#1598](https://github.com/pypeclub/OpenPype/pull/1598) ([antirotor](https://github.com/antirotor))
|
||||
- Bugfix: Set certifi CA bundle for all platforms [\#1596](https://github.com/pypeclub/OpenPype/pull/1596) ([antirotor](https://github.com/antirotor))
|
||||
|
||||
**Merged pull requests:**
|
||||
|
||||
- Bump dns-packet from 1.3.1 to 1.3.4 in /website [\#1611](https://github.com/pypeclub/OpenPype/pull/1611) ([dependabot[bot]](https://github.com/apps/dependabot))
|
||||
- Maya: Render workflow fixes [\#1607](https://github.com/pypeclub/OpenPype/pull/1607) ([antirotor](https://github.com/antirotor))
|
||||
- Maya: support for frame steps and frame lists [\#1586](https://github.com/pypeclub/OpenPype/pull/1586) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- 3.0.0 - curated changelog [\#1284](https://github.com/pypeclub/OpenPype/pull/1284) ([mkolar](https://github.com/mkolar))
|
||||
|
||||
## [2.18.1](https://github.com/pypeclub/openpype/tree/2.18.1) (2021-06-03)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/2.18.0...2.18.1)
|
||||
|
||||
**Enhancements:**
|
||||
|
||||
- Faster hierarchical values push [\#1626](https://github.com/pypeclub/OpenPype/pull/1626)
|
||||
- Feature Delivery in library loader [\#1549](https://github.com/pypeclub/OpenPype/pull/1549)
|
||||
- Hiero: Initial frame publish support. [\#1172](https://github.com/pypeclub/OpenPype/pull/1172)
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- Maya capture 'isolate\_view' fix + minor corrections [\#1614](https://github.com/pypeclub/OpenPype/pull/1614)
|
||||
- 'Delivery action' report fix +typos [\#1613](https://github.com/pypeclub/OpenPype/pull/1613)
|
||||
- Delivery in LibraryLoader - fixed sequence issue [\#1590](https://github.com/pypeclub/OpenPype/pull/1590)
|
||||
- FFmpeg filters in quote marks [\#1588](https://github.com/pypeclub/OpenPype/pull/1588)
|
||||
- Ftrack delete action cause circular error [\#1581](https://github.com/pypeclub/OpenPype/pull/1581)
|
||||
- Fix Maya playblast. [\#1566](https://github.com/pypeclub/OpenPype/pull/1566)
|
||||
- More failsafes prevent errored runs. [\#1554](https://github.com/pypeclub/OpenPype/pull/1554)
|
||||
- Celaction publishing [\#1539](https://github.com/pypeclub/OpenPype/pull/1539)
|
||||
- celaction: app not starting [\#1533](https://github.com/pypeclub/OpenPype/pull/1533)
|
||||
|
||||
**Merged pull requests:**
|
||||
|
||||
- Maya: Render workflow fixes - 2.0 backport [\#1609](https://github.com/pypeclub/OpenPype/pull/1609)
|
||||
- Maya Hardware support [\#1553](https://github.com/pypeclub/OpenPype/pull/1553)
|
||||
|
||||
|
||||
## [CI/3.0.0-rc.6](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.6) (2021-05-27)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.5...CI/3.0.0-rc.6)
|
||||
|
||||
**Implemented enhancements:**
|
||||
|
||||
- Hiero: publish color and transformation soft-effects [\#1376](https://github.com/pypeclub/OpenPype/issues/1376)
|
||||
- Get rid of `AVALON\_HIERARCHY` and `hiearchy` key on asset [\#432](https://github.com/pypeclub/OpenPype/issues/432)
|
||||
- Sync to avalon do not store hierarchy key [\#1582](https://github.com/pypeclub/OpenPype/pull/1582) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Tools: launcher scripts for project manager [\#1557](https://github.com/pypeclub/OpenPype/pull/1557) ([antirotor](https://github.com/antirotor))
|
||||
- Simple tvpaint publish [\#1555](https://github.com/pypeclub/OpenPype/pull/1555) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Feature Delivery in library loader [\#1546](https://github.com/pypeclub/OpenPype/pull/1546) ([kalisp](https://github.com/kalisp))
|
||||
- Documentation: Dev and system build documentation [\#1543](https://github.com/pypeclub/OpenPype/pull/1543) ([antirotor](https://github.com/antirotor))
|
||||
- Color entity [\#1542](https://github.com/pypeclub/OpenPype/pull/1542) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Extract review bg color [\#1534](https://github.com/pypeclub/OpenPype/pull/1534) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- TVPaint loader settings [\#1530](https://github.com/pypeclub/OpenPype/pull/1530) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Blender can initialize differente user script paths [\#1528](https://github.com/pypeclub/OpenPype/pull/1528) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Blender and Unreal: Improved Animation Workflow [\#1514](https://github.com/pypeclub/OpenPype/pull/1514) ([simonebarbieri](https://github.com/simonebarbieri))
|
||||
- Hiero: publish color and transformation soft-effects [\#1511](https://github.com/pypeclub/OpenPype/pull/1511) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- OpenPype specific version issues [\#1583](https://github.com/pypeclub/OpenPype/issues/1583)
|
||||
- Ftrack login server can't work without stderr [\#1576](https://github.com/pypeclub/OpenPype/issues/1576)
|
||||
- Mac application launch [\#1575](https://github.com/pypeclub/OpenPype/issues/1575)
|
||||
- Settings are not propagated to Nuke write nodes [\#1538](https://github.com/pypeclub/OpenPype/issues/1538)
|
||||
- Subset names settings not applied for publishing [\#1537](https://github.com/pypeclub/OpenPype/issues/1537)
|
||||
- Nuke: callback at start not setting colorspace [\#1412](https://github.com/pypeclub/OpenPype/issues/1412)
|
||||
- Pype 3: Missing icon for Settings [\#1272](https://github.com/pypeclub/OpenPype/issues/1272)
|
||||
- Blender: cannot initialize Avalon if BLENDER\_USER\_SCRIPTS is already used [\#1050](https://github.com/pypeclub/OpenPype/issues/1050)
|
||||
- Ftrack delete action cause circular error [\#206](https://github.com/pypeclub/OpenPype/issues/206)
|
||||
- Build: stop cleaning of pyc files in build directory [\#1592](https://github.com/pypeclub/OpenPype/pull/1592) ([antirotor](https://github.com/antirotor))
|
||||
- Ftrack login server can't work without stderr [\#1591](https://github.com/pypeclub/OpenPype/pull/1591) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- FFmpeg filters in quote marks [\#1589](https://github.com/pypeclub/OpenPype/pull/1589) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- OpenPype specific version issues [\#1584](https://github.com/pypeclub/OpenPype/pull/1584) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Mac application launch [\#1580](https://github.com/pypeclub/OpenPype/pull/1580) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Ftrack delete action cause circular error [\#1579](https://github.com/pypeclub/OpenPype/pull/1579) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Hiero: publishing issues [\#1578](https://github.com/pypeclub/OpenPype/pull/1578) ([jezscha](https://github.com/jezscha))
|
||||
- Nuke: callback at start not setting colorspace [\#1561](https://github.com/pypeclub/OpenPype/pull/1561) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Bugfix PS subset and quick review [\#1541](https://github.com/pypeclub/OpenPype/pull/1541) ([kalisp](https://github.com/kalisp))
|
||||
- Settings are not propagated to Nuke write nodes [\#1540](https://github.com/pypeclub/OpenPype/pull/1540) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- OpenPype: Powershell scripts polishing [\#1536](https://github.com/pypeclub/OpenPype/pull/1536) ([antirotor](https://github.com/antirotor))
|
||||
- Host name collecting fix [\#1535](https://github.com/pypeclub/OpenPype/pull/1535) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Handle duplicated task names in project manager [\#1531](https://github.com/pypeclub/OpenPype/pull/1531) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Validate is file attribute in settings schema [\#1529](https://github.com/pypeclub/OpenPype/pull/1529) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
|
||||
**Merged pull requests:**
|
||||
|
||||
- Bump postcss from 8.2.8 to 8.3.0 in /website [\#1593](https://github.com/pypeclub/OpenPype/pull/1593) ([dependabot[bot]](https://github.com/apps/dependabot))
|
||||
- User installation documentation [\#1532](https://github.com/pypeclub/OpenPype/pull/1532) ([64qam](https://github.com/64qam))
|
||||
|
||||
## [CI/3.0.0-rc.5](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.5) (2021-05-19)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/2.18.0...CI/3.0.0-rc.5)
|
||||
|
||||
**Implemented enhancements:**
|
||||
|
||||
- OpenPype: Build - Add progress bars [\#1524](https://github.com/pypeclub/OpenPype/pull/1524) ([antirotor](https://github.com/antirotor))
|
||||
- Default environments per host imlementation [\#1522](https://github.com/pypeclub/OpenPype/pull/1522) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- OpenPype: use `semver` module for version resolution [\#1513](https://github.com/pypeclub/OpenPype/pull/1513) ([antirotor](https://github.com/antirotor))
|
||||
- Feature Aftereffects setting cleanup documentation [\#1510](https://github.com/pypeclub/OpenPype/pull/1510) ([kalisp](https://github.com/kalisp))
|
||||
- Feature Sync server settings enhancement [\#1501](https://github.com/pypeclub/OpenPype/pull/1501) ([kalisp](https://github.com/kalisp))
|
||||
- Project manager [\#1396](https://github.com/pypeclub/OpenPype/pull/1396) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- Unified schema definition [\#874](https://github.com/pypeclub/OpenPype/issues/874)
|
||||
- Maya: fix look assignment [\#1526](https://github.com/pypeclub/OpenPype/pull/1526) ([antirotor](https://github.com/antirotor))
|
||||
- Bugfix Sync server local site issues [\#1523](https://github.com/pypeclub/OpenPype/pull/1523) ([kalisp](https://github.com/kalisp))
|
||||
- Store as list dictionary check initial value with right type [\#1520](https://github.com/pypeclub/OpenPype/pull/1520) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Maya: wrong collection of playblasted frames [\#1515](https://github.com/pypeclub/OpenPype/pull/1515) ([mkolar](https://github.com/mkolar))
|
||||
- Convert pyblish logs to string at the moment of logging [\#1512](https://github.com/pypeclub/OpenPype/pull/1512) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- 3.0 | nuke: fixing start\_at with option gui [\#1509](https://github.com/pypeclub/OpenPype/pull/1509) ([jezscha](https://github.com/jezscha))
|
||||
- Tests: fix pype -\> openpype to make tests work again [\#1508](https://github.com/pypeclub/OpenPype/pull/1508) ([antirotor](https://github.com/antirotor))
|
||||
|
||||
**Merged pull requests:**
|
||||
|
||||
- OpenPype: disable submodule update with `--no-submodule-update` [\#1525](https://github.com/pypeclub/OpenPype/pull/1525) ([antirotor](https://github.com/antirotor))
|
||||
- Ftrack without autosync in Pype 3 [\#1519](https://github.com/pypeclub/OpenPype/pull/1519) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Feature Harmony setting cleanup documentation [\#1506](https://github.com/pypeclub/OpenPype/pull/1506) ([kalisp](https://github.com/kalisp))
|
||||
- Sync Server beginning of documentation [\#1471](https://github.com/pypeclub/OpenPype/pull/1471) ([kalisp](https://github.com/kalisp))
|
||||
- Blender: publish layout json [\#1348](https://github.com/pypeclub/OpenPype/pull/1348) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
|
||||
## [2.18.0](https://github.com/pypeclub/openpype/tree/2.18.0) (2021-05-18)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.4...2.18.0)
|
||||
|
||||
**Implemented enhancements:**
|
||||
|
||||
- Default environments per host imlementation [\#1405](https://github.com/pypeclub/OpenPype/issues/1405)
|
||||
- Blender: publish layout json [\#1346](https://github.com/pypeclub/OpenPype/issues/1346)
|
||||
- Ftrack without autosync in Pype 3 [\#1128](https://github.com/pypeclub/OpenPype/issues/1128)
|
||||
- Launcher: started action indicator [\#1102](https://github.com/pypeclub/OpenPype/issues/1102)
|
||||
- Launch arguments of applications [\#1094](https://github.com/pypeclub/OpenPype/issues/1094)
|
||||
- Publish: instance info [\#724](https://github.com/pypeclub/OpenPype/issues/724)
|
||||
- Review: ability to control review length [\#482](https://github.com/pypeclub/OpenPype/issues/482)
|
||||
- Colorized recognition of creator result [\#394](https://github.com/pypeclub/OpenPype/issues/394)
|
||||
- event assign user to started task [\#49](https://github.com/pypeclub/OpenPype/issues/49)
|
||||
- rebuild containers from reference in maya [\#55](https://github.com/pypeclub/OpenPype/issues/55)
|
||||
- nuke Load metadata [\#66](https://github.com/pypeclub/OpenPype/issues/66)
|
||||
- Maya: Safer handling of expected render output names [\#1496](https://github.com/pypeclub/OpenPype/pull/1496) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- TVPaint: Increment workfile version on successfull publish. [\#1489](https://github.com/pypeclub/OpenPype/pull/1489) ([tokejepsen](https://github.com/tokejepsen))
|
||||
- Use SubsetLoader and multiple contexts for delete\_old\_versions [\#1484](https://github.com/pypeclub/OpenPype/pull/1484) ([tokejepsen](https://github.com/tokejepsen))
|
||||
- Maya: Use of multiple deadline servers [\#1483](https://github.com/pypeclub/OpenPype/pull/1483) ([antirotor](https://github.com/antirotor))
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- Igniter version resolution doesn't consider it's own version [\#1505](https://github.com/pypeclub/OpenPype/issues/1505)
|
||||
- Maya: Safer handling of expected render output names [\#1159](https://github.com/pypeclub/OpenPype/issues/1159)
|
||||
- Harmony: Invalid render output from non-conventionally named instance [\#871](https://github.com/pypeclub/OpenPype/issues/871)
|
||||
- Existing subsets hints in creator [\#1503](https://github.com/pypeclub/OpenPype/pull/1503) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- nuke: space in node name breaking process [\#1494](https://github.com/pypeclub/OpenPype/pull/1494) ([jezscha](https://github.com/jezscha))
|
||||
- Maya: wrong collection of playblasted frames [\#1517](https://github.com/pypeclub/OpenPype/pull/1517) ([mkolar](https://github.com/mkolar))
|
||||
- Existing subsets hints in creator [\#1502](https://github.com/pypeclub/OpenPype/pull/1502) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Use instance frame start instead of timeline. [\#1486](https://github.com/pypeclub/OpenPype/pull/1486) ([tokejepsen](https://github.com/tokejepsen))
|
||||
- Maya: Redshift - set proper start frame on proxy [\#1480](https://github.com/pypeclub/OpenPype/pull/1480) ([antirotor](https://github.com/antirotor))
|
||||
|
||||
**Closed issues:**
|
||||
|
||||
- Nuke: wrong "star at" value on render load [\#1352](https://github.com/pypeclub/OpenPype/issues/1352)
|
||||
- DV Resolve - loading/updating - image video [\#915](https://github.com/pypeclub/OpenPype/issues/915)
|
||||
|
||||
**Merged pull requests:**
|
||||
|
||||
- nuke: fixing start\_at with option gui [\#1507](https://github.com/pypeclub/OpenPype/pull/1507) ([jezscha](https://github.com/jezscha))
|
||||
|
||||
## [CI/3.0.0-rc.4](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.4) (2021-05-12)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.3...CI/3.0.0-rc.4)
|
||||
|
||||
**Implemented enhancements:**
|
||||
|
||||
- Resolve: documentation [\#1490](https://github.com/pypeclub/OpenPype/issues/1490)
|
||||
- Hiero: audio to review [\#1378](https://github.com/pypeclub/OpenPype/issues/1378)
|
||||
- nks color clips after publish [\#44](https://github.com/pypeclub/OpenPype/issues/44)
|
||||
- Store data from modifiable dict as list [\#1504](https://github.com/pypeclub/OpenPype/pull/1504) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Use SubsetLoader and multiple contexts for delete\_old\_versions [\#1497](https://github.com/pypeclub/OpenPype/pull/1497) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Hiero: publish audio and add to review [\#1493](https://github.com/pypeclub/OpenPype/pull/1493) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Resolve: documentation [\#1491](https://github.com/pypeclub/OpenPype/pull/1491) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Change integratenew template profiles setting [\#1487](https://github.com/pypeclub/OpenPype/pull/1487) ([kalisp](https://github.com/kalisp))
|
||||
- Settings tool cleanup [\#1477](https://github.com/pypeclub/OpenPype/pull/1477) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Sorted Applications and Tools in Custom attribute [\#1476](https://github.com/pypeclub/OpenPype/pull/1476) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- PS - group all published instances [\#1416](https://github.com/pypeclub/OpenPype/pull/1416) ([kalisp](https://github.com/kalisp))
|
||||
- OpenPype: Support for Docker [\#1289](https://github.com/pypeclub/OpenPype/pull/1289) ([antirotor](https://github.com/antirotor))
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- Harmony: palettes publishing [\#1439](https://github.com/pypeclub/OpenPype/issues/1439)
|
||||
- Photoshop: validation for already created images [\#1435](https://github.com/pypeclub/OpenPype/issues/1435)
|
||||
- Nuke Extracts Thumbnail from frame out of shot range [\#963](https://github.com/pypeclub/OpenPype/issues/963)
|
||||
- Instance in same Context repairing [\#390](https://github.com/pypeclub/OpenPype/issues/390)
|
||||
- User Inactivity - Start timers sets wrong time [\#91](https://github.com/pypeclub/OpenPype/issues/91)
|
||||
- Use instance frame start instead of timeline [\#1499](https://github.com/pypeclub/OpenPype/pull/1499) ([mkolar](https://github.com/mkolar))
|
||||
- Various smaller fixes [\#1498](https://github.com/pypeclub/OpenPype/pull/1498) ([mkolar](https://github.com/mkolar))
|
||||
- nuke: space in node name breaking process [\#1495](https://github.com/pypeclub/OpenPype/pull/1495) ([jezscha](https://github.com/jezscha))
|
||||
- Codec determination in extract burnin [\#1492](https://github.com/pypeclub/OpenPype/pull/1492) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Undefined constant in subprocess module [\#1485](https://github.com/pypeclub/OpenPype/pull/1485) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- List entity catch add/remove item changes properly [\#1482](https://github.com/pypeclub/OpenPype/pull/1482) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Resolve: additional fixes of publishing workflow [\#1481](https://github.com/pypeclub/OpenPype/pull/1481) ([jezscha](https://github.com/jezscha))
|
||||
- Photoshop: validation for already created images [\#1436](https://github.com/pypeclub/OpenPype/pull/1436) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
|
||||
**Merged pull requests:**
|
||||
|
||||
- Maya: Support for looks on VRay Proxies [\#1443](https://github.com/pypeclub/OpenPype/pull/1443) ([antirotor](https://github.com/antirotor))
|
||||
|
||||
## [2.17.3](https://github.com/pypeclub/openpype/tree/2.17.3) (2021-05-06)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.3...2.17.3)
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- Nuke: workfile version synced to db version always [\#1479](https://github.com/pypeclub/OpenPype/pull/1479) ([jezscha](https://github.com/jezscha))
|
||||
|
||||
## [CI/3.0.0-rc.3](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.3) (2021-05-05)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.2...CI/3.0.0-rc.3)
|
||||
|
||||
**Implemented enhancements:**
|
||||
|
||||
- Path entity with placeholder [\#1473](https://github.com/pypeclub/OpenPype/pull/1473) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Burnin custom font filepath [\#1472](https://github.com/pypeclub/OpenPype/pull/1472) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Poetry: Move to OpenPype [\#1449](https://github.com/pypeclub/OpenPype/pull/1449) ([antirotor](https://github.com/antirotor))
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- Mac SSL path needs to be relative to pype\_root [\#1469](https://github.com/pypeclub/OpenPype/issues/1469)
|
||||
- Resolve: fix loading clips to timeline [\#1421](https://github.com/pypeclub/OpenPype/issues/1421)
|
||||
- Wrong handling of slashes when loading on mac [\#1411](https://github.com/pypeclub/OpenPype/issues/1411)
|
||||
- Nuke openpype3 [\#1342](https://github.com/pypeclub/OpenPype/issues/1342)
|
||||
- Houdini launcher [\#1171](https://github.com/pypeclub/OpenPype/issues/1171)
|
||||
- Fix SyncServer get\_enabled\_projects should handle global state [\#1475](https://github.com/pypeclub/OpenPype/pull/1475) ([kalisp](https://github.com/kalisp))
|
||||
- Igniter buttons enable/disable fix [\#1474](https://github.com/pypeclub/OpenPype/pull/1474) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Mac SSL path needs to be relative to pype\_root [\#1470](https://github.com/pypeclub/OpenPype/pull/1470) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Resolve: 17 compatibility issues and load image sequences [\#1422](https://github.com/pypeclub/OpenPype/pull/1422) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
|
||||
## [CI/3.0.0-rc.2](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.2) (2021-05-04)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.2...CI/3.0.0-rc.2)
|
||||
|
||||
**Implemented enhancements:**
|
||||
|
||||
- Extract burnins with sequences [\#1467](https://github.com/pypeclub/OpenPype/pull/1467) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Extract burnins with color setting [\#1466](https://github.com/pypeclub/OpenPype/pull/1466) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- Fix groups check in Python 2 [\#1468](https://github.com/pypeclub/OpenPype/pull/1468) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
|
||||
## [2.17.2](https://github.com/pypeclub/openpype/tree/2.17.2) (2021-05-04)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.1...2.17.2)
|
||||
|
||||
**Implemented enhancements:**
|
||||
|
||||
- Forward/Backward compatible apps and tools with OpenPype 3 [\#1463](https://github.com/pypeclub/OpenPype/pull/1463) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
|
||||
## [CI/3.0.0-rc.1](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.1) (2021-05-04)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.1...CI/3.0.0-rc.1)
|
||||
|
||||
**Implemented enhancements:**
|
||||
|
||||
- Only show studio settings to admins [\#1406](https://github.com/pypeclub/OpenPype/issues/1406)
|
||||
- Ftrack specific settings save warning messages [\#1458](https://github.com/pypeclub/OpenPype/pull/1458) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Faster settings actions [\#1446](https://github.com/pypeclub/OpenPype/pull/1446) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Feature/sync server priority [\#1444](https://github.com/pypeclub/OpenPype/pull/1444) ([kalisp](https://github.com/kalisp))
|
||||
- Faster settings UI loading [\#1442](https://github.com/pypeclub/OpenPype/pull/1442) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Igniter re-write [\#1441](https://github.com/pypeclub/OpenPype/pull/1441) ([mkolar](https://github.com/mkolar))
|
||||
- Wrap openpype build into installers [\#1419](https://github.com/pypeclub/OpenPype/pull/1419) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Extract review first documentation [\#1404](https://github.com/pypeclub/OpenPype/pull/1404) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Blender PySide2 install guide [\#1403](https://github.com/pypeclub/OpenPype/pull/1403) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Nuke: deadline submission with gpu [\#1394](https://github.com/pypeclub/OpenPype/pull/1394) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Igniter: Reverse item filter for OpenPype version [\#1349](https://github.com/pypeclub/OpenPype/pull/1349) ([antirotor](https://github.com/antirotor))
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- OpenPype Mongo URL definition [\#1450](https://github.com/pypeclub/OpenPype/issues/1450)
|
||||
- Various typos and smaller fixes [\#1464](https://github.com/pypeclub/OpenPype/pull/1464) ([mkolar](https://github.com/mkolar))
|
||||
- Validation of dynamic items in settings [\#1462](https://github.com/pypeclub/OpenPype/pull/1462) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- List can handle new items correctly [\#1459](https://github.com/pypeclub/OpenPype/pull/1459) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Settings actions process fix [\#1457](https://github.com/pypeclub/OpenPype/pull/1457) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Add to overrides actions fix [\#1456](https://github.com/pypeclub/OpenPype/pull/1456) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- OpenPype Mongo URL definition [\#1455](https://github.com/pypeclub/OpenPype/pull/1455) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Global settings save/load out of system settings [\#1447](https://github.com/pypeclub/OpenPype/pull/1447) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Keep metadata on remove overrides [\#1445](https://github.com/pypeclub/OpenPype/pull/1445) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Nuke: fixing undo for loaded mov and sequence [\#1432](https://github.com/pypeclub/OpenPype/pull/1432) ([jezscha](https://github.com/jezscha))
|
||||
- ExtractReview skip empty strings from settings [\#1431](https://github.com/pypeclub/OpenPype/pull/1431) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Bugfix Sync server tweaks [\#1430](https://github.com/pypeclub/OpenPype/pull/1430) ([kalisp](https://github.com/kalisp))
|
||||
- Hiero: missing thumbnail in review [\#1429](https://github.com/pypeclub/OpenPype/pull/1429) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Bugfix Maya in deadline for OpenPype [\#1428](https://github.com/pypeclub/OpenPype/pull/1428) ([kalisp](https://github.com/kalisp))
|
||||
- AE - validation for duration was 1 frame shorter [\#1427](https://github.com/pypeclub/OpenPype/pull/1427) ([kalisp](https://github.com/kalisp))
|
||||
- Houdini menu filename [\#1418](https://github.com/pypeclub/OpenPype/pull/1418) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Fix Avalon plugins attribute overrides [\#1413](https://github.com/pypeclub/OpenPype/pull/1413) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Nuke: submit to Deadline fails [\#1409](https://github.com/pypeclub/OpenPype/pull/1409) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- Validate MongoDB Url on start [\#1407](https://github.com/pypeclub/OpenPype/pull/1407) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Nuke: fix set colorspace with new settings [\#1386](https://github.com/pypeclub/OpenPype/pull/1386) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- MacOs build and install issues [\#1380](https://github.com/pypeclub/OpenPype/pull/1380) ([mkolar](https://github.com/mkolar))
|
||||
|
||||
**Closed issues:**
|
||||
|
||||
- test [\#1452](https://github.com/pypeclub/OpenPype/issues/1452)
|
||||
|
||||
**Merged pull requests:**
|
||||
|
||||
- TVPaint frame range definition [\#1425](https://github.com/pypeclub/OpenPype/pull/1425) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
|
||||
- Only show studio settings to admins [\#1420](https://github.com/pypeclub/OpenPype/pull/1420) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
|
||||
- TVPaint documentation [\#1305](https://github.com/pypeclub/OpenPype/pull/1305) ([64qam](https://github.com/64qam))
|
||||
|
||||
## [2.17.1](https://github.com/pypeclub/openpype/tree/2.17.1) (2021-04-30)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.0...2.17.1)
|
||||
|
||||
**Enhancements:**
|
||||
|
||||
- Nuke: deadline submission with gpu [\#1414](https://github.com/pypeclub/OpenPype/pull/1414)
|
||||
- TVPaint frame range definition [\#1424](https://github.com/pypeclub/OpenPype/pull/1424)
|
||||
- PS - group all published instances [\#1415](https://github.com/pypeclub/OpenPype/pull/1415)
|
||||
- Add task name to context pop up. [\#1383](https://github.com/pypeclub/OpenPype/pull/1383)
|
||||
- Enhance review letterbox feature. [\#1371](https://github.com/pypeclub/OpenPype/pull/1371)
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- Houdini menu filename [\#1417](https://github.com/pypeclub/OpenPype/pull/1417)
|
||||
- AE - validation for duration was 1 frame shorter [\#1426](https://github.com/pypeclub/OpenPype/pull/1426)
|
||||
|
||||
**Merged pull requests:**
|
||||
|
||||
- Maya: Vray - problem getting all file nodes for look publishing [\#1399](https://github.com/pypeclub/OpenPype/pull/1399)
|
||||
- Maya: Support for Redshift proxies [\#1360](https://github.com/pypeclub/OpenPype/pull/1360)
|
||||
|
||||
## [2.17.0](https://github.com/pypeclub/openpype/tree/2.17.0) (2021-04-20)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-beta.2...2.17.0)
|
||||
|
||||
**Enhancements:**
|
||||
|
||||
- Forward compatible ftrack group [\#1243](https://github.com/pypeclub/OpenPype/pull/1243)
|
||||
- Settings in mongo as dict [\#1221](https://github.com/pypeclub/OpenPype/pull/1221)
|
||||
- Maya: Make tx option configurable with presets [\#1328](https://github.com/pypeclub/OpenPype/pull/1328)
|
||||
- TVPaint asset name validation [\#1302](https://github.com/pypeclub/OpenPype/pull/1302)
|
||||
- TV Paint: Set initial project settings. [\#1299](https://github.com/pypeclub/OpenPype/pull/1299)
|
||||
- TV Paint: Validate mark in and out. [\#1298](https://github.com/pypeclub/OpenPype/pull/1298)
|
||||
- Validate project settings [\#1297](https://github.com/pypeclub/OpenPype/pull/1297)
|
||||
- After Effects: added SubsetManager [\#1234](https://github.com/pypeclub/OpenPype/pull/1234)
|
||||
- Show error message in pyblish UI [\#1206](https://github.com/pypeclub/OpenPype/pull/1206)
|
||||
|
||||
**Fixed bugs:**
|
||||
|
||||
- Hiero: fixing source frame from correct object [\#1362](https://github.com/pypeclub/OpenPype/pull/1362)
|
||||
- Nuke: fix colourspace, prerenders and nuke panes opening [\#1308](https://github.com/pypeclub/OpenPype/pull/1308)
|
||||
- AE remove orphaned instance from workfile - fix self.stub [\#1282](https://github.com/pypeclub/OpenPype/pull/1282)
|
||||
- Nuke: deadline submission with search replaced env values from preset [\#1194](https://github.com/pypeclub/OpenPype/pull/1194)
|
||||
- Ftrack custom attributes in bulks [\#1312](https://github.com/pypeclub/OpenPype/pull/1312)
|
||||
- Ftrack optional pypclub role [\#1303](https://github.com/pypeclub/OpenPype/pull/1303)
|
||||
- After Effects: remove orphaned instances [\#1275](https://github.com/pypeclub/OpenPype/pull/1275)
|
||||
- Avalon schema names [\#1242](https://github.com/pypeclub/OpenPype/pull/1242)
|
||||
- Handle duplication of Task name [\#1226](https://github.com/pypeclub/OpenPype/pull/1226)
|
||||
- Modified path of plugin loads for Harmony and TVPaint [\#1217](https://github.com/pypeclub/OpenPype/pull/1217)
|
||||
- Regex checks in profiles filtering [\#1214](https://github.com/pypeclub/OpenPype/pull/1214)
|
||||
- Bulk mov strict task [\#1204](https://github.com/pypeclub/OpenPype/pull/1204)
|
||||
- Update custom ftrack session attributes [\#1202](https://github.com/pypeclub/OpenPype/pull/1202)
|
||||
- Nuke: write node colorspace ignore `default\(\)` label [\#1199](https://github.com/pypeclub/OpenPype/pull/1199)
|
||||
- Nuke: reverse search to make it more versatile [\#1178](https://github.com/pypeclub/OpenPype/pull/1178)
|
||||
|
||||
|
||||
|
||||
## [2.16.0](https://github.com/pypeclub/pype/tree/2.16.0) (2021-03-22)
|
||||
|
||||
[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.3...2.16.0)
|
||||
|
|
@ -1057,4 +1568,7 @@ A large cleanup release. Most of the change are under the hood.
|
|||
\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)*
|
||||
|
||||
|
||||
\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)*
|
||||
|
||||
|
||||
\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)*
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Definition of Igniter version."""
|
||||
|
||||
__version__ = "1.0.0-rc1"
|
||||
__version__ = "1.0.0"
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ def main(ctx):
|
|||
|
||||
@main.command()
|
||||
@click.option("-d", "--dev", is_flag=True, help="Settings in Dev mode")
|
||||
def settings(dev=False):
|
||||
def settings(dev):
|
||||
"""Show Pype Settings UI."""
|
||||
PypeCommands().launch_settings_gui(dev)
|
||||
|
||||
|
|
|
|||
|
|
@ -8,8 +8,19 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
|
|||
This is not possible to do for all applications the same way.
|
||||
"""
|
||||
|
||||
order = 0
|
||||
app_groups = ["maya", "nuke", "nukex", "hiero", "nukestudio"]
|
||||
# Execute after workfile template copy
|
||||
order = 10
|
||||
app_groups = [
|
||||
"maya",
|
||||
"nuke",
|
||||
"nukex",
|
||||
"hiero",
|
||||
"nukestudio",
|
||||
"blender",
|
||||
"photoshop",
|
||||
"tvpaint",
|
||||
"afftereffects"
|
||||
]
|
||||
|
||||
def execute(self):
|
||||
if not self.data.get("start_last_workfile"):
|
||||
|
|
|
|||
127
openpype/hooks/pre_copy_template_workfile.py
Normal file
127
openpype/hooks/pre_copy_template_workfile.py
Normal file
|
|
@ -0,0 +1,127 @@
|
|||
import os
|
||||
import shutil
|
||||
from openpype.lib import (
|
||||
PreLaunchHook,
|
||||
get_custom_workfile_template_by_context,
|
||||
get_custom_workfile_template_by_string_context
|
||||
)
|
||||
from openpype.settings import get_project_settings
|
||||
|
||||
|
||||
class CopyTemplateWorkfile(PreLaunchHook):
|
||||
"""Copy workfile template.
|
||||
|
||||
This is not possible to do for all applications the same way.
|
||||
|
||||
The prelaunch hook works only if the last workfile path points to a file that does not exist yet.
|
||||
- That is possible only if it's the first version.
|
||||
"""
|
||||
|
||||
# Before `AddLastWorkfileToLaunchArgs`
|
||||
order = 0
|
||||
app_groups = ["blender", "photoshop", "tvpaint", "afftereffects"]
|
||||
|
||||
def execute(self):
|
||||
"""Check if can copy template for context and do it if possible.
|
||||
|
||||
First, check whether the host for the current project should create the first workfile.
|
||||
Second, check whether the template is reachable and can be copied.
|
||||
|
||||
Args:
|
||||
last_workfile(str): Path where template will be copied.
|
||||
|
||||
Returns:
|
||||
None: This is a void method.
|
||||
"""
|
||||
|
||||
last_workfile = self.data.get("last_workfile_path")
|
||||
if not last_workfile:
|
||||
self.log.warning((
|
||||
"Last workfile was not collected."
|
||||
" Can't add it to launch arguments or determine if should"
|
||||
" copy template."
|
||||
))
|
||||
return
|
||||
|
||||
if os.path.exists(last_workfile):
|
||||
self.log.debug("Last workfile exits. Skipping {} process.".format(
|
||||
self.__class__.__name__
|
||||
))
|
||||
return
|
||||
|
||||
self.log.info("Last workfile does not exits.")
|
||||
|
||||
project_name = self.data["project_name"]
|
||||
asset_name = self.data["asset_name"]
|
||||
task_name = self.data["task_name"]
|
||||
|
||||
project_settings = get_project_settings(project_name)
|
||||
host_settings = project_settings[self.application.host_name]
|
||||
|
||||
workfile_builder_settings = host_settings.get("workfile_builder")
|
||||
if not workfile_builder_settings:
|
||||
# TODO remove warning when deprecated
|
||||
self.log.warning((
|
||||
"Seems like old version of settings is used."
|
||||
" Can't access custom templates in host \"{}\"."
|
||||
).format(self.application.full_label))
|
||||
return
|
||||
|
||||
if not workfile_builder_settings["create_first_version"]:
|
||||
self.log.info((
|
||||
"Project \"{}\" has turned off to create first workfile for"
|
||||
" application \"{}\""
|
||||
).format(project_name, self.application.full_label))
|
||||
return
|
||||
|
||||
# Backwards compatibility
|
||||
template_profiles = workfile_builder_settings.get("custom_templates")
|
||||
if not template_profiles:
|
||||
self.log.info(
|
||||
"Custom templates are not filled. Skipping template copy."
|
||||
)
|
||||
return
|
||||
|
||||
project_doc = self.data.get("project_doc")
|
||||
asset_doc = self.data.get("asset_doc")
|
||||
anatomy = self.data.get("anatomy")
|
||||
if project_doc and asset_doc:
|
||||
self.log.debug("Started filtering of custom template paths.")
|
||||
template_path = get_custom_workfile_template_by_context(
|
||||
template_profiles, project_doc, asset_doc, task_name, anatomy
|
||||
)
|
||||
|
||||
else:
|
||||
self.log.warning((
|
||||
"Global data collection probably did not execute."
|
||||
" Using backup solution."
|
||||
))
|
||||
dbcon = self.data.get("dbcon")
|
||||
template_path = get_custom_workfile_template_by_string_context(
|
||||
template_profiles, project_name, asset_name, task_name,
|
||||
dbcon, anatomy
|
||||
)
|
||||
|
||||
if not template_path:
|
||||
self.log.info(
|
||||
"Registered custom templates didn't match current context."
|
||||
)
|
||||
return
|
||||
|
||||
if not os.path.exists(template_path):
|
||||
self.log.warning(
|
||||
"Couldn't find workfile template file \"{}\"".format(
|
||||
template_path
|
||||
)
|
||||
)
|
||||
return
|
||||
|
||||
self.log.info(
|
||||
f"Creating workfile from template: \"{template_path}\""
|
||||
)
|
||||
|
||||
# Copy template workfile to the new destination
|
||||
shutil.copy2(
|
||||
os.path.normpath(template_path),
|
||||
os.path.normpath(last_workfile)
|
||||
)
|
||||
|
|
@ -1,4 +1,5 @@
|
|||
import os
|
||||
import subprocess
|
||||
|
||||
from openpype.lib import (
|
||||
PreLaunchHook,
|
||||
|
|
@ -17,6 +18,8 @@ class NonPythonHostHook(PreLaunchHook):
|
|||
"""
|
||||
app_groups = ["harmony", "photoshop", "aftereffects"]
|
||||
|
||||
order = 20
|
||||
|
||||
def execute(self):
|
||||
# Pop executable
|
||||
executable_path = self.launch_context.launch_args.pop(0)
|
||||
|
|
@ -45,3 +48,6 @@ class NonPythonHostHook(PreLaunchHook):
|
|||
|
||||
if remainders:
|
||||
self.launch_context.launch_args.extend(remainders)
|
||||
|
||||
self.launch_context.kwargs["stdout"] = subprocess.DEVNULL
|
||||
self.launch_context.kwargs["stderr"] = subprocess.STDOUT
|
||||
|
|
|
|||
|
|
@ -11,12 +11,14 @@ class LaunchWithWindowsShell(PreLaunchHook):
|
|||
instead.
|
||||
"""
|
||||
|
||||
# Should be as last hook becuase must change launch arguments to string
|
||||
# Should be as last hook because must change launch arguments to string
|
||||
order = 1000
|
||||
app_groups = ["nuke", "nukex", "hiero", "nukestudio"]
|
||||
platforms = ["windows"]
|
||||
|
||||
def execute(self):
|
||||
launch_args = self.launch_context.clear_launch_args(
|
||||
self.launch_context.launch_args)
|
||||
new_args = [
|
||||
# Get comspec which is cmd.exe in most cases.
|
||||
os.environ.get("COMSPEC", "cmd.exe"),
|
||||
|
|
@ -24,7 +26,7 @@ class LaunchWithWindowsShell(PreLaunchHook):
|
|||
"/c",
|
||||
# Convert arguments to command line arguments (as string)
|
||||
"\"{}\"".format(
|
||||
subprocess.list2cmdline(self.launch_context.launch_args)
|
||||
subprocess.list2cmdline(launch_args)
|
||||
)
|
||||
]
|
||||
# Convert list to string
|
||||
|
|
|
|||
218
openpype/hosts/blender/plugins/load/load_look.py
Normal file
218
openpype/hosts/blender/plugins/load/load_look.py
Normal file
|
|
@ -0,0 +1,218 @@
|
|||
"""Load a model asset in Blender."""
|
||||
|
||||
from pathlib import Path
|
||||
from pprint import pformat
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
import os
|
||||
import json
|
||||
import bpy
|
||||
|
||||
from avalon import api, blender
|
||||
import openpype.hosts.blender.api.plugin as plugin
|
||||
|
||||
|
||||
class BlendLookLoader(plugin.AssetLoader):
|
||||
"""Load models from a .blend file.
|
||||
|
||||
Because they come from a .blend file we can simply link the collection that
|
||||
contains the model. There is no further need to 'containerise' it.
|
||||
"""
|
||||
|
||||
families = ["look"]
|
||||
representations = ["json"]
|
||||
|
||||
label = "Load Look"
|
||||
icon = "code-fork"
|
||||
color = "orange"
|
||||
|
||||
def get_all_children(self, obj):
|
||||
children = list(obj.children)
|
||||
|
||||
for child in children:
|
||||
children.extend(child.children)
|
||||
|
||||
return children
|
||||
|
||||
def _process(self, libpath, container_name, objects):
|
||||
with open(libpath, "r") as fp:
|
||||
data = json.load(fp)
|
||||
|
||||
path = os.path.dirname(libpath)
|
||||
materials_path = f"{path}/resources"
|
||||
|
||||
materials = []
|
||||
|
||||
for entry in data:
|
||||
file = entry.get('fbx_filename')
|
||||
if file is None:
|
||||
continue
|
||||
|
||||
bpy.ops.import_scene.fbx(filepath=f"{materials_path}/{file}")
|
||||
|
||||
mesh = [o for o in bpy.context.scene.objects if o.select_get()][0]
|
||||
material = mesh.data.materials[0]
|
||||
material.name = f"{material.name}:{container_name}"
|
||||
|
||||
texture_file = entry.get('tga_filename')
|
||||
if texture_file:
|
||||
node_tree = material.node_tree
|
||||
pbsdf = node_tree.nodes['Principled BSDF']
|
||||
base_color = pbsdf.inputs[0]
|
||||
tex_node = base_color.links[0].from_node
|
||||
tex_node.image.filepath = f"{materials_path}/{texture_file}"
|
||||
|
||||
materials.append(material)
|
||||
|
||||
for obj in objects:
|
||||
for child in self.get_all_children(obj):
|
||||
mesh_name = child.name.split(':')[0]
|
||||
if mesh_name == material.name.split(':')[0]:
|
||||
child.data.materials.clear()
|
||||
child.data.materials.append(material)
|
||||
break
|
||||
|
||||
bpy.data.objects.remove(mesh)
|
||||
|
||||
return materials, objects
|
||||
|
||||
def process_asset(
|
||||
self, context: dict, name: str, namespace: Optional[str] = None,
|
||||
options: Optional[Dict] = None
|
||||
) -> Optional[List]:
|
||||
"""
|
||||
Arguments:
|
||||
name: Use pre-defined name
|
||||
namespace: Use pre-defined namespace
|
||||
context: Full parenthood of representation to load
|
||||
options: Additional settings dictionary
|
||||
"""
|
||||
|
||||
libpath = self.fname
|
||||
asset = context["asset"]["name"]
|
||||
subset = context["subset"]["name"]
|
||||
|
||||
lib_container = plugin.asset_name(
|
||||
asset, subset
|
||||
)
|
||||
unique_number = plugin.get_unique_number(
|
||||
asset, subset
|
||||
)
|
||||
namespace = namespace or f"{asset}_{unique_number}"
|
||||
container_name = plugin.asset_name(
|
||||
asset, subset, unique_number
|
||||
)
|
||||
|
||||
container = bpy.data.collections.new(lib_container)
|
||||
container.name = container_name
|
||||
blender.pipeline.containerise_existing(
|
||||
container,
|
||||
name,
|
||||
namespace,
|
||||
context,
|
||||
self.__class__.__name__,
|
||||
)
|
||||
|
||||
metadata = container.get(blender.pipeline.AVALON_PROPERTY)
|
||||
|
||||
metadata["libpath"] = libpath
|
||||
metadata["lib_container"] = lib_container
|
||||
|
||||
selected = [o for o in bpy.context.scene.objects if o.select_get()]
|
||||
|
||||
materials, objects = self._process(libpath, container_name, selected)
|
||||
|
||||
# Save the list of imported materials in the metadata container
|
||||
metadata["objects"] = objects
|
||||
metadata["materials"] = materials
|
||||
|
||||
metadata["parent"] = str(context["representation"]["parent"])
|
||||
metadata["family"] = context["representation"]["context"]["family"]
|
||||
|
||||
nodes = list(container.objects)
|
||||
nodes.append(container)
|
||||
self[:] = nodes
|
||||
return nodes
|
||||
|
||||
def update(self, container: Dict, representation: Dict):
|
||||
collection = bpy.data.collections.get(container["objectName"])
|
||||
libpath = Path(api.get_representation_path(representation))
|
||||
extension = libpath.suffix.lower()
|
||||
|
||||
self.log.info(
|
||||
"Container: %s\nRepresentation: %s",
|
||||
pformat(container, indent=2),
|
||||
pformat(representation, indent=2),
|
||||
)
|
||||
|
||||
assert collection, (
|
||||
f"The asset is not loaded: {container['objectName']}"
|
||||
)
|
||||
assert not (collection.children), (
|
||||
"Nested collections are not supported."
|
||||
)
|
||||
assert libpath, (
|
||||
"No existing library file found for {container['objectName']}"
|
||||
)
|
||||
assert libpath.is_file(), (
|
||||
f"The file doesn't exist: {libpath}"
|
||||
)
|
||||
assert extension in plugin.VALID_EXTENSIONS, (
|
||||
f"Unsupported file: {libpath}"
|
||||
)
|
||||
|
||||
collection_metadata = collection.get(blender.pipeline.AVALON_PROPERTY)
|
||||
collection_libpath = collection_metadata["libpath"]
|
||||
|
||||
normalized_collection_libpath = (
|
||||
str(Path(bpy.path.abspath(collection_libpath)).resolve())
|
||||
)
|
||||
normalized_libpath = (
|
||||
str(Path(bpy.path.abspath(str(libpath))).resolve())
|
||||
)
|
||||
self.log.debug(
|
||||
"normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
|
||||
normalized_collection_libpath,
|
||||
normalized_libpath,
|
||||
)
|
||||
if normalized_collection_libpath == normalized_libpath:
|
||||
self.log.info("Library already loaded, not updating...")
|
||||
return
|
||||
|
||||
for obj in collection_metadata['objects']:
|
||||
for child in self.get_all_children(obj):
|
||||
child.data.materials.clear()
|
||||
|
||||
for material in collection_metadata['materials']:
|
||||
bpy.data.materials.remove(material)
|
||||
|
||||
namespace = collection_metadata['namespace']
|
||||
name = collection_metadata['name']
|
||||
|
||||
container_name = f"{namespace}_{name}"
|
||||
|
||||
materials, objects = self._process(
|
||||
libpath, container_name, collection_metadata['objects'])
|
||||
|
||||
collection_metadata["objects"] = objects
|
||||
collection_metadata["materials"] = materials
|
||||
collection_metadata["libpath"] = str(libpath)
|
||||
collection_metadata["representation"] = str(representation["_id"])
|
||||
|
||||
def remove(self, container: Dict) -> bool:
|
||||
collection = bpy.data.collections.get(container["objectName"])
|
||||
if not collection:
|
||||
return False
|
||||
|
||||
collection_metadata = collection.get(blender.pipeline.AVALON_PROPERTY)
|
||||
|
||||
for obj in collection_metadata['objects']:
|
||||
for child in self.get_all_children(obj):
|
||||
child.data.materials.clear()
|
||||
|
||||
for material in collection_metadata['materials']:
|
||||
bpy.data.materials.remove(material)
|
||||
|
||||
bpy.data.collections.remove(collection)
|
||||
|
||||
return True
|
||||
|
|
@ -40,6 +40,14 @@ class ExtractFBX(api.Extractor):
|
|||
context = plugin.create_blender_context(
|
||||
active=asset_group, selected=selected)
|
||||
|
||||
new_materials = []
|
||||
|
||||
for obj in collections[0].all_objects:
|
||||
if obj.type == 'MESH':
|
||||
mat = bpy.data.materials.new(obj.name)
|
||||
obj.data.materials.append(mat)
|
||||
new_materials.append(mat)
|
||||
|
||||
# We export the fbx
|
||||
bpy.ops.export_scene.fbx(
|
||||
context,
|
||||
|
|
@ -52,6 +60,13 @@ class ExtractFBX(api.Extractor):
|
|||
|
||||
bpy.ops.object.select_all(action='DESELECT')
|
||||
|
||||
for mat in new_materials:
|
||||
bpy.data.materials.remove(mat)
|
||||
|
||||
for obj in collections[0].all_objects:
|
||||
if obj.type == 'MESH':
|
||||
obj.data.materials.pop()
|
||||
|
||||
if "representations" not in instance.data:
|
||||
instance.data["representations"] = []
|
||||
|
||||
|
|
|
|||
|
|
@ -62,6 +62,76 @@ def _get_metadata(item):
|
|||
return {}
|
||||
|
||||
|
||||
def create_time_effects(otio_clip, track_item):
|
||||
# get all subtrack items
|
||||
subTrackItems = flatten(track_item.parent().subTrackItems())
|
||||
speed = track_item.playbackSpeed()
|
||||
|
||||
otio_effect = None
|
||||
# retime on track item
|
||||
if speed != 1.:
|
||||
# make effect
|
||||
otio_effect = otio.schema.LinearTimeWarp()
|
||||
otio_effect.name = "Speed"
|
||||
otio_effect.time_scalar = speed
|
||||
otio_effect.metadata = {}
|
||||
|
||||
# freeze frame effect
|
||||
if speed == 0.:
|
||||
otio_effect = otio.schema.FreezeFrame()
|
||||
otio_effect.name = "FreezeFrame"
|
||||
otio_effect.metadata = {}
|
||||
|
||||
if otio_effect:
|
||||
# add otio effect to clip effects
|
||||
otio_clip.effects.append(otio_effect)
|
||||
|
||||
# loop through and get all TimeWarps
|
||||
for effect in subTrackItems:
|
||||
if ((track_item not in effect.linkedItems())
|
||||
and (len(effect.linkedItems()) > 0)):
|
||||
continue
|
||||
# skip all effects which are not TimeWarp or are disabled
|
||||
if "TimeWarp" not in effect.name():
|
||||
continue
|
||||
|
||||
if not effect.isEnabled():
|
||||
continue
|
||||
|
||||
node = effect.node()
|
||||
name = node["name"].value()
|
||||
|
||||
# solve effect class as effect name
|
||||
_name = effect.name()
|
||||
if "_" in _name:
|
||||
effect_name = re.sub(r"(?:_)[_0-9]+", "", _name) # more numbers
|
||||
else:
|
||||
effect_name = re.sub(r"\d+", "", _name) # one number
|
||||
|
||||
metadata = {}
|
||||
# add knob to metadata
|
||||
for knob in ["lookup", "length"]:
|
||||
value = node[knob].value()
|
||||
animated = node[knob].isAnimated()
|
||||
if animated:
|
||||
value = [
|
||||
((node[knob].getValueAt(i)) - i)
|
||||
for i in range(
|
||||
track_item.timelineIn(), track_item.timelineOut() + 1)
|
||||
]
|
||||
|
||||
metadata[knob] = value
|
||||
|
||||
# make effect
|
||||
otio_effect = otio.schema.TimeEffect()
|
||||
otio_effect.name = name
|
||||
otio_effect.effect_name = effect_name
|
||||
otio_effect.metadata = metadata
|
||||
|
||||
# add otio effect to clip effects
|
||||
otio_clip.effects.append(otio_effect)
|
||||
|
||||
|
||||
def create_otio_reference(clip):
|
||||
metadata = _get_metadata(clip)
|
||||
media_source = clip.mediaSource()
|
||||
|
|
@@ -197,8 +267,12 @@ def create_otio_markers(otio_item, item):

def create_otio_clip(track_item):
    clip = track_item.source()
    source_in = track_item.sourceIn()
    duration = track_item.sourceDuration()
    speed = track_item.playbackSpeed()
    # flip source in/out if the speed is negative
    source_in = track_item.sourceIn() if speed > 0 else track_item.sourceOut()

    duration = int(track_item.duration())

    fps = utils.get_rate(track_item) or self.project_fps
    name = track_item.name()
@@ -220,6 +294,11 @@ def create_otio_clip(track_item):
    create_otio_markers(otio_clip, track_item)
    create_otio_markers(otio_clip, track_item.source())

    # only if video
    if not clip.mediaSource().hasAudio():
        # Add effects to clips
        create_time_effects(otio_clip, track_item)

    return otio_clip
@@ -1,121 +0,0 @@
from pyblish import api
import hiero
import math


class CollectCalculateRetime(api.InstancePlugin):
    """Calculate Retiming of selected track items."""

    order = api.CollectorOrder + 0.02
    label = "Collect Calculate Retiming"
    hosts = ["hiero"]
    families = ['retime']

    def process(self, instance):
        margin_in = instance.data["retimeMarginIn"]
        margin_out = instance.data["retimeMarginOut"]
        self.log.debug("margin_in: '{0}', margin_out: '{1}'".format(
            margin_in, margin_out))

        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        track_item = instance.data["item"]

        # define basic clip frame range variables
        timeline_in = int(track_item.timelineIn())
        timeline_out = int(track_item.timelineOut())
        source_in = int(track_item.sourceIn())
        source_out = int(track_item.sourceOut())
        speed = track_item.playbackSpeed()
        self.log.debug(
            "_BEFORE: \n timeline_in: `{0}`,\n timeline_out: `{1}`,"
            "\n source_in: `{2}`,\n source_out: `{3}`,\n speed: `{4}`,"
            "\n handle_start: `{5}`,\n handle_end: `{6}`".format(
                timeline_in,
                timeline_out,
                source_in,
                source_out,
                speed,
                handle_start,
                handle_end
            ))

        # loop within subtrack items
        source_in_change = 0
        source_out_change = 0
        for s_track_item in track_item.linkedItems():
            if isinstance(s_track_item, hiero.core.EffectTrackItem) \
                    and "TimeWarp" in s_track_item.node().Class():

                # adding timewarp attribute to instance
                if not instance.data.get("timeWarpNodes", None):
                    instance.data["timeWarpNodes"] = list()

                # ignore item if not enabled
                if s_track_item.isEnabled():
                    node = s_track_item.node()
                    name = node["name"].value()
                    look_up = node["lookup"].value()
                    animated = node["lookup"].isAnimated()
                    if animated:
                        look_up = [
                            ((node["lookup"].getValueAt(i)) - i)
                            for i in range(
                                (timeline_in - handle_start),
                                (timeline_out + handle_end) + 1)
                        ]
                    # calculate difference
                    diff_in = (node["lookup"].getValueAt(
                        timeline_in)) - timeline_in
                    diff_out = (node["lookup"].getValueAt(
                        timeline_out)) - timeline_out

                    # calculate source
                    source_in_change += diff_in
                    source_out_change += diff_out

                    # calculate speed
                    speed_in = (node["lookup"].getValueAt(timeline_in) / (
                        float(timeline_in) * .01)) * .01
                    speed_out = (node["lookup"].getValueAt(timeline_out) / (
                        float(timeline_out) * .01)) * .01

                    # calculate handles
                    handle_start = int(
                        math.ceil(
                            (handle_start * speed_in * 1000) / 1000.0)
                    )

                    handle_end = int(
                        math.ceil(
                            (handle_end * speed_out * 1000) / 1000.0)
                    )
                    self.log.debug(
                        ("diff_in, diff_out", diff_in, diff_out))
                    self.log.debug(
                        ("source_in_change, source_out_change",
                         source_in_change, source_out_change))

                    instance.data["timeWarpNodes"].append({
                        "Class": "TimeWarp",
                        "name": name,
                        "lookup": look_up})

        self.log.debug((source_in_change, source_out_change))
        # recalculate handles by the speed
        handle_start *= speed
        handle_end *= speed
        self.log.debug("speed: handle_start: '{0}', handle_end: '{1}'".format(
            handle_start, handle_end))

        source_in += int(source_in_change)
        source_out += int(source_out_change * speed)
        handle_start += (margin_in)
        handle_end += (margin_out)
        self.log.debug("margin: handle_start: '{0}', handle_end: '{1}'".format(
            handle_start, handle_end))

        # add all data to Instance
        instance.data["sourceIn"] = source_in
        instance.data["sourceOut"] = source_out
        instance.data["sourceInH"] = int(source_in - math.ceil(
            (handle_start * 1000) / 1000.0))
        instance.data["sourceOutH"] = int(source_out + math.ceil(
            (handle_end * 1000) / 1000.0))
        instance.data["speed"] = speed

        self.log.debug(
            "timeWarpNodes: {}".format(instance.data["timeWarpNodes"]))
        self.log.debug("sourceIn: {}".format(instance.data["sourceIn"]))
        self.log.debug("sourceOut: {}".format(instance.data["sourceOut"]))
        self.log.debug("speed: {}".format(instance.data["speed"]))
@@ -1,23 +0,0 @@
from pyblish import api


class CollectFramerate(api.ContextPlugin):
    """Collect framerate from selected sequence."""

    order = api.CollectorOrder + 0.001
    label = "Collect Framerate"
    hosts = ["hiero"]

    def process(self, context):
        sequence = context.data["activeSequence"]
        context.data["fps"] = self.get_rate(sequence)
        self.log.info("Framerate is collected: {}".format(context.data["fps"]))

    def get_rate(self, sequence):
        num, den = sequence.framerate().toRational()
        rate = float(num) / float(den)

        if rate.is_integer():
            return rate

        return round(rate, 3)
@@ -1,30 +0,0 @@
from pyblish import api


class CollectClipMetadata(api.InstancePlugin):
    """Collect Metadata from selected track items."""

    order = api.CollectorOrder + 0.01
    label = "Collect Metadata"
    hosts = ["hiero"]

    def process(self, instance):
        item = instance.data["item"]
        ti_metadata = self.metadata_to_string(dict(item.metadata()))
        ms_metadata = self.metadata_to_string(
            dict(item.source().mediaSource().metadata()))

        instance.data["clipMetadata"] = ti_metadata
        instance.data["mediaSourceMetadata"] = ms_metadata

        self.log.info(instance.data["clipMetadata"])
        self.log.info(instance.data["mediaSourceMetadata"])
        return

    def metadata_to_string(self, metadata):
        data = dict()
        for k, v in metadata.items():
            if v not in ["-", ""]:
                data[str(k)] = v

        return data
@@ -1,90 +0,0 @@
import pyblish.api
import opentimelineio.opentime as otio_ot


class CollectClipTimecodes(pyblish.api.InstancePlugin):
    """Collect time with OpenTimelineIO:
    source_h(In,Out)[timecode, sec]
    timeline(In,Out)[timecode, sec]
    """

    order = pyblish.api.CollectorOrder + 0.101
    label = "Collect Timecodes"
    hosts = ["hiero"]

    def process(self, instance):

        data = dict()
        self.log.debug("__ instance.data: {}".format(instance.data))
        # Timeline data.
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        source_in_h = instance.data("sourceInH",
                                    instance.data("sourceIn") - handle_start)
        source_out_h = instance.data("sourceOutH",
                                     instance.data("sourceOut") + handle_end)

        timeline_in = instance.data["clipIn"]
        timeline_out = instance.data["clipOut"]

        # set frame start with tag or take it from timeline
        frame_start = instance.data.get("startingFrame")

        if not frame_start:
            frame_start = timeline_in

        source = instance.data.get("source")

        otio_data = dict()
        self.log.debug("__ source: `{}`".format(source))

        rate_fps = instance.context.data["fps"]

        otio_in_h_ratio = otio_ot.RationalTime(
            value=(source.timecodeStart() + (
                source_in_h + (source_out_h - source_in_h))),
            rate=rate_fps)

        otio_out_h_ratio = otio_ot.RationalTime(
            value=(source.timecodeStart() + source_in_h),
            rate=rate_fps)

        otio_timeline_in_ratio = otio_ot.RationalTime(
            value=int(
                instance.data.get("timelineTimecodeStart", 0)) + timeline_in,
            rate=rate_fps)

        otio_timeline_out_ratio = otio_ot.RationalTime(
            value=int(
                instance.data.get("timelineTimecodeStart", 0)) + timeline_out,
            rate=rate_fps)

        otio_data.update({
            "otioClipInHTimecode": otio_ot.to_timecode(otio_in_h_ratio),
            "otioClipOutHTimecode": otio_ot.to_timecode(otio_out_h_ratio),
            "otioClipInHSec": otio_ot.to_seconds(otio_in_h_ratio),
            "otioClipOutHSec": otio_ot.to_seconds(otio_out_h_ratio),
            "otioTimelineInTimecode": otio_ot.to_timecode(
                otio_timeline_in_ratio),
            "otioTimelineOutTimecode": otio_ot.to_timecode(
                otio_timeline_out_ratio),
            "otioTimelineInSec": otio_ot.to_seconds(otio_timeline_in_ratio),
            "otioTimelineOutSec": otio_ot.to_seconds(otio_timeline_out_ratio)
        })

        data.update({
            "otioData": otio_data,
            "sourceTimecodeIn": otio_ot.to_timecode(otio_in_h_ratio),
            "sourceTimecodeOut": otio_ot.to_timecode(otio_out_h_ratio)
        })
        instance.data.update(data)
        self.log.debug("data: {}".format(instance.data))
@@ -6,7 +6,7 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin):
    """Collect soft effects instances."""

    order = pyblish.api.CollectorOrder - 0.579
    label = "Pre-collect Clip Effects Instances"
    label = "Precollect Clip Effects Instances"
    families = ["clip"]

    def process(self, instance):

@@ -40,6 +40,12 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin):
            if review and review_track_index == _track_index:
                continue
            for sitem in sub_track_items:
                effect = None
                # make sure this subtrack item relates to the track item
                if ((track_item not in sitem.linkedItems())
                        and (len(sitem.linkedItems()) > 0)):
                    continue

                if not (track_index <= _track_index):
                    continue

@@ -162,7 +168,7 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin):
                # grab animation including handles
                knob_anim = [node[knob].getValueAt(i)
                             for i in range(
                                 self.clip_in_h, self.clip_in_h + 1)]
                                 self.clip_in_h, self.clip_out_h + 1)]

                node_serialized[knob] = knob_anim
            else:
@@ -133,6 +133,13 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
        # create audio subset instance
        self.create_audio_instance(context, **data)

        # add colorspace data
        instance.data.update({
            "versionData": {
                "colorspace": track_item.sourceMediaColourTransform(),
            }
        })

        # add audioReview attribute to plate instance data
        # if reviewTrack is on
        if tag_data.get("reviewTrack") is not None:

@@ -304,9 +311,10 @@ class PrecollectInstances(pyblish.api.ContextPlugin):

    @staticmethod
    def create_otio_time_range_from_timeline_item_data(track_item):
        speed = track_item.playbackSpeed()
        timeline = phiero.get_current_sequence()
        frame_start = int(track_item.timelineIn())
        frame_duration = int(track_item.sourceDuration())
        frame_duration = int(track_item.sourceDuration() / speed)
        fps = timeline.framerate().toFloat()

        return hiero_export.create_otio_time_range(

@@ -376,6 +384,8 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
        subtracks = []
        subTrackItems = flatten(clip.parent().subTrackItems())
        for item in subTrackItems:
            if "TimeWarp" in item.name():
                continue
            # skip all annotations
            if isinstance(item, hiero.core.Annotation):
                continue
@@ -1,70 +0,0 @@
import pyblish.api


class CollectFrameRanges(pyblish.api.InstancePlugin):
    """ Collect all frame ranges.
    """

    order = pyblish.api.CollectorOrder - 0.1
    label = "Collect Frame Ranges"
    hosts = ["hiero"]
    families = ["clip", "effect"]

    def process(self, instance):

        data = dict()
        track_item = instance.data["item"]

        # handles
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        # source frame ranges
        source_in = int(track_item.sourceIn())
        source_out = int(track_item.sourceOut())
        source_in_h = int(source_in - handle_start)
        source_out_h = int(source_out + handle_end)

        # timeline frame ranges
        clip_in = int(track_item.timelineIn())
        clip_out = int(track_item.timelineOut())
        clip_in_h = clip_in - handle_start
        clip_out_h = clip_out + handle_end

        # durations
        clip_duration = (clip_out - clip_in) + 1
        clip_duration_h = clip_duration + (handle_start + handle_end)

        # set frame start with tag or take it from timeline `startingFrame`
        frame_start = instance.data.get("workfileFrameStart")

        if not frame_start:
            frame_start = clip_in

        frame_end = frame_start + (clip_out - clip_in)

        data.update({
            # media source frame range
            "sourceIn": source_in,
            "sourceOut": source_out,
            "sourceInH": source_in_h,
            "sourceOutH": source_out_h,

            # timeline frame range
            "clipIn": clip_in,
            "clipOut": clip_out,
            "clipInH": clip_in_h,
            "clipOutH": clip_out_h,

            # workfile frame range
            "frameStart": frame_start,
            "frameEnd": frame_end,

            "clipDuration": clip_duration,
            "clipDurationH": clip_duration_h,

            "fps": instance.context.data["fps"]
        })
        self.log.info("Frame range data for instance `{}` are: {}".format(
            instance, data))
        instance.data.update(data)
@@ -1,116 +0,0 @@
import pyblish.api
import avalon.api as avalon


class CollectHierarchy(pyblish.api.ContextPlugin):
    """Collecting hierarchy from `parents`.

    Present in `clip` family instances coming from the request json data file.

    It will add `hierarchical_context` into each instance for integrate
    plugins to be able to create needed parents for the context if they
    don't exist yet.
    """

    label = "Collect Hierarchy"
    order = pyblish.api.CollectorOrder
    families = ["clip"]

    def process(self, context):
        temp_context = {}
        project_name = avalon.Session["AVALON_PROJECT"]
        final_context = {}
        final_context[project_name] = {}
        final_context[project_name]['entity_type'] = 'Project'

        for instance in context:
            self.log.info("Processing instance: `{}` ...".format(instance))

            # shot data dict
            shot_data = {}
            families = instance.data.get("families")

            # filter out all inappropriate instances
            if not instance.data["publish"]:
                continue
            if not families:
                continue
            # exclude families other than self.families via intersection
            if not set(self.families).intersection(families):
                continue

            # exclude if heroTrack is not True
            if not instance.data.get("heroTrack"):
                continue

            # update families to include `shot` for hierarchy integration
            instance.data["families"] = families + ["shot"]

            # get asset build data if any available
            shot_data["inputs"] = [
                x["_id"] for x in instance.data.get("assetbuilds", [])
            ]

            # suppose that all instances are Shots
            shot_data['entity_type'] = 'Shot'
            shot_data['tasks'] = instance.data.get("tasks") or []
            shot_data["comments"] = instance.data.get("comments", [])

            shot_data['custom_attributes'] = {
                "handleStart": instance.data["handleStart"],
                "handleEnd": instance.data["handleEnd"],
                "frameStart": instance.data["frameStart"],
                "frameEnd": instance.data["frameEnd"],
                "clipIn": instance.data["clipIn"],
                "clipOut": instance.data["clipOut"],
                'fps': instance.context.data["fps"],
                "resolutionWidth": instance.data["resolutionWidth"],
                "resolutionHeight": instance.data["resolutionHeight"],
                "pixelAspect": instance.data["pixelAspect"]
            }

            actual = {instance.data["asset"]: shot_data}

            for parent in reversed(instance.data["parents"]):
                next_dict = {}
                parent_name = parent["entity_name"]
                next_dict[parent_name] = {}
                next_dict[parent_name]["entity_type"] = parent[
                    "entity_type"].capitalize()
                next_dict[parent_name]["childs"] = actual
                actual = next_dict

            temp_context = self._update_dict(temp_context, actual)

        # skip if nothing for hierarchy is available
        if not temp_context:
            return

        final_context[project_name]['childs'] = temp_context

        # adding hierarchy context to context
        context.data["hierarchyContext"] = final_context
        self.log.debug("context.data[hierarchyContext] is: {}".format(
            context.data["hierarchyContext"]))

    def _update_dict(self, parent_dict, child_dict):
        """Nest each child dict into its parent.

        Args:
            parent_dict (dict): parent dict which should be nested with
                children
            child_dict (dict): children dict which should be ingested
        """

        for key in parent_dict:
            if key in child_dict and isinstance(parent_dict[key], dict):
                child_dict[key] = self._update_dict(
                    parent_dict[key], child_dict[key]
                )
            else:
                if parent_dict.get(key) and child_dict.get(key):
                    continue
                else:
                    child_dict[key] = parent_dict[key]

        return child_dict
@ -1,169 +0,0 @@
|
|||
from pyblish import api
|
||||
import os
|
||||
import re
|
||||
import clique
|
||||
|
||||
|
||||
class CollectPlates(api.InstancePlugin):
|
||||
"""Collect plate representations.
|
||||
"""
|
||||
|
||||
# Run just before CollectSubsets
|
||||
order = api.CollectorOrder + 0.1020
|
||||
label = "Collect Plates"
|
||||
hosts = ["hiero"]
|
||||
families = ["plate"]
|
||||
|
||||
def process(self, instance):
|
||||
# add to representations
|
||||
if not instance.data.get("representations"):
|
||||
instance.data["representations"] = list()
|
||||
|
||||
self.main_clip = instance.data["item"]
|
||||
# get plate source attributes
|
||||
source_media = instance.data["sourceMedia"]
|
||||
source_path = instance.data["sourcePath"]
|
||||
source_first = instance.data["sourceFirst"]
|
||||
frame_start = instance.data["frameStart"]
|
||||
frame_end = instance.data["frameEnd"]
|
||||
handle_start = instance.data["handleStart"]
|
||||
handle_end = instance.data["handleEnd"]
|
||||
source_in = instance.data["sourceIn"]
|
||||
source_out = instance.data["sourceOut"]
|
||||
source_in_h = instance.data["sourceInH"]
|
||||
source_out_h = instance.data["sourceOutH"]
|
||||
|
||||
# define if review media is sequence
|
||||
is_sequence = bool(not source_media.singleFile())
|
||||
self.log.debug("is_sequence: {}".format(is_sequence))
|
||||
|
||||
file_dir = os.path.dirname(source_path)
|
||||
file = os.path.basename(source_path)
|
||||
ext = os.path.splitext(file)[-1]
|
||||
|
||||
# detect if sequence
|
||||
if not is_sequence:
|
||||
# is video file
|
||||
files = file
|
||||
else:
|
||||
files = list()
|
||||
spliter, padding = self.detect_sequence(file)
|
||||
self.log.debug("_ spliter, padding: {}, {}".format(
|
||||
spliter, padding))
|
||||
base_name = file.split(spliter)[0]
|
||||
|
||||
# define collection and calculate frame range
|
||||
collection = clique.Collection(
|
||||
base_name,
|
||||
ext,
|
||||
padding,
|
||||
set(range(
|
||||
int(source_first + source_in_h),
|
||||
int(source_first + source_out_h) + 1
|
||||
))
|
||||
)
|
||||
self.log.debug("_ collection: {}".format(collection))
|
||||
|
||||
real_files = os.listdir(file_dir)
|
||||
self.log.debug("_ real_files: {}".format(real_files))
|
||||
|
||||
# collect frames to repre files list
|
||||
self.handle_start_exclude = list()
|
||||
self.handle_end_exclude = list()
|
||||
for findex, item in enumerate(collection):
|
||||
if item not in real_files:
|
||||
self.log.debug("_ item: {}".format(item))
|
||||
test_index = findex + int(source_first + source_in_h)
|
||||
test_start = int(source_first + source_in)
|
||||
test_end = int(source_first + source_out)
|
||||
if (test_index < test_start):
|
||||
self.handle_start_exclude.append(test_index)
|
||||
elif (test_index > test_end):
|
||||
self.handle_end_exclude.append(test_index)
|
||||
continue
|
||||
files.append(item)
|
||||
|
||||
# change label
|
||||
instance.data["label"] = "{0} - ({1})".format(
|
||||
instance.data["label"], ext
|
||||
)
|
||||
|
||||
self.log.debug("Instance review: {}".format(instance.data["name"]))
|
||||
|
||||
# adding representation for review mov
|
||||
representation = {
|
||||
"files": files,
|
||||
"stagingDir": file_dir,
|
||||
"frameStart": frame_start - handle_start,
|
||||
"frameEnd": frame_end + handle_end,
|
||||
"name": ext[1:],
|
||||
"ext": ext[1:]
|
||||
}
|
||||
|
||||
instance.data["representations"].append(representation)
|
||||
self.version_data(instance)
|
||||
|
||||
self.log.debug(
|
||||
"Added representations: {}".format(
|
||||
instance.data["representations"]))
|
||||
|
||||
self.log.debug(
|
||||
"instance.data: {}".format(instance.data))
|
||||
|
||||
def version_data(self, instance):
|
||||
transfer_data = [
|
||||
"handleStart", "handleEnd", "sourceIn", "sourceOut",
|
||||
"frameStart", "frameEnd", "sourceInH", "sourceOutH",
|
||||
"clipIn", "clipOut", "clipInH", "clipOutH", "asset",
|
||||
"track"
|
||||
]
|
||||
|
||||
version_data = dict()
|
||||
# pass data to version
|
||||
version_data.update({k: instance.data[k] for k in transfer_data})
|
||||
|
||||
if 'version' in instance.data:
|
||||
version_data["version"] = instance.data["version"]
|
||||
|
||||
handle_start = instance.data["handleStart"]
|
||||
handle_end = instance.data["handleEnd"]
|
||||
|
||||
if self.handle_start_exclude:
|
||||
handle_start -= len(self.handle_start_exclude)
|
||||
|
||||
if self.handle_end_exclude:
|
||||
handle_end -= len(self.handle_end_exclude)
|
||||
|
||||
# add to data of representation
|
||||
version_data.update({
|
||||
"colorspace": self.main_clip.sourceMediaColourTransform(),
|
||||
"families": instance.data["families"],
|
||||
"subset": instance.data["subset"],
|
||||
"fps": instance.data["fps"],
|
||||
"handleStart": handle_start,
|
||||
"handleEnd": handle_end
|
||||
})
|
||||
instance.data["versionData"] = version_data
|
||||
|
||||
def detect_sequence(self, file):
|
||||
""" Get identificating pater for image sequence
|
||||
|
||||
Can find file.0001.ext, file.%02d.ext, file.####.ext
|
||||
|
||||
Return:
|
||||
string: any matching sequence pattern
|
||||
int: padding of sequence numbering
|
||||
"""
|
||||
foundall = re.findall(
|
||||
r"(#+)|(%\d+d)|(?<=[^a-zA-Z0-9])(\d+)(?=\.\w+$)", file)
|
||||
if foundall:
|
||||
found = sorted(list(set(foundall[0])))[-1]
|
||||
|
||||
if "%" in found:
|
||||
padding = int(re.findall(r"\d+", found)[-1])
|
||||
else:
|
||||
padding = len(found)
|
||||
|
||||
return found, padding
|
||||
else:
|
||||
return None, None
|
||||
|
|
@ -1,261 +0,0 @@
|
|||
from pyblish import api
|
||||
import os
|
||||
import clique
|
||||
from openpype.hosts.hiero.api import (
|
||||
is_overlapping, get_sequence_pattern_and_padding)
|
||||
|
||||
|
||||
class CollectReview(api.InstancePlugin):
|
||||
"""Collect review representation.
|
||||
"""
|
||||
|
||||
# Run just before CollectSubsets
|
||||
order = api.CollectorOrder + 0.1022
|
||||
label = "Collect Review"
|
||||
hosts = ["hiero"]
|
||||
families = ["review"]
|
||||
|
||||
def get_review_item(self, instance):
|
||||
"""
|
||||
Get review clip track item from review track name
|
||||
|
||||
Args:
|
||||
instance (obj): publishing instance
|
||||
|
||||
Returns:
|
||||
hiero.core.TrackItem: corresponding track item
|
||||
|
||||
Raises:
|
||||
Exception: description
|
||||
|
||||
"""
|
||||
review_track = instance.data.get("reviewTrack")
|
||||
video_tracks = instance.context.data["videoTracks"]
|
||||
for track in video_tracks:
|
||||
if review_track not in track.name():
|
||||
continue
|
||||
for item in track.items():
|
||||
self.log.debug(item)
|
||||
if is_overlapping(item, self.main_clip):
|
||||
self.log.debug("Winner is: {}".format(item))
|
||||
break
|
||||
|
||||
# validate the clip is fully converted with review clip
|
||||
assert is_overlapping(
|
||||
item, self.main_clip, strict=True), (
|
||||
"Review clip not cowering fully "
|
||||
"the clip `{}`").format(self.main_clip.name())
|
||||
|
||||
return item
|
||||
|
||||
def process(self, instance):
|
||||
tags = ["review", "ftrackreview"]
|
||||
|
||||
# get reviewable item from `review` instance.data attribute
|
||||
self.main_clip = instance.data.get("item")
|
||||
self.rw_clip = self.get_review_item(instance)
|
||||
|
||||
# let user know there is missing review clip and convert instance
|
||||
# back as not reviewable
|
||||
assert self.rw_clip, "Missing reviewable clip for '{}'".format(
|
||||
self.main_clip.name()
|
||||
)
|
||||
|
||||
# add to representations
|
||||
if not instance.data.get("representations"):
|
||||
instance.data["representations"] = list()
|
||||
|
||||
# get review media main info
|
||||
rw_source = self.rw_clip.source().mediaSource()
|
||||
rw_source_duration = int(rw_source.duration())
|
||||
self.rw_source_path = rw_source.firstpath()
|
||||
rw_source_file_info = rw_source.fileinfos().pop()
|
||||
|
||||
# define if review media is sequence
|
||||
is_sequence = bool(not rw_source.singleFile())
|
||||
self.log.debug("is_sequence: {}".format(is_sequence))
|
||||
|
||||
# get handles
|
||||
handle_start = instance.data["handleStart"]
|
||||
handle_end = instance.data["handleEnd"]
|
||||
|
||||
# review timeline and source frame ranges
|
||||
rw_clip_in = int(self.rw_clip.timelineIn())
|
||||
rw_clip_out = int(self.rw_clip.timelineOut())
|
||||
self.rw_clip_source_in = int(self.rw_clip.sourceIn())
|
||||
self.rw_clip_source_out = int(self.rw_clip.sourceOut())
|
||||
rw_source_first = int(rw_source_file_info.startFrame())
|
||||
|
||||
# calculate delivery source_in and source_out
|
||||
# main_clip_timeline_in - review_item_timeline_in + 1
|
||||
main_clip_in = self.main_clip.timelineIn()
|
||||
main_clip_out = self.main_clip.timelineOut()
|
||||
|
||||
source_in_diff = main_clip_in - rw_clip_in
|
||||
source_out_diff = main_clip_out - rw_clip_out
|
||||
|
||||
if source_in_diff:
|
||||
self.rw_clip_source_in += source_in_diff
|
||||
if source_out_diff:
|
||||
self.rw_clip_source_out += source_out_diff
|
||||
|
||||
# review clip durations
|
||||
rw_clip_duration = (
|
||||
self.rw_clip_source_out - self.rw_clip_source_in) + 1
|
||||
rw_clip_duration_h = rw_clip_duration + (
|
||||
handle_start + handle_end)
|
||||
|
||||
# add created data to review item data
|
||||
instance.data["reviewItemData"] = {
|
||||
"mediaDuration": rw_source_duration
|
||||
}
|
||||
|
||||
file_dir = os.path.dirname(self.rw_source_path)
|
||||
file = os.path.basename(self.rw_source_path)
|
||||
ext = os.path.splitext(file)[-1]
|
||||
|
||||
# detect if sequence
|
||||
if not is_sequence:
|
||||
# is video file
|
||||
files = file
|
||||
else:
|
||||
files = list()
|
||||
spliter, padding = get_sequence_pattern_and_padding(file)
|
||||
self.log.debug("_ spliter, padding: {}, {}".format(
|
||||
spliter, padding))
|
||||
base_name = file.split(spliter)[0]
|
||||
|
||||
# define collection and calculate frame range
|
||||
collection = clique.Collection(base_name, ext, padding, set(range(
|
||||
int(rw_source_first + int(
|
||||
self.rw_clip_source_in - handle_start)),
|
||||
int(rw_source_first + int(
|
||||
self.rw_clip_source_out + handle_end) + 1))))
|
||||
self.log.debug("_ collection: {}".format(collection))
|
||||
|
||||
real_files = os.listdir(file_dir)
|
||||
self.log.debug("_ real_files: {}".format(real_files))
|
||||
|
||||
# collect frames to repre files list
|
||||
for item in collection:
|
||||
if item not in real_files:
|
||||
self.log.debug("_ item: {}".format(item))
|
||||
continue
|
||||
files.append(item)
|
||||
|
||||
# add prep tag
|
||||
tags.extend(["prep", "delete"])
|
||||
|
||||
# change label
|
||||
instance.data["label"] = "{0} - ({1})".format(
|
||||
instance.data["label"], ext
|
||||
)
|
||||
|
||||
self.log.debug("Instance review: {}".format(instance.data["name"]))
|
||||
|
||||
# adding representation for review mov
|
||||
representation = {
|
||||
"files": files,
|
||||
"stagingDir": file_dir,
|
||||
"frameStart": rw_source_first + self.rw_clip_source_in,
|
||||
"frameEnd": rw_source_first + self.rw_clip_source_out,
|
||||
"frameStartFtrack": int(
|
||||
self.rw_clip_source_in - handle_start),
|
||||
"frameEndFtrack": int(self.rw_clip_source_out + handle_end),
|
||||
"step": 1,
|
||||
"fps": instance.data["fps"],
|
||||
"name": "review",
|
||||
"tags": tags,
|
||||
"ext": ext[1:]
|
||||
}
|
||||
|
||||
if rw_source_duration > rw_clip_duration_h:
|
||||
self.log.debug("Media duration higher: {}".format(
|
||||
(rw_source_duration - rw_clip_duration_h)))
|
||||
representation.update({
|
||||
"frameStart": rw_source_first + int(
|
||||
self.rw_clip_source_in - handle_start),
|
||||
"frameEnd": rw_source_first + int(
|
||||
self.rw_clip_source_out + handle_end),
|
||||
"tags": ["_cut-bigger", "prep", "delete"]
|
||||
})
|
||||
elif rw_source_duration < rw_clip_duration_h:
|
||||
self.log.debug("Media duration higher: {}".format(
|
||||
(rw_source_duration - rw_clip_duration_h)))
|
||||
representation.update({
|
||||
"frameStart": rw_source_first + int(
|
||||
self.rw_clip_source_in - handle_start),
|
||||
"frameEnd": rw_source_first + int(
|
||||
self.rw_clip_source_out + handle_end),
|
||||
"tags": ["prep", "delete"]
|
||||
})
|
||||
|
||||
instance.data["representations"].append(representation)
|
||||
|
||||
self.create_thumbnail(instance)
|
||||
|
||||
self.log.debug(
|
||||
"Added representations: {}".format(
|
||||
instance.data["representations"]))
|
||||
|
||||
def create_thumbnail(self, instance):
|
||||
source_file = os.path.basename(self.rw_source_path)
|
||||
spliter, padding = get_sequence_pattern_and_padding(source_file)
|
||||
|
||||
if spliter:
|
||||
head, ext = source_file.split(spliter)
|
||||
else:
|
||||
head, ext = os.path.splitext(source_file)
|
||||
|
||||
# staging dir creation
|
||||
staging_dir = os.path.dirname(
|
||||
self.rw_source_path)
|
||||
|
||||
# get thumbnail frame from the middle
|
||||
thumb_frame = int(self.rw_clip_source_in + (
|
||||
(self.rw_clip_source_out - self.rw_clip_source_in) / 2))
|
||||
|
||||
thumb_file = "{}thumbnail{}{}".format(head, thumb_frame, ".png")
|
||||
thumb_path = os.path.join(staging_dir, thumb_file)
|
||||
|
||||
thumbnail = self.rw_clip.thumbnail(thumb_frame).save(
|
||||
thumb_path,
|
||||
format='png'
|
||||
)
|
||||
self.log.debug(
|
||||
"__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame))
|
||||
|
||||
self.log.debug("__ thumbnail: {}".format(thumbnail))
|
||||
thumb_representation = {
|
||||
'files': thumb_file,
|
||||
'stagingDir': staging_dir,
|
||||
'name': "thumbnail",
|
||||
'thumbnail': True,
|
||||
'ext': "png"
|
||||
}
|
||||
instance.data["representations"].append(
|
||||
thumb_representation)
|
||||
|
||||
def version_data(self, instance):
|
||||
transfer_data = [
|
||||
"handleStart", "handleEnd", "sourceIn", "sourceOut",
|
||||
"frameStart", "frameEnd", "sourceInH", "sourceOutH",
|
||||
"clipIn", "clipOut", "clipInH", "clipOutH", "asset",
|
||||
"track"
|
||||
]
|
||||
|
||||
version_data = dict()
|
||||
# pass data to version
|
||||
version_data.update({k: instance.data[k] for k in transfer_data})
|
||||
|
||||
if 'version' in instance.data:
|
||||
version_data["version"] = instance.data["version"]
|
||||
|
||||
# add to data of representation
|
||||
version_data.update({
|
||||
"colorspace": self.rw_clip.sourceMediaColourTransform(),
|
||||
"families": instance.data["families"],
|
||||
"subset": instance.data["subset"],
|
||||
"fps": instance.data["fps"]
|
||||
})
|
||||
instance.data["versionData"] = version_data
@@ -1,57 +0,0 @@
import os
from hiero.exporters.FnExportUtil import writeSequenceAudioWithHandles
import pyblish
import openpype


class ExtractAudioFile(openpype.api.Extractor):
    """Extracts audio subset file from all active timeline audio tracks"""

    order = pyblish.api.ExtractorOrder
    label = "Extract Subset Audio"
    hosts = ["hiero"]
    families = ["audio"]
    match = pyblish.api.Intersection

    def process(self, instance):
        # get sequence
        sequence = instance.context.data["activeSequence"]
        subset = instance.data["subset"]

        # get timeline in / out
        clip_in = instance.data["clipIn"]
        clip_out = instance.data["clipOut"]
        # get handles from context
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        staging_dir = self.staging_dir(instance)
        self.log.info("Created staging dir: {}...".format(staging_dir))

        # path to wav file
        audio_file = os.path.join(
            staging_dir, "{}.wav".format(subset)
        )

        # export audio to disk
        writeSequenceAudioWithHandles(
            audio_file,
            sequence,
            clip_in,
            clip_out,
            handle_start,
            handle_end
        )

        # add to representations
        if not instance.data.get("representations"):
            instance.data["representations"] = list()

        representation = {
            'files': os.path.basename(audio_file),
            'stagingDir': staging_dir,
            'name': "wav",
            'ext': "wav"
        }

        instance.data["representations"].append(representation)
@ -1,334 +0,0 @@
|
|||
import os
|
||||
import sys
|
||||
import six
|
||||
import errno
|
||||
from pyblish import api
|
||||
import openpype
|
||||
import clique
|
||||
from avalon.vendor import filelink
|
||||
|
||||
|
||||
class ExtractReviewPreparation(openpype.api.Extractor):
|
||||
"""Cut up clips from long video file"""
|
||||
|
||||
order = api.ExtractorOrder
|
||||
label = "Extract Review Preparation"
|
||||
hosts = ["hiero"]
|
||||
families = ["review"]
|
||||
|
||||
# presets
|
||||
tags_addition = []
|
||||
|
||||
def process(self, instance):
|
||||
inst_data = instance.data
|
||||
asset = inst_data["asset"]
|
||||
review_item_data = instance.data.get("reviewItemData")
|
||||
|
||||
# get representation and loop them
|
||||
representations = inst_data["representations"]
|
||||
|
||||
# get resolution default
|
||||
resolution_width = inst_data["resolutionWidth"]
|
||||
resolution_height = inst_data["resolutionHeight"]
|
||||
|
||||
# frame range data
|
||||
media_duration = review_item_data["mediaDuration"]
|
||||
|
||||
ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg")
|
||||
ffprobe_path = openpype.lib.get_ffmpeg_tool_path("ffprobe")
|
||||
|
||||
# filter out mov and img sequences
|
||||
representations_new = representations[:]
|
||||
for repre in representations:
|
||||
input_args = list()
|
||||
output_args = list()
|
||||
|
||||
tags = repre.get("tags", [])
|
||||
|
||||
# check if supported tags are in representation for activation
|
||||
filter_tag = False
|
||||
for tag in ["_cut-bigger", "prep"]:
|
||||
if tag in tags:
|
||||
filter_tag = True
|
||||
break
|
||||
if not filter_tag:
|
||||
continue
|
||||
|
||||
self.log.debug("__ repre: {}".format(repre))
|
||||
|
||||
files = repre.get("files")
|
||||
staging_dir = repre.get("stagingDir")
|
||||
fps = repre.get("fps")
|
||||
ext = repre.get("ext")
|
||||
|
||||
# make paths
|
||||
full_output_dir = os.path.join(
|
||||
staging_dir, "cuts")
|
||||
|
||||
if isinstance(files, list):
|
||||
new_files = list()
|
||||
|
||||
# frame range delivery included handles
|
||||
frame_start = (
|
||||
inst_data["frameStart"] - inst_data["handleStart"])
|
||||
frame_end = (
|
||||
inst_data["frameEnd"] + inst_data["handleEnd"])
|
||||
self.log.debug("_ frame_start: {}".format(frame_start))
|
||||
self.log.debug("_ frame_end: {}".format(frame_end))
|
||||
|
||||
# make collection from input files list
|
||||
collections, remainder = clique.assemble(files)
|
||||
collection = collections.pop()
|
||||
self.log.debug("_ collection: {}".format(collection))
|
||||
|
||||
# name components
|
||||
head = collection.format("{head}")
|
||||
padding = collection.format("{padding}")
|
||||
tail = collection.format("{tail}")
|
||||
self.log.debug("_ head: {}".format(head))
|
||||
self.log.debug("_ padding: {}".format(padding))
|
||||
self.log.debug("_ tail: {}".format(tail))
|
||||
|
||||
# make destination file with instance data
|
||||
# frame start and end range
|
||||
index = 0
|
||||
for image in collection:
|
||||
dst_file_num = frame_start + index
|
||||
dst_file_name = head + str(padding % dst_file_num) + tail
|
||||
src = os.path.join(staging_dir, image)
|
||||
dst = os.path.join(full_output_dir, dst_file_name)
|
||||
self.log.info("Creating temp hardlinks: {}".format(dst))
|
||||
self.hardlink_file(src, dst)
|
||||
new_files.append(dst_file_name)
|
||||
index += 1
|
||||
|
||||
self.log.debug("_ new_files: {}".format(new_files))
|
||||
|
||||
else:
|
||||
# ffmpeg when single file
|
||||
new_files = "{}_{}".format(asset, files)
|
||||
|
||||
# frame range
|
||||
frame_start = repre.get("frameStart")
|
||||
frame_end = repre.get("frameEnd")
|
||||
|
||||
full_input_path = os.path.join(
|
||||
staging_dir, files)
|
||||
|
||||
os.path.isdir(full_output_dir) or os.makedirs(full_output_dir)
|
||||
|
||||
full_output_path = os.path.join(
|
||||
full_output_dir, new_files)
|
||||
|
||||
self.log.debug(
|
||||
"__ full_input_path: {}".format(full_input_path))
|
||||
self.log.debug(
|
||||
"__ full_output_path: {}".format(full_output_path))
|
||||
|
||||
# check if audio stream is in input video file
|
||||
ffprob_cmd = (
|
||||
"\"{ffprobe_path}\" -i \"{full_input_path}\" -show_streams"
|
||||
" -select_streams a -loglevel error"
|
||||
).format(**locals())
|
||||
|
||||
self.log.debug("ffprob_cmd: {}".format(ffprob_cmd))
|
||||
audio_check_output = openpype.api.run_subprocess(ffprob_cmd)
|
||||
self.log.debug(
|
||||
"audio_check_output: {}".format(audio_check_output))
|
||||
|
||||
# Fix one frame difference
|
||||
""" TODO: this is just work-around for issue:
|
||||
https://github.com/pypeclub/pype/issues/659
|
||||
"""
|
||||
frame_duration_extend = 1
|
||||
if audio_check_output and ("audio" in inst_data["families"]):
|
||||
frame_duration_extend = 0
|
||||
|
||||
# translate frame to sec
|
||||
start_sec = float(frame_start) / fps
|
||||
duration_sec = float(
|
||||
(frame_end - frame_start) + frame_duration_extend) / fps
|
||||
|
||||
empty_add = None
|
||||
|
||||
# check if not missing frames at start
|
||||
if (start_sec < 0) or (media_duration < frame_end):
|
||||
# for later switching off the `-c:v copy` output arg
|
||||
empty_add = True
|
||||
|
||||
# init empty variables
|
||||
video_empty_start = video_layer_start = ""
|
||||
audio_empty_start = audio_layer_start = ""
|
||||
video_empty_end = video_layer_end = ""
|
||||
audio_empty_end = audio_layer_end = ""
|
||||
audio_input = audio_output = ""
|
||||
v_inp_idx = 0
|
||||
concat_n = 1
|
||||
|
||||
# try to get video native resolution data
|
||||
try:
|
||||
resolution_output = openpype.api.run_subprocess((
|
||||
"\"{ffprobe_path}\" -i \"{full_input_path}\""
|
||||
" -v error "
|
||||
"-select_streams v:0 -show_entries "
|
||||
"stream=width,height -of csv=s=x:p=0"
|
||||
).format(**locals()))
|
||||
|
||||
x, y = resolution_output.split("x")
|
||||
resolution_width = int(x)
|
||||
resolution_height = int(y)
|
||||
except Exception as _ex:
|
||||
self.log.warning(
|
||||
"Video native resolution is untracable: {}".format(
|
||||
_ex))
|
||||
|
||||
if audio_check_output:
|
||||
# adding input for empty audio
|
||||
input_args.append("-f lavfi -i anullsrc")
|
||||
|
||||
# define audio empty concat variables
|
||||
audio_input = "[1:a]"
|
||||
audio_output = ":a=1"
|
||||
v_inp_idx = 1
|
||||
|
||||
# adding input for video black frame
|
||||
input_args.append((
|
||||
"-f lavfi -i \"color=c=black:"
|
||||
"s={resolution_width}x{resolution_height}:r={fps}\""
|
||||
).format(**locals()))
|
||||
|
||||
if (start_sec < 0):
|
||||
# recalculate input video timing
|
||||
empty_start_dur = abs(start_sec)
|
||||
start_sec = 0
|
||||
duration_sec = float(frame_end - (
|
||||
frame_start + (empty_start_dur * fps)) + 1) / fps
|
||||
|
||||
# define starting empty video concat variables
|
||||
video_empty_start = (
|
||||
"[{v_inp_idx}]trim=duration={empty_start_dur}[gv0];" # noqa
|
||||
).format(**locals())
|
||||
video_layer_start = "[gv0]"
|
||||
|
||||
if audio_check_output:
|
||||
# define starting empty audio concat variables
|
||||
audio_empty_start = (
|
||||
"[0]atrim=duration={empty_start_dur}[ga0];"
|
||||
).format(**locals())
|
||||
audio_layer_start = "[ga0]"
|
||||
|
||||
# alter concat number of clips
|
||||
concat_n += 1
|
||||
|
||||
# check if not missing frames at the end
|
||||
if (media_duration < frame_end):
|
||||
# recalculate timing
|
||||
empty_end_dur = float(
|
||||
frame_end - media_duration + 1) / fps
|
||||
duration_sec = float(
|
||||
media_duration - frame_start) / fps
|
||||
|
||||
# define ending empty video concat variables
|
||||
video_empty_end = (
|
||||
"[{v_inp_idx}]trim=duration={empty_end_dur}[gv1];"
|
||||
).format(**locals())
|
||||
video_layer_end = "[gv1]"
|
||||
|
||||
if audio_check_output:
|
||||
# define ending empty audio concat variables
|
||||
audio_empty_end = (
|
||||
"[0]atrim=duration={empty_end_dur}[ga1];"
|
||||
).format(**locals())
|
||||
audio_layer_end = "[ga0]"
|
||||
|
||||
# alter concat number of clips
|
||||
concat_n += 1
|
||||
|
||||
# concatenating the black frames together
|
||||
output_args.append((
|
||||
"-filter_complex \""
|
||||
"{audio_empty_start}"
|
||||
"{video_empty_start}"
|
||||
"{audio_empty_end}"
|
||||
"{video_empty_end}"
|
||||
"{video_layer_start}{audio_layer_start}[1:v]{audio_input}" # noqa
|
||||
"{video_layer_end}{audio_layer_end}"
|
||||
"concat=n={concat_n}:v=1{audio_output}\""
|
||||
).format(**locals()))
|
||||
|
||||
# append ffmpeg input video clip
|
||||
input_args.append("-ss {}".format(start_sec))
|
||||
input_args.append("-t {}".format(duration_sec))
|
||||
input_args.append("-i \"{}\"".format(full_input_path))
|
||||
|
||||
# add copy audio video codec if only shortening clip
|
||||
if ("_cut-bigger" in tags) and (not empty_add):
|
||||
output_args.append("-c:v copy")
|
||||
|
||||
# make sure there is no frame-to-frame compression
|
||||
output_args.append("-intra")
|
||||
|
||||
# output filename
|
||||
output_args.append("-y \"{}\"".format(full_output_path))
|
||||
|
||||
mov_args = [
|
||||
"\"{}\"".format(ffmpeg_path),
|
||||
" ".join(input_args),
|
||||
" ".join(output_args)
|
||||
]
|
||||
subprcs_cmd = " ".join(mov_args)
|
||||
|
||||
# run subprocess
|
||||
self.log.debug("Executing: {}".format(subprcs_cmd))
|
||||
output = openpype.api.run_subprocess(subprcs_cmd)
|
||||
self.log.debug("Output: {}".format(output))
|
||||
|
||||
repre_new = {
|
||||
"files": new_files,
|
||||
"stagingDir": full_output_dir,
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": frame_end,
|
||||
"frameStartFtrack": frame_start,
|
||||
"frameEndFtrack": frame_end,
|
||||
"step": 1,
|
||||
"fps": fps,
|
||||
"name": "cut_up_preview",
|
||||
"tags": [
|
||||
"review", "ftrackreview", "delete"] + self.tags_addition,
|
||||
"ext": ext,
|
||||
"anatomy_template": "publish"
|
||||
}
|
||||
|
||||
representations_new.append(repre_new)
|
||||
|
||||
for repre in representations_new:
|
||||
if ("delete" in repre.get("tags", [])) and (
|
||||
"cut_up_preview" not in repre["name"]):
|
||||
representations_new.remove(repre)
|
||||
|
||||
self.log.debug(
|
||||
"Representations: {}".format(representations_new))
|
||||
instance.data["representations"] = representations_new
|
||||
|
||||
def hardlink_file(self, src, dst):
|
||||
dirname = os.path.dirname(dst)
|
||||
|
||||
# make sure the destination folder exist
|
||||
try:
|
||||
os.makedirs(dirname)
|
||||
except OSError as e:
|
||||
if e.errno == errno.EEXIST:
|
||||
pass
|
||||
else:
|
||||
self.log.critical("An unexpected error occurred.")
|
||||
six.reraise(*sys.exc_info())
|
||||
|
||||
# create hardlinked file
|
||||
try:
|
||||
filelink.create(src, dst, filelink.HARDLINK)
|
||||
except OSError as e:
|
||||
if e.errno == errno.EEXIST:
|
||||
pass
|
||||
else:
|
||||
self.log.critical("An unexpected error occurred.")
|
||||
six.reraise(*sys.exc_info())
|
||||
|
|
@ -0,0 +1,171 @@
|
|||
from pyblish import api
|
||||
import hiero
|
||||
import math
|
||||
from openpype.hosts.hiero.otio.hiero_export import create_otio_time_range
|
||||
|
||||
class PrecollectRetime(api.InstancePlugin):
|
||||
"""Calculate Retiming of selected track items."""
|
||||
|
||||
order = api.CollectorOrder - 0.578
|
||||
label = "Precollect Retime"
|
||||
hosts = ["hiero"]
|
||||
families = ['retime_']
|
||||
|
||||
def process(self, instance):
|
||||
if not instance.data.get("versionData"):
|
||||
instance.data["versionData"] = {}
|
||||
|
||||
# get basic variables
|
||||
otio_clip = instance.data["otioClip"]
|
||||
|
||||
source_range = otio_clip.source_range
|
||||
oc_source_fps = source_range.start_time.rate
|
||||
oc_source_in = source_range.start_time.value
|
||||
|
||||
handle_start = instance.data["handleStart"]
|
||||
handle_end = instance.data["handleEnd"]
|
||||
frame_start = instance.data["frameStart"]
|
||||
|
||||
track_item = instance.data["item"]
|
||||
|
||||
# define basic clip frame range variables
|
||||
timeline_in = int(track_item.timelineIn())
|
||||
timeline_out = int(track_item.timelineOut())
|
||||
source_in = int(track_item.sourceIn())
|
||||
source_out = int(track_item.sourceOut())
|
||||
speed = track_item.playbackSpeed()
|
||||
|
||||
# calculate available material before retime
|
||||
available_in = int(track_item.handleInLength() * speed)
|
||||
available_out = int(track_item.handleOutLength() * speed)
|
||||
|
||||
self.log.debug((
|
||||
"_BEFORE: \n timeline_in: `{0}`,\n timeline_out: `{1}`, \n "
|
||||
"source_in: `{2}`,\n source_out: `{3}`,\n speed: `{4}`,\n "
|
||||
"handle_start: `{5}`,\n handle_end: `{6}`").format(
|
||||
timeline_in,
|
||||
timeline_out,
|
||||
source_in,
|
||||
source_out,
|
||||
speed,
|
||||
handle_start,
|
||||
handle_end
|
||||
))
|
||||
|
||||
# loop within subtrack items
|
||||
time_warp_nodes = []
|
||||
source_in_change = 0
|
||||
source_out_change = 0
|
||||
for s_track_item in track_item.linkedItems():
|
||||
if isinstance(s_track_item, hiero.core.EffectTrackItem) \
|
||||
and "TimeWarp" in s_track_item.node().Class():
|
||||
|
||||
# adding timewarp attribute to instance
|
||||
time_warp_nodes = []
|
||||
|
||||
# ignore item if not enabled
|
||||
if s_track_item.isEnabled():
|
||||
node = s_track_item.node()
|
||||
name = node["name"].value()
|
||||
look_up = node["lookup"].value()
|
||||
animated = node["lookup"].isAnimated()
|
||||
if animated:
|
||||
look_up = [
|
||||
((node["lookup"].getValueAt(i)) - i)
|
||||
for i in range(
|
||||
(timeline_in - handle_start),
|
||||
(timeline_out + handle_end) + 1)
|
||||
]
|
||||
# calculate difference
|
||||
diff_in = (node["lookup"].getValueAt(
|
||||
timeline_in)) - timeline_in
|
||||
diff_out = (node["lookup"].getValueAt(
|
||||
timeline_out)) - timeline_out
|
||||
|
||||
# calculate source
|
||||
source_in_change += diff_in
|
||||
source_out_change += diff_out
|
||||
|
||||
# calculate speed
|
||||
speed_in = (node["lookup"].getValueAt(timeline_in) / (
|
||||
float(timeline_in) * .01)) * .01
|
||||
speed_out = (node["lookup"].getValueAt(timeline_out) / (
|
||||
float(timeline_out) * .01)) * .01
|
||||
|
||||
# calculate handles
|
||||
handle_start = int(
|
||||
math.ceil(
|
||||
(handle_start * speed_in * 1000) / 1000.0)
|
||||
)
|
||||
|
||||
handle_end = int(
|
||||
math.ceil(
|
||||
(handle_end * speed_out * 1000) / 1000.0)
|
||||
)
|
||||
self.log.debug(
|
||||
("diff_in, diff_out", diff_in, diff_out))
|
||||
self.log.debug(
|
||||
("source_in_change, source_out_change",
|
||||
source_in_change, source_out_change))
|
||||
|
||||
time_warp_nodes.append({
|
||||
"Class": "TimeWarp",
|
||||
"name": name,
|
||||
"lookup": look_up
|
||||
})
|
||||
|
||||
self.log.debug(
|
||||
"timewarp source in changes: in {}, out {}".format(
|
||||
source_in_change, source_out_change))
|
||||
|
||||
# recalculate handles by the speed
|
||||
handle_start *= speed
|
||||
handle_end *= speed
|
||||
self.log.debug("speed: handle_start: '{0}', handle_end: '{1}'".format(
|
||||
handle_start, handle_end))
|
||||
|
||||
# recalculate source with timewarp and by the speed
|
||||
source_in += int(source_in_change)
|
||||
source_out += int(source_out_change * speed)
|
||||
|
||||
source_in_h = int(source_in - math.ceil(
|
||||
(handle_start * 1000) / 1000.0))
|
||||
source_out_h = int(source_out + math.ceil(
|
||||
(handle_end * 1000) / 1000.0))
|
||||
|
||||
self.log.debug(
|
||||
"retimed: source_in_h: '{0}', source_out_h: '{1}'".format(
|
||||
source_in_h, source_out_h))
|
||||
|
||||
# add all data to Instance
|
||||
instance.data["handleStart"] = handle_start
|
||||
instance.data["handleEnd"] = handle_end
|
||||
instance.data["sourceIn"] = source_in
|
||||
instance.data["sourceOut"] = source_out
|
||||
instance.data["sourceInH"] = source_in_h
|
||||
instance.data["sourceOutH"] = source_out_h
|
||||
instance.data["speed"] = speed
|
||||
|
||||
source_handle_start = source_in_h - source_in
|
||||
# frame_start = instance.data["frameStart"] + source_handle_start
|
||||
duration = source_out_h - source_in_h
|
||||
frame_end = int(frame_start + duration - (handle_start + handle_end))
|
||||
|
||||
instance.data["versionData"].update({
|
||||
"retime": True,
|
||||
"speed": speed,
|
||||
"timewarps": time_warp_nodes,
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": frame_end,
|
||||
"handleStart": abs(source_handle_start),
|
||||
"handleEnd": source_out_h - source_out
|
||||
})
|
||||
self.log.debug("versionData: {}".format(instance.data["versionData"]))
|
||||
self.log.debug("sourceIn: {}".format(instance.data["sourceIn"]))
|
||||
self.log.debug("sourceOut: {}".format(instance.data["sourceOut"]))
|
||||
self.log.debug("speed: {}".format(instance.data["speed"]))
|
||||
|
||||
# change otio clip data
|
||||
instance.data["otioClip"].source_range = create_otio_time_range(
|
||||
oc_source_in, (source_out - source_in + 1), oc_source_fps)
|
||||
self.log.debug("otioClip: {}".format(instance.data["otioClip"]))
@@ -1,25 +0,0 @@
import pyblish
from openpype.hosts.hiero.api import is_overlapping


class ValidateAudioFile(pyblish.api.InstancePlugin):
    """Validate that the audio subset has available audio track clips"""

    order = pyblish.api.ValidatorOrder
    label = "Validate Audio Tracks"
    hosts = ["hiero"]
    families = ["audio"]

    def process(self, instance):
        clip = instance.data["item"]
        audio_tracks = instance.context.data["audioTracks"]
        audio_clip = None

        for a_track in audio_tracks:
            for item in a_track.items():
                if is_overlapping(item, clip):
                    audio_clip = item

        assert audio_clip, "Missing related audio clip for clip {}".format(
            clip.name()
        )
@@ -1,22 +0,0 @@
from pyblish import api


class ValidateHierarchy(api.InstancePlugin):
    """Validate clip's hierarchy data."""

    order = api.ValidatorOrder
    families = ["clip", "shot"]
    label = "Validate Hierarchy"
    hosts = ["hiero"]

    def process(self, instance):
        asset_name = instance.data.get("asset", None)
        hierarchy = instance.data.get("hierarchy", None)
        parents = instance.data.get("parents", None)

        assert hierarchy, "Hierarchy Tag has to be set \
            and added to clip `{}`".format(asset_name)
        assert parents, "Parents built from Hierarchy Tag have \
            to be set and added to clip `{}`".format(asset_name)
@@ -1,31 +0,0 @@
from pyblish import api


class ValidateNames(api.InstancePlugin):
    """Validate sequence, video track and track item names.

    When output directories are created with the name of an item, a
    trailing whitespace will make the extraction fail.
    Exact matching is used to optimize processing.
    """

    order = api.ValidatorOrder
    families = ["clip"]
    match = api.Exact
    label = "Names"
    hosts = ["hiero"]

    def process(self, instance):

        item = instance.data["item"]

        msg = "Track item \"{0}\" ends with a whitespace."
        assert not item.name().endswith(" "), msg.format(item.name())

        msg = "Video track \"{0}\" ends with a whitespace."
        msg = msg.format(item.parent().name())
        assert not item.parent().name().endswith(" "), msg

        msg = "Sequence \"{0}\" ends with a whitespace."
        msg = msg.format(item.parent().parent().name())
        assert not item.parent().parent().name().endswith(" "), msg
@ -43,6 +43,7 @@ import os
|
|||
from abc import ABCMeta, abstractmethod
|
||||
|
||||
import six
|
||||
import attr
|
||||
|
||||
import openpype.hosts.maya.api.lib as lib
|
||||
|
||||
|
|
@ -88,6 +89,22 @@ IMAGE_PREFIXES = {
|
|||
}
|
||||
|
||||
|
||||
@attr.s
|
||||
class LayerMetadata(object):
|
||||
"""Data class for Render Layer metadata."""
|
||||
frameStart = attr.ib()
|
||||
frameEnd = attr.ib()
|
||||
cameras = attr.ib()
|
||||
sceneName = attr.ib()
|
||||
layerName = attr.ib()
|
||||
renderer = attr.ib()
|
||||
defaultExt = attr.ib()
|
||||
filePrefix = attr.ib()
|
||||
enabledAOVs = attr.ib()
|
||||
frameStep = attr.ib(default=1)
|
||||
padding = attr.ib(default=4)
|
||||
|
||||
|
||||
class ExpectedFiles:
|
||||
"""Class grouping functionality for all supported renderers.
|
||||
|
||||
|
|
@ -95,7 +112,6 @@ class ExpectedFiles:
|
|||
multipart (bool): Flag if multipart exrs are used.
|
||||
|
||||
"""
|
||||
|
||||
multipart = False
|
||||
|
||||
def __init__(self, render_instance):
|
||||
|
|
@ -142,6 +158,7 @@ class ExpectedFiles:
|
|||
)
|
||||
|
||||
def _get_files(self, renderer):
|
||||
# type: (AExpectedFiles) -> list
|
||||
files = renderer.get_files()
|
||||
self.multipart = renderer.multipart
|
||||
return files
|
||||
|
|
@ -193,7 +210,7 @@ class AExpectedFiles:
|
|||
def get_renderer_prefix(self):
|
||||
"""Return prefix for specific renderer.
|
||||
|
||||
This is for most renderers the same and can be overriden if needed.
|
||||
This is for most renderers the same and can be overridden if needed.
|
||||
|
||||
Returns:
|
||||
str: String with image prefix containing tokens
|
||||
|
|
@ -214,6 +231,7 @@ class AExpectedFiles:
|
|||
return file_prefix
|
||||
|
||||
def _get_layer_data(self):
|
||||
# type: () -> LayerMetadata
|
||||
# ______________________________________________
|
||||
# ____________________/ ____________________________________________/
|
||||
# 1 - get scene name /__________________/
|
||||
|
|
@ -230,30 +248,31 @@ class AExpectedFiles:
|
|||
if self.layer.startswith("rs_"):
|
||||
layer_name = self.layer[3:]
|
||||
|
||||
return {
|
||||
"frameStart": int(self.get_render_attribute("startFrame")),
|
||||
"frameEnd": int(self.get_render_attribute("endFrame")),
|
||||
"frameStep": int(self.get_render_attribute("byFrameStep")),
|
||||
"padding": int(self.get_render_attribute("extensionPadding")),
|
||||
return LayerMetadata(
|
||||
frameStart=int(self.get_render_attribute("startFrame")),
|
||||
frameEnd=int(self.get_render_attribute("endFrame")),
|
||||
frameStep=int(self.get_render_attribute("byFrameStep")),
|
||||
padding=int(self.get_render_attribute("extensionPadding")),
|
||||
# if we have <camera> token in prefix path we'll expect output for
|
||||
# every renderable camera in layer.
|
||||
"cameras": self.get_renderable_cameras(),
|
||||
"sceneName": scene_name,
|
||||
"layerName": layer_name,
|
||||
"renderer": self.renderer,
|
||||
"defaultExt": cmds.getAttr("defaultRenderGlobals.imfPluginKey"),
|
||||
"filePrefix": file_prefix,
|
||||
"enabledAOVs": self.get_aovs(),
|
||||
}
|
||||
cameras=self.get_renderable_cameras(),
|
||||
sceneName=scene_name,
|
||||
layerName=layer_name,
|
||||
renderer=self.renderer,
|
||||
defaultExt=cmds.getAttr("defaultRenderGlobals.imfPluginKey"),
|
||||
filePrefix=file_prefix,
|
||||
enabledAOVs=self.get_aovs()
|
||||
)
|
||||
|
||||
def _generate_single_file_sequence(
|
||||
self, layer_data, force_aov_name=None):
|
||||
# type: (LayerMetadata, str) -> list
|
||||
expected_files = []
|
||||
for cam in layer_data["cameras"]:
|
||||
file_prefix = layer_data["filePrefix"]
|
||||
for cam in layer_data.cameras:
|
||||
file_prefix = layer_data.filePrefix
|
||||
mappings = (
|
||||
(R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
|
||||
(R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
|
||||
(R_SUBSTITUTE_SCENE_TOKEN, layer_data.sceneName),
|
||||
(R_SUBSTITUTE_LAYER_TOKEN, layer_data.layerName),
|
||||
(R_SUBSTITUTE_CAMERA_TOKEN, self.sanitize_camera_name(cam)),
|
||||
# this is required to remove unfilled aov token, for example
|
||||
# in Redshift
|
||||
|
|
@ -268,29 +287,30 @@ class AExpectedFiles:
|
|||
file_prefix = re.sub(regex, value, file_prefix)
|
||||
|
||||
for frame in range(
|
||||
int(layer_data["frameStart"]),
|
||||
int(layer_data["frameEnd"]) + 1,
|
||||
int(layer_data["frameStep"]),
|
||||
int(layer_data.frameStart),
|
||||
int(layer_data.frameEnd) + 1,
|
||||
int(layer_data.frameStep),
|
||||
):
|
||||
expected_files.append(
|
||||
"{}.{}.{}".format(
|
||||
file_prefix,
|
||||
str(frame).rjust(layer_data["padding"], "0"),
|
||||
layer_data["defaultExt"],
|
||||
str(frame).rjust(layer_data.padding, "0"),
|
||||
layer_data.defaultExt,
|
||||
)
|
||||
)
|
||||
return expected_files
|
||||
|
||||
def _generate_aov_file_sequences(self, layer_data):
|
||||
# type: (LayerMetadata) -> list
|
||||
expected_files = []
|
||||
aov_file_list = {}
|
||||
for aov in layer_data["enabledAOVs"]:
|
||||
for cam in layer_data["cameras"]:
|
||||
file_prefix = layer_data["filePrefix"]
|
||||
for aov in layer_data.enabledAOVs:
|
||||
for cam in layer_data.cameras:
|
||||
file_prefix = layer_data.filePrefix
|
||||
|
||||
mappings = (
|
||||
(R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
|
||||
(R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
|
||||
(R_SUBSTITUTE_SCENE_TOKEN, layer_data.sceneName),
|
||||
(R_SUBSTITUTE_LAYER_TOKEN, layer_data.layerName),
|
||||
(R_SUBSTITUTE_CAMERA_TOKEN,
|
||||
self.sanitize_camera_name(cam)),
|
||||
(R_SUBSTITUTE_AOV_TOKEN, aov[0]),
|
||||
|
|
@ -303,14 +323,14 @@ class AExpectedFiles:
|
|||
|
||||
aov_files = []
|
||||
for frame in range(
|
||||
int(layer_data["frameStart"]),
|
||||
int(layer_data["frameEnd"]) + 1,
|
||||
int(layer_data["frameStep"]),
|
||||
int(layer_data.frameStart),
|
||||
int(layer_data.frameEnd) + 1,
|
||||
int(layer_data.frameStep),
|
||||
):
|
||||
aov_files.append(
|
||||
"{}.{}.{}".format(
|
||||
file_prefix,
|
||||
str(frame).rjust(layer_data["padding"], "0"),
|
||||
str(frame).rjust(layer_data.padding, "0"),
|
||||
aov[1],
|
||||
)
|
||||
)
|
||||
|
|
@ -318,12 +338,12 @@ class AExpectedFiles:
|
|||
# if we have more than one renderable camera, append
|
||||
# camera name to AOV to allow per camera AOVs.
|
||||
aov_name = aov[0]
|
||||
if len(layer_data["cameras"]) > 1:
|
||||
if len(layer_data.cameras) > 1:
|
||||
aov_name = "{}_{}".format(aov[0],
|
||||
self.sanitize_camera_name(cam))
|
||||
|
||||
aov_file_list[aov_name] = aov_files
|
||||
file_prefix = layer_data["filePrefix"]
|
||||
file_prefix = layer_data.filePrefix
|
||||
|
||||
expected_files.append(aov_file_list)
|
||||
return expected_files
|
||||
|
|
@ -340,14 +360,13 @@ class AExpectedFiles:
|
|||
layer_data = self._get_layer_data()
|
||||
|
||||
expected_files = []
|
||||
if layer_data.get("enabledAOVs"):
|
||||
expected_files = self._generate_aov_file_sequences(layer_data)
|
||||
if layer_data.enabledAOVs:
|
||||
return self._generate_aov_file_sequences(layer_data)
|
||||
else:
|
||||
expected_files = self._generate_single_file_sequence(layer_data)
|
||||
|
||||
return expected_files
|
||||
return self._generate_single_file_sequence(layer_data)
|
||||
|
||||
def get_renderable_cameras(self):
|
||||
# type: () -> list
|
||||
"""Get all renderable cameras.
|
||||
|
||||
Returns:
|
||||
|
|
@ -358,12 +377,11 @@ class AExpectedFiles:
|
|||
cmds.listRelatives(x, ap=True)[-1] for x in cmds.ls(cameras=True)
|
||||
]
|
||||
|
||||
renderable_cameras = []
|
||||
for cam in cam_parents:
|
||||
if self.maya_is_true(cmds.getAttr("{}.renderable".format(cam))):
|
||||
renderable_cameras.append(cam)
|
||||
|
||||
return renderable_cameras
|
||||
return [
|
||||
cam
|
||||
for cam in cam_parents
|
||||
if self.maya_is_true(cmds.getAttr("{}.renderable".format(cam)))
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
def maya_is_true(attr_val):
|
||||
|
|
@ -388,18 +406,17 @@ class AExpectedFiles:
|
|||
return bool(attr_val)
|
||||
|
||||
@staticmethod
|
||||
def get_layer_overrides(attr):
|
||||
"""Get overrides for attribute on given render layer.
|
||||
def get_layer_overrides(attribute):
|
||||
"""Get overrides for attribute on current render layer.
|
||||
|
||||
Args:
|
||||
attr (str): Maya attribute name.
|
||||
layer (str): Maya render layer name.
|
||||
attribute (str): Maya attribute name.
|
||||
|
||||
Returns:
|
||||
Value of attribute override.
|
||||
|
||||
"""
|
||||
connections = cmds.listConnections(attr, plugs=True)
|
||||
connections = cmds.listConnections(attribute, plugs=True)
|
||||
if connections:
|
||||
for connection in connections:
|
||||
if connection:
|
||||
|
|
@ -410,18 +427,18 @@ class AExpectedFiles:
|
|||
)
|
||||
yield cmds.getAttr(attr_name)
|
||||
|
||||
def get_render_attribute(self, attr):
|
||||
def get_render_attribute(self, attribute):
|
||||
"""Get attribute from render options.
|
||||
|
||||
Args:
|
||||
attr (str): name of attribute to be looked up.
|
||||
attribute (str): name of attribute to be looked up.
|
||||
|
||||
Returns:
|
||||
Attribute value
|
||||
|
||||
"""
|
||||
return lib.get_attr_in_layer(
|
||||
"defaultRenderGlobals.{}".format(attr), layer=self.layer
|
||||
"defaultRenderGlobals.{}".format(attribute), layer=self.layer
|
||||
)
|
||||
|
||||
|
||||
|
|
@ -543,13 +560,14 @@ class ExpectedFilesVray(AExpectedFiles):
|
|||
return prefix
|
||||
|
||||
def _get_layer_data(self):
|
||||
# type: () -> LayerMetadata
|
||||
"""Override to get vray specific extension."""
|
||||
layer_data = super(ExpectedFilesVray, self)._get_layer_data()
|
||||
default_ext = cmds.getAttr("vraySettings.imageFormatStr")
|
||||
if default_ext in ["exr (multichannel)", "exr (deep)"]:
|
||||
default_ext = "exr"
|
||||
layer_data["defaultExt"] = default_ext
|
||||
layer_data["padding"] = cmds.getAttr("vraySettings.fileNamePadding")
|
||||
layer_data.defaultExt = default_ext
|
||||
layer_data.padding = cmds.getAttr("vraySettings.fileNamePadding")
|
||||
return layer_data
|
||||
|
||||
def get_files(self):
|
||||
|
|
@ -565,7 +583,7 @@ class ExpectedFilesVray(AExpectedFiles):
|
|||
layer_data = self._get_layer_data()
|
||||
# remove 'beauty' from filenames as vray doesn't output it
|
||||
update = {}
|
||||
if layer_data.get("enabledAOVs"):
|
||||
if layer_data.enabledAOVs:
|
||||
for aov, seqs in expected_files[0].items():
|
||||
if aov.startswith("beauty"):
|
||||
new_list = []
|
||||
|
|
@ -653,13 +671,14 @@ class ExpectedFilesVray(AExpectedFiles):
|
|||
vray_name = None
|
||||
vray_explicit_name = None
|
||||
vray_file_name = None
|
||||
for attr in cmds.listAttr(node):
|
||||
if attr.startswith("vray_filename"):
|
||||
vray_file_name = cmds.getAttr("{}.{}".format(node, attr))
|
||||
elif attr.startswith("vray_name"):
|
||||
vray_name = cmds.getAttr("{}.{}".format(node, attr))
|
||||
elif attr.startswith("vray_explicit_name"):
|
||||
vray_explicit_name = cmds.getAttr("{}.{}".format(node, attr))
|
||||
for node_attr in cmds.listAttr(node):
|
||||
if node_attr.startswith("vray_filename"):
|
||||
vray_file_name = cmds.getAttr("{}.{}".format(node, node_attr))
|
||||
elif node_attr.startswith("vray_name"):
|
||||
vray_name = cmds.getAttr("{}.{}".format(node, node_attr))
|
||||
elif node_attr.startswith("vray_explicit_name"):
|
||||
vray_explicit_name = cmds.getAttr(
|
||||
"{}.{}".format(node, node_attr))
|
||||
|
||||
if vray_file_name is not None and vray_file_name != "":
|
||||
final_name = vray_file_name
|
||||
|
|
@ -725,7 +744,7 @@ class ExpectedFilesRedshift(AExpectedFiles):
|
|||
# Redshift doesn't merge Cryptomatte AOV to final exr. We need to check
|
||||
# for such condition and add it to list of expected files.
|
||||
|
||||
for aov in layer_data.get("enabledAOVs"):
|
||||
for aov in layer_data.enabledAOVs:
|
||||
if aov[0].lower() == "cryptomatte":
|
||||
aov_name = aov[0]
|
||||
expected_files.append(
|
||||
|
|
|
|||
|
|
@ -2124,7 +2124,7 @@ def bake_to_world_space(nodes,
|
|||
return world_space_nodes
|
||||
|
||||
|
||||
def load_capture_preset(path=None, data=None):
|
||||
def load_capture_preset(data=None):
|
||||
import capture
|
||||
|
||||
preset = data
|
||||
|
|
@ -2139,11 +2139,7 @@ def load_capture_preset(path=None, data=None):
|
|||
# GENERIC
|
||||
id = 'Generic'
|
||||
for key in preset[id]:
|
||||
if key.startswith('isolate'):
|
||||
pass
|
||||
# options['isolate'] = preset[id][key]
|
||||
else:
|
||||
options[str(key)] = preset[id][key]
|
||||
options[str(key)] = preset[id][key]
|
||||
|
||||
# RESOLUTION
|
||||
id = 'Resolution'
|
||||
|
|
@ -2156,6 +2152,10 @@ def load_capture_preset(path=None, data=None):
|
|||
for key in preset['Display Options']:
|
||||
if key.startswith('background'):
|
||||
disp_options[key] = preset['Display Options'][key]
|
||||
disp_options[key][0] = (float(disp_options[key][0])/255)
|
||||
disp_options[key][1] = (float(disp_options[key][1])/255)
|
||||
disp_options[key][2] = (float(disp_options[key][2])/255)
|
||||
disp_options[key].pop()
|
||||
else:
|
||||
disp_options['displayGradient'] = True
|
||||
|
||||
|
|
@ -2220,16 +2220,6 @@ def load_capture_preset(path=None, data=None):
|
|||
# use active sound track
|
||||
scene = capture.parse_active_scene()
|
||||
options['sound'] = scene['sound']
|
||||
cam_options = dict()
|
||||
cam_options['overscan'] = 1.0
|
||||
cam_options['displayFieldChart'] = False
|
||||
cam_options['displayFilmGate'] = False
|
||||
cam_options['displayFilmOrigin'] = False
|
||||
cam_options['displayFilmPivot'] = False
|
||||
cam_options['displayGateMask'] = False
|
||||
cam_options['displayResolution'] = False
|
||||
cam_options['displaySafeAction'] = False
|
||||
cam_options['displaySafeTitle'] = False
|
||||
|
||||
# options['display_options'] = temp_options
|
||||
|
||||
|
|
|
|||
|
|
@ -81,7 +81,10 @@ class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
        if c is not None:
            cmds.setAttr(groupName + ".useOutlinerColor", 1)
            cmds.setAttr(groupName + ".outlinerColor",
                         c[0], c[1], c[2])
                         (float(c[0])/255),
                         (float(c[1])/255),
                         (float(c[2])/255)
                         )

        self[:] = nodes

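Several of the loader hunks above and below make the same change: Maya's outlinerColor expects normalized 0-1 floats, while the configured family colors are stored in 0-255 range, so each component is divided by 255. A small sketch of that conversion as a helper; the helper name is illustrative and not part of the codebase:

def normalize_color(c):
    """Convert an (R, G, B) tuple in 0-255 range to Maya's 0-1 floats."""
    return tuple(float(channel) / 255 for channel in c)

# e.g. normalize_color((255, 128, 0)) -> (1.0, 0.50196..., 0.0)
# cmds.setAttr(node + ".outlinerColor", *normalize_color(c))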
|
|
|||
|
|
@ -38,7 +38,10 @@ class GpuCacheLoader(api.Loader):
|
|||
if c is not None:
|
||||
cmds.setAttr(root + ".useOutlinerColor", 1)
|
||||
cmds.setAttr(root + ".outlinerColor",
|
||||
c[0], c[1], c[2])
|
||||
(float(c[0])/255),
|
||||
(float(c[1])/255),
|
||||
(float(c[2])/255)
|
||||
)
|
||||
|
||||
# Create transform with shape
|
||||
transform_name = label + "_GPU"
|
||||
|
|
|
|||
|
|
@ -85,7 +85,11 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
|
|||
c = colors.get(family)
|
||||
if c is not None:
|
||||
groupNode.useOutlinerColor.set(1)
|
||||
groupNode.outlinerColor.set(c[0], c[1], c[2])
|
||||
groupNode.outlinerColor.set(
|
||||
(float(c[0])/255),
|
||||
(float(c[1])/255),
|
||||
(float(c[2])/255)
|
||||
)
|
||||
|
||||
self[:] = newNodes
|
||||
|
||||
|
|
|
|||
|
|
@ -62,7 +62,10 @@ class LoadVDBtoRedShift(api.Loader):
|
|||
if c is not None:
|
||||
cmds.setAttr(root + ".useOutlinerColor", 1)
|
||||
cmds.setAttr(root + ".outlinerColor",
|
||||
c[0], c[1], c[2])
|
||||
(float(c[0])/255),
|
||||
(float(c[1])/255),
|
||||
(float(c[2])/255)
|
||||
)
|
||||
|
||||
# Create VR
|
||||
volume_node = cmds.createNode("RedshiftVolumeShape",
|
||||
|
|
|
|||
|
|
@ -55,7 +55,10 @@ class LoadVDBtoVRay(api.Loader):
|
|||
if c is not None:
|
||||
cmds.setAttr(root + ".useOutlinerColor", 1)
|
||||
cmds.setAttr(root + ".outlinerColor",
|
||||
c[0], c[1], c[2])
|
||||
(float(c[0])/255),
|
||||
(float(c[1])/255),
|
||||
(float(c[2])/255)
|
||||
)
|
||||
|
||||
# Create VR
|
||||
grid_node = cmds.createNode("VRayVolumeGrid",
|
||||
|
|
|
|||
|
|
@ -74,7 +74,10 @@ class VRayProxyLoader(api.Loader):
|
|||
if c is not None:
|
||||
cmds.setAttr("{0}.useOutlinerColor".format(group_node), 1)
|
||||
cmds.setAttr("{0}.outlinerColor".format(group_node),
|
||||
c[0], c[1], c[2])
|
||||
(float(c[0])/255),
|
||||
(float(c[1])/255),
|
||||
(float(c[2])/255)
|
||||
)
|
||||
|
||||
return containerise(
|
||||
name=name,
|
||||
|
|
|
|||
|
|
@ -53,7 +53,10 @@ class VRaySceneLoader(api.Loader):
|
|||
if c is not None:
|
||||
cmds.setAttr("{0}.useOutlinerColor".format(group_node), 1)
|
||||
cmds.setAttr("{0}.outlinerColor".format(group_node),
|
||||
c[0], c[1], c[2])
|
||||
(float(c[0])/255),
|
||||
(float(c[1])/255),
|
||||
(float(c[2])/255)
|
||||
)
|
||||
|
||||
return containerise(
|
||||
name=name,
|
||||
|
|
|
|||
|
|
@ -66,7 +66,10 @@ class YetiCacheLoader(api.Loader):
|
|||
if c is not None:
|
||||
cmds.setAttr(group_name + ".useOutlinerColor", 1)
|
||||
cmds.setAttr(group_name + ".outlinerColor",
|
||||
c[0], c[1], c[2])
|
||||
(float(c[0])/255),
|
||||
(float(c[1])/255),
|
||||
(float(c[2])/255)
|
||||
)
|
||||
|
||||
nodes.append(group_node)
|
||||
|
||||
|
|
|
|||
|
|
@ -84,7 +84,10 @@ class YetiRigLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
|
|||
if c is not None:
|
||||
cmds.setAttr(groupName + ".useOutlinerColor", 1)
|
||||
cmds.setAttr(groupName + ".outlinerColor",
|
||||
c[0], c[1], c[2])
|
||||
(float(c[0])/255),
|
||||
(float(c[1])/255),
|
||||
(float(c[2])/255)
|
||||
)
|
||||
self[:] = nodes
|
||||
|
||||
return nodes
|
||||
|
|
|
|||
|
|
@ -49,9 +49,6 @@ class ExtractPlayblast(openpype.api.Extractor):
|
|||
|
||||
|
||||
preset['camera'] = camera
|
||||
preset['format'] = "image"
|
||||
preset['quality'] = 95
|
||||
preset['compression'] = "png"
|
||||
preset['start_frame'] = start
|
||||
preset['end_frame'] = end
|
||||
camera_option = preset.get("camera_option", {})
|
||||
|
|
@ -75,7 +72,7 @@ class ExtractPlayblast(openpype.api.Extractor):
|
|||
|
||||
# Isolate view is requested by having objects in the set besides a
|
||||
# camera.
|
||||
if instance.data.get("isolate"):
|
||||
if preset.pop("isolate_view", False) or instance.data.get("isolate"):
|
||||
preset["isolate"] = instance.data["setMembers"]
|
||||
|
||||
# Show/Hide image planes on request.
|
||||
|
|
@ -93,9 +90,6 @@ class ExtractPlayblast(openpype.api.Extractor):
|
|||
# playblast and viewer
|
||||
preset['viewer'] = False
|
||||
|
||||
# Remove panel key since it's internal value to capture_gui
|
||||
preset.pop("panel", None)
|
||||
|
||||
self.log.info('using viewport preset: {}'.format(preset))
|
||||
|
||||
path = capture.capture(**preset)
|
||||
|
|
|
|||
|
|
@ -12,10 +12,10 @@ import pymel.core as pm
|
|||
|
||||
|
||||
class ExtractThumbnail(openpype.api.Extractor):
|
||||
"""Extract a Camera as Alembic.
|
||||
"""Extract viewport thumbnail.
|
||||
|
||||
The cameras gets baked to world space by default. Only when the instance's
|
||||
`bakeToWorldSpace` is set to False it will include its full hierarchy.
|
||||
Takes review camera and creates a thumbnail based on viewport
|
||||
capture.
|
||||
|
||||
"""
|
||||
|
||||
|
|
@ -35,17 +35,14 @@ class ExtractThumbnail(openpype.api.Extractor):
|
|||
|
||||
try:
|
||||
preset = lib.load_capture_preset(data=capture_preset)
|
||||
except:
|
||||
except KeyError as ke:
|
||||
self.log.error('Error loading capture presets: {}'.format(str(ke)))
|
||||
preset = {}
|
||||
self.log.info('using viewport preset: {}'.format(capture_preset))
|
||||
self.log.info('Using viewport preset: {}'.format(preset))
|
||||
|
||||
# preset["off_screen"] = False
|
||||
|
||||
preset['camera'] = camera
|
||||
preset['format'] = "image"
|
||||
# preset['compression'] = "qt"
|
||||
preset['quality'] = 50
|
||||
preset['compression'] = "jpg"
|
||||
preset['start_frame'] = instance.data["frameStart"]
|
||||
preset['end_frame'] = instance.data["frameStart"]
|
||||
preset['camera_options'] = {
|
||||
|
|
@ -78,7 +75,7 @@ class ExtractThumbnail(openpype.api.Extractor):
|
|||
|
||||
# Isolate view is requested by having objects in the set besides a
|
||||
# camera.
|
||||
if instance.data.get("isolate"):
|
||||
if preset.pop("isolate_view", False) or instance.data.get("isolate"):
|
||||
preset["isolate"] = instance.data["setMembers"]
|
||||
|
||||
with maintained_time():
|
||||
|
|
@ -89,9 +86,6 @@ class ExtractThumbnail(openpype.api.Extractor):
|
|||
# playblast and viewer
|
||||
preset['viewer'] = False
|
||||
|
||||
# Remove panel key since it's internal value to capture_gui
|
||||
preset.pop("panel", None)
|
||||
|
||||
path = capture.capture(**preset)
|
||||
playblast = self._fix_playblast_output_path(path)
|
||||
|
||||
|
|
|
|||
|
|
@ -243,7 +243,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
|
|||
"Cannot get value of {}.{}".format(
|
||||
node, attribute_name))
|
||||
else:
|
||||
if value != render_value:
|
||||
if str(value) != str(render_value):
|
||||
invalid = True
|
||||
cls.log.error(
|
||||
("Invalid value {} set on {}.{}. "
|
||||
|
|
|
|||
|
|
@ -80,7 +80,7 @@ def install():
|
|||
# Set context settings.
|
||||
nuke.addOnCreate(workfile_settings.set_context_settings, nodeClass="Root")
|
||||
nuke.addOnCreate(workfile_settings.set_favorites, nodeClass="Root")
|
||||
nuke.addOnCreate(lib.open_last_workfile, nodeClass="Root")
|
||||
nuke.addOnCreate(lib.process_workfile_builder, nodeClass="Root")
|
||||
nuke.addOnCreate(lib.launch_workfiles_app, nodeClass="Root")
|
||||
menu.install()
|
||||
|
||||
|
|
|
|||
|
|
@ -16,6 +16,7 @@ from avalon.nuke import (
from openpype.api import (
    Logger,
    Anatomy,
    BuildWorkfile,
    get_version_from_path,
    get_anatomy_settings,
    get_hierarchy,

@ -1641,23 +1642,69 @@ def launch_workfiles_app():
    workfiles.show(os.environ["AVALON_WORKDIR"])


def open_last_workfile():
    # get state from settings
    open_last_version = get_current_project_settings()["nuke"].get(
        "general", {}).get("create_initial_workfile")
def process_workfile_builder():
    from openpype.lib import (
        env_value_to_bool,
        get_custom_workfile_template
    )

    # get state from settings
    workfile_builder = get_current_project_settings()["nuke"].get(
        "workfile_builder", {})

    # get all important settings
    openlv_on = env_value_to_bool(
        env_key="AVALON_OPEN_LAST_WORKFILE",
        default=None)

    # get settings
    createfv_on = workfile_builder.get("create_first_version") or None
    custom_templates = workfile_builder.get("custom_templates") or None
    builder_on = workfile_builder.get("builder_on_start") or None

    log.info("Opening last workfile...")
    last_workfile_path = os.environ.get("AVALON_LAST_WORKFILE")

    if not os.path.exists(last_workfile_path):
        # return if none is defined
        if not open_last_version:
            return
    # generate first version if the file does not exist and the feature is enabled
    if createfv_on and not os.path.exists(last_workfile_path):
        # get custom template path if any
        custom_template_path = get_custom_workfile_template(
            custom_templates
        )

        # if custom template is defined
        if custom_template_path:
            log.info("Adding nodes from `{}`...".format(
                custom_template_path
            ))
            try:
                # import nodes into current script
                nuke.nodePaste(custom_template_path)
            except RuntimeError:
                raise RuntimeError((
                    "Template defined for project: {} is not working. "
                    "Talk to your manager for advice").format(
                        custom_template_path))

        # if builder at start is defined
        if builder_on:
            log.info("Building nodes from presets...")
            # build nodes by defined presets
            BuildWorkfile().process()

        log.info("Saving script as version `{}`...".format(
            last_workfile_path
        ))
        # save file as version
        save_file(last_workfile_path)
    else:
        # to avoid looping of the callback, remove it!
        nuke.removeOnCreate(open_last_workfile, nodeClass="Root")
        return

    # open workfile
    open_file(last_workfile_path)
    # skip opening of last version if it is not enabled
    if not openlv_on or not os.path.exists(last_workfile_path):
        return

    # to avoid looping of the callback, remove it!
    nuke.removeOnCreate(process_workfile_builder, nodeClass="Root")

    log.info("Opening last workfile...")
    # open workfile
    open_file(last_workfile_path)
|
|
|||
|
|
@ -41,7 +41,7 @@ class LoadMov(api.Loader):
|
|||
icon = "code-fork"
|
||||
color = "orange"
|
||||
|
||||
script_start = nuke.root()["first_frame"].value()
|
||||
first_frame = nuke.root()["first_frame"].value()
|
||||
|
||||
# options gui
|
||||
defaults = {
|
||||
|
|
@ -71,6 +71,9 @@ class LoadMov(api.Loader):
|
|||
version_data = version.get("data", {})
|
||||
repr_id = context["representation"]["_id"]
|
||||
|
||||
self.handle_start = version_data.get("handleStart", 0)
|
||||
self.handle_end = version_data.get("handleEnd", 0)
|
||||
|
||||
orig_first = version_data.get("frameStart")
|
||||
orig_last = version_data.get("frameEnd")
|
||||
diff = orig_first - 1
|
||||
|
|
@ -78,9 +81,6 @@ class LoadMov(api.Loader):
|
|||
first = orig_first - diff
|
||||
last = orig_last - diff
|
||||
|
||||
handle_start = version_data.get("handleStart", 0)
|
||||
handle_end = version_data.get("handleEnd", 0)
|
||||
|
||||
colorspace = version_data.get("colorspace")
|
||||
repr_cont = context["representation"]["context"]
|
||||
|
||||
|
|
@ -89,7 +89,7 @@ class LoadMov(api.Loader):
|
|||
|
||||
context["representation"]["_id"]
|
||||
# create handles offset (only to last, because of mov)
|
||||
last += handle_start + handle_end
|
||||
last += self.handle_start + self.handle_end
|
||||
|
||||
# Fallback to asset name when namespace is None
|
||||
if namespace is None:
|
||||
|
|
@ -133,10 +133,11 @@ class LoadMov(api.Loader):
|
|||
|
||||
if start_at_workfile:
|
||||
# start at workfile start
|
||||
read_node['frame'].setValue(str(self.script_start))
|
||||
read_node['frame'].setValue(str(self.first_frame))
|
||||
else:
|
||||
# start at version frame start
|
||||
read_node['frame'].setValue(str(orig_first - handle_start))
|
||||
read_node['frame'].setValue(
|
||||
str(orig_first - self.handle_start))
|
||||
|
||||
if colorspace:
|
||||
read_node["colorspace"].setValue(str(colorspace))
|
||||
|
|
@ -167,6 +168,11 @@ class LoadMov(api.Loader):
|
|||
|
||||
read_node["tile_color"].setValue(int("0x4ecd25ff", 16))
|
||||
|
||||
if version_data.get("retime", None):
|
||||
speed = version_data.get("speed", 1)
|
||||
time_warp_nodes = version_data.get("timewarps", [])
|
||||
self.make_retimes(speed, time_warp_nodes)
|
||||
|
||||
return containerise(
|
||||
read_node,
|
||||
name=name,
|
||||
|
|
@ -229,9 +235,8 @@ class LoadMov(api.Loader):
|
|||
# set first to 1
|
||||
first = orig_first - diff
|
||||
last = orig_last - diff
|
||||
handles = version_data.get("handles", 0)
|
||||
handle_start = version_data.get("handleStart", 0)
|
||||
handle_end = version_data.get("handleEnd", 0)
|
||||
self.handle_start = version_data.get("handleStart", 0)
|
||||
self.handle_end = version_data.get("handleEnd", 0)
|
||||
colorspace = version_data.get("colorspace")
|
||||
|
||||
if first is None:
|
||||
|
|
@ -242,13 +247,8 @@ class LoadMov(api.Loader):
|
|||
read_node['name'].value(), representation))
|
||||
first = 0
|
||||
|
||||
# fix handle start and end if none are available
|
||||
if not handle_start and not handle_end:
|
||||
handle_start = handles
|
||||
handle_end = handles
|
||||
|
||||
# create handles offset (only to last, because of mov)
|
||||
last += handle_start + handle_end
|
||||
last += self.handle_start + self.handle_end
|
||||
|
||||
read_node["file"].setValue(file)
|
||||
|
||||
|
|
@ -259,12 +259,12 @@ class LoadMov(api.Loader):
|
|||
read_node["last"].setValue(last)
|
||||
read_node['frame_mode'].setValue("start at")
|
||||
|
||||
if int(self.script_start) == int(read_node['frame'].value()):
|
||||
if int(self.first_frame) == int(read_node['frame'].value()):
|
||||
# start at workfile start
|
||||
read_node['frame'].setValue(str(self.script_start))
|
||||
read_node['frame'].setValue(str(self.first_frame))
|
||||
else:
|
||||
# start at version frame start
|
||||
read_node['frame'].setValue(str(orig_first - handle_start))
|
||||
read_node['frame'].setValue(str(orig_first - self.handle_start))
|
||||
|
||||
if colorspace:
|
||||
read_node["colorspace"].setValue(str(colorspace))
|
||||
|
|
@ -282,8 +282,8 @@ class LoadMov(api.Loader):
|
|||
"version": str(version.get("name")),
|
||||
"colorspace": version_data.get("colorspace"),
|
||||
"source": version_data.get("source"),
|
||||
"handleStart": str(handle_start),
|
||||
"handleEnd": str(handle_end),
|
||||
"handleStart": str(self.handle_start),
|
||||
"handleEnd": str(self.handle_end),
|
||||
"fps": str(version_data.get("fps")),
|
||||
"author": version_data.get("author"),
|
||||
"outputDir": version_data.get("outputDir")
|
||||
|
|
@ -295,6 +295,11 @@ class LoadMov(api.Loader):
|
|||
else:
|
||||
read_node["tile_color"].setValue(int("0x4ecd25ff", 16))
|
||||
|
||||
if version_data.get("retime", None):
|
||||
speed = version_data.get("speed", 1)
|
||||
time_warp_nodes = version_data.get("timewarps", [])
|
||||
self.make_retimes(speed, time_warp_nodes)
|
||||
|
||||
# Update the imprinted representation
|
||||
update_container(
|
||||
read_node, updated_dict
|
||||
|
|
@ -310,3 +315,32 @@ class LoadMov(api.Loader):
|
|||
|
||||
with viewer_update_and_undo_stop():
|
||||
nuke.delete(read_node)
|
||||
|
||||
def make_retimes(self, speed, time_warp_nodes):
|
||||
''' Create all retime and timewarping nodes with copied animation '''
|
||||
if speed != 1:
|
||||
rtn = nuke.createNode(
|
||||
"Retime",
|
||||
"speed {}".format(speed))
|
||||
rtn["before"].setValue("continue")
|
||||
rtn["after"].setValue("continue")
|
||||
rtn["input.first_lock"].setValue(True)
|
||||
rtn["input.first"].setValue(
|
||||
self.first_frame
|
||||
)
|
||||
|
||||
if time_warp_nodes != []:
|
||||
start_anim = self.first_frame + (self.handle_start / speed)
|
||||
for timewarp in time_warp_nodes:
|
||||
twn = nuke.createNode(timewarp["Class"],
|
||||
"name {}".format(timewarp["name"]))
|
||||
if isinstance(timewarp["lookup"], list):
|
||||
# if array for animation
|
||||
twn["lookup"].setAnimated()
|
||||
for i, value in enumerate(timewarp["lookup"]):
|
||||
twn["lookup"].setValueAt(
|
||||
(start_anim + i) + value,
|
||||
(start_anim + i))
|
||||
else:
|
||||
# if static value `int`
|
||||
twn["lookup"].setValue(timewarp["lookup"])
|
||||
|
|
|
|||
|
|
@ -140,7 +140,7 @@ class LoadSequence(api.Loader):
|
|||
if version_data.get("retime", None):
|
||||
speed = version_data.get("speed", 1)
|
||||
time_warp_nodes = version_data.get("timewarps", [])
|
||||
self.make_retimes(read_node, speed, time_warp_nodes)
|
||||
self.make_retimes(speed, time_warp_nodes)
|
||||
|
||||
return containerise(read_node,
|
||||
name=name,
|
||||
|
|
@ -256,7 +256,7 @@ class LoadSequence(api.Loader):
|
|||
if version_data.get("retime", None):
|
||||
speed = version_data.get("speed", 1)
|
||||
time_warp_nodes = version_data.get("timewarps", [])
|
||||
self.make_retimes(read_node, speed, time_warp_nodes)
|
||||
self.make_retimes(speed, time_warp_nodes)
|
||||
|
||||
# Update the imprinted representation
|
||||
update_container(
|
||||
|
|
@ -285,10 +285,11 @@ class LoadSequence(api.Loader):
|
|||
rtn["after"].setValue("continue")
|
||||
rtn["input.first_lock"].setValue(True)
|
||||
rtn["input.first"].setValue(
|
||||
self.handle_start + self.first_frame
|
||||
self.first_frame
|
||||
)
|
||||
|
||||
if time_warp_nodes != []:
|
||||
start_anim = self.first_frame + (self.handle_start / speed)
|
||||
for timewarp in time_warp_nodes:
|
||||
twn = nuke.createNode(timewarp["Class"],
|
||||
"name {}".format(timewarp["name"]))
|
||||
|
|
@ -297,8 +298,8 @@ class LoadSequence(api.Loader):
|
|||
twn["lookup"].setAnimated()
|
||||
for i, value in enumerate(timewarp["lookup"]):
|
||||
twn["lookup"].setValueAt(
|
||||
(self.first_frame + i) + value,
|
||||
(self.first_frame + i))
|
||||
(start_anim + i) + value,
|
||||
(start_anim + i))
|
||||
else:
|
||||
# if static value `int`
|
||||
twn["lookup"].setValue(timewarp["lookup"])
|
||||
|
|
|
|||
|
|
@ -34,20 +34,6 @@ class TvpaintPrelaunchHook(PreLaunchHook):
|
|||
"run", self.launch_script_path(), executable_path
|
||||
)
|
||||
|
||||
# Add workfile to launch arguments
|
||||
workfile_path = self.workfile_path()
|
||||
if workfile_path:
|
||||
new_launch_args.append(workfile_path)
|
||||
|
||||
# How to create new command line
|
||||
# if platform.system().lower() == "windows":
|
||||
# new_launch_args = [
|
||||
# "cmd.exe",
|
||||
# "/c",
|
||||
# "Call cmd.exe /k",
|
||||
# *new_launch_args
|
||||
# ]
|
||||
|
||||
# Append as whole list as these arguments should not be separated
|
||||
self.launch_context.launch_args.append(new_launch_args)
|
||||
|
||||
|
|
@ -64,38 +50,4 @@ class TvpaintPrelaunchHook(PreLaunchHook):
|
|||
"tvpaint",
|
||||
"launch_script.py"
|
||||
)
|
||||
return script_path
|
||||
|
||||
def workfile_path(self):
|
||||
workfile_path = self.data["last_workfile_path"]
|
||||
|
||||
# copy workfile from template if none exists on the path
|
||||
if not os.path.exists(workfile_path):
|
||||
# TODO add ability to set different template workfile path via
|
||||
# settings
|
||||
pype_dir = os.path.dirname(os.path.abspath(tvpaint.__file__))
|
||||
template_path = os.path.join(
|
||||
pype_dir, "resources", "template.tvpp"
|
||||
)
|
||||
|
||||
if not os.path.exists(template_path):
|
||||
self.log.warning(
|
||||
"Couldn't find workfile template file in {}".format(
|
||||
template_path
|
||||
)
|
||||
)
|
||||
return
|
||||
|
||||
self.log.info(
|
||||
f"Creating workfile from template: \"{template_path}\""
|
||||
)
|
||||
|
||||
# Copy template workfile to new destination
|
||||
shutil.copy2(
|
||||
os.path.normpath(template_path),
|
||||
os.path.normpath(workfile_path)
|
||||
)
|
||||
|
||||
self.log.info(f"Workfile to open: \"{workfile_path}\"")
|
||||
|
||||
return workfile_path
|
||||
return script_path
|
||||
|
|
@ -1,3 +1,4 @@
|
|||
import os
|
||||
import json
|
||||
import copy
|
||||
import pyblish.api
|
||||
|
|
@ -109,7 +110,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
|
|||
|
||||
return {
|
||||
"family": "review",
|
||||
"asset": context.data["workfile_context"]["asset"],
|
||||
"asset": context.data["asset"],
|
||||
# Dummy subset name
|
||||
"subset": "reviewMain"
|
||||
}
|
||||
|
|
|
|||
43
openpype/hosts/tvpaint/plugins/publish/collect_workfile.py
Normal file
@ -0,0 +1,43 @@
import os
import json
import pyblish.api
from avalon import io


class CollectWorkfile(pyblish.api.ContextPlugin):
    label = "Collect Workfile"
    order = pyblish.api.CollectorOrder - 1
    hosts = ["tvpaint"]

    def process(self, context):
        current_file = context.data["currentFile"]

        self.log.info(
            "Workfile path used for workfile family: {}".format(current_file)
        )

        dirpath, filename = os.path.split(current_file)
        basename, ext = os.path.splitext(filename)
        instance = context.create_instance(name=basename)

        task_name = io.Session["AVALON_TASK"]
        subset_name = "workfile" + task_name.capitalize()

        # Create Workfile instance
        instance.data.update({
            "subset": subset_name,
            "asset": context.data["asset"],
            "label": subset_name,
            "publish": True,
            "family": "workfile",
            "families": ["workfile"],
            "representations": [{
                "name": ext.lstrip("."),
                "ext": ext.lstrip("."),
                "files": filename,
                "stagingDir": dirpath
            }]
        })
        self.log.info("Collected workfile instance: {}".format(
            json.dumps(instance.data, indent=4)
        ))

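CollectWorkfile derives the subset name from the current task, so the workfile family publishes under a per-task subset. A quick illustration with a made-up task name:

# task_name = "compositing"                          (from io.Session["AVALON_TASK"])
# subset_name = "workfile" + task_name.capitalize()  -> "workfileCompositing"
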
@ -0,0 +1,49 @@
import pyblish.api
from avalon.tvpaint import save_file


class ValidateWorkfileMetadataRepair(pyblish.api.Action):
    """Store current context into workfile metadata."""

    label = "Use current context"
    icon = "wrench"
    on = "failed"

    def process(self, context, _plugin):
        """Save current workfile which should trigger storing of metadata."""
        current_file = context.data["currentFile"]
        # Save file should trigger
        save_file(current_file)


class ValidateWorkfileMetadata(pyblish.api.ContextPlugin):
    """Validate if workfile contains required metadata for publishing."""

    label = "Validate Workfile Metadata"
    order = pyblish.api.ValidatorOrder

    families = ["workfile"]

    actions = [ValidateWorkfileMetadataRepair]

    required_keys = {"project", "asset", "task"}

    def process(self, context):
        workfile_context = context.data["workfile_context"]
        if not workfile_context:
            raise AssertionError(
                "Current workfile is missing whole metadata about context."
            )

        missing_keys = []
        for key in self.required_keys:
            value = workfile_context.get(key)
            if not value:
                missing_keys.append(key)

        if missing_keys:
            raise AssertionError(
                "Current workfile is missing metadata about {}.".format(
                    ", ".join(missing_keys)
                )
            )
66
openpype/hosts/unreal/plugins/create/create_look.py
Normal file
|
|
@ -0,0 +1,66 @@
|
|||
import unreal
|
||||
from openpype.hosts.unreal.api.plugin import Creator
|
||||
from avalon.unreal import pipeline
|
||||
|
||||
|
||||
class CreateLook(Creator):
|
||||
"""Shader connections defining shape look"""
|
||||
|
||||
name = "unrealLook"
|
||||
label = "Unreal - Look"
|
||||
family = "look"
|
||||
icon = "paint-brush"
|
||||
|
||||
root = "/Game/Avalon/Assets"
|
||||
suffix = "_INS"
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(CreateLook, self).__init__(*args, **kwargs)
|
||||
|
||||
def process(self):
|
||||
name = self.data["subset"]
|
||||
|
||||
selection = []
|
||||
if (self.options or {}).get("useSelection"):
|
||||
sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
|
||||
selection = [a.get_path_name() for a in sel_objects]
|
||||
|
||||
# Create the folder
|
||||
path = f"{self.root}/{self.data['asset']}"
|
||||
new_name = pipeline.create_folder(path, name)
|
||||
full_path = f"{path}/{new_name}"
|
||||
|
||||
# Create a new cube static mesh
|
||||
ar = unreal.AssetRegistryHelpers.get_asset_registry()
|
||||
cube = ar.get_asset_by_object_path("/Engine/BasicShapes/Cube.Cube")
|
||||
|
||||
# Create the avalon publish instance object
|
||||
container_name = f"{name}{self.suffix}"
|
||||
pipeline.create_publish_instance(
|
||||
instance=container_name, path=full_path)
|
||||
|
||||
# Get the mesh of the selected object
|
||||
original_mesh = ar.get_asset_by_object_path(selection[0]).get_asset()
|
||||
materials = original_mesh.get_editor_property('materials')
|
||||
|
||||
self.data["members"] = []
|
||||
|
||||
# Add the materials to the cube
|
||||
for material in materials:
|
||||
name = material.get_editor_property('material_slot_name')
|
||||
object_path = f"{full_path}/{name}.{name}"
|
||||
object = unreal.EditorAssetLibrary.duplicate_loaded_asset(
|
||||
cube.get_asset(), object_path
|
||||
)
|
||||
|
||||
# Remove the default material of the cube object
|
||||
object.get_editor_property('static_materials').pop()
|
||||
|
||||
object.add_material(
|
||||
material.get_editor_property('material_interface'))
|
||||
|
||||
self.data["members"].append(object_path)
|
||||
|
||||
unreal.EditorAssetLibrary.save_asset(object_path)
|
||||
|
||||
pipeline.imprint(f"{full_path}/{container_name}", self.data)
|
||||
120
openpype/hosts/unreal/plugins/publish/extract_look.py
Normal file
|
|
@ -0,0 +1,120 @@
|
|||
import json
|
||||
import os
|
||||
|
||||
import unreal
|
||||
from unreal import MaterialEditingLibrary as mat_lib
|
||||
|
||||
import openpype.api
|
||||
|
||||
|
||||
class ExtractLook(openpype.api.Extractor):
|
||||
"""Extract look."""
|
||||
|
||||
label = "Extract Look"
|
||||
hosts = ["unreal"]
|
||||
families = ["look"]
|
||||
optional = True
|
||||
|
||||
def process(self, instance):
|
||||
# Define extract output file path
|
||||
stagingdir = self.staging_dir(instance)
|
||||
resources_dir = instance.data["resourcesDir"]
|
||||
|
||||
ar = unreal.AssetRegistryHelpers.get_asset_registry()
|
||||
|
||||
transfers = []
|
||||
|
||||
json_data = []
|
||||
|
||||
for member in instance:
|
||||
asset = ar.get_asset_by_object_path(member)
|
||||
object = asset.get_asset()
|
||||
|
||||
name = asset.get_editor_property('asset_name')
|
||||
|
||||
json_element = {'material': str(name)}
|
||||
|
||||
material_obj = object.get_editor_property('static_materials')[0]
|
||||
material = material_obj.material_interface
|
||||
|
||||
base_color = mat_lib.get_material_property_input_node(
|
||||
material, unreal.MaterialProperty.MP_BASE_COLOR)
|
||||
|
||||
base_color_name = base_color.get_editor_property('parameter_name')
|
||||
|
||||
texture = mat_lib.get_material_default_texture_parameter_value(
|
||||
material, base_color_name)
|
||||
|
||||
if texture:
|
||||
# Export Texture
|
||||
tga_filename = f"{instance.name}_{name}_texture.tga"
|
||||
|
||||
tga_exporter = unreal.TextureExporterTGA()
|
||||
|
||||
tga_export_task = unreal.AssetExportTask()
|
||||
|
||||
tga_export_task.set_editor_property('exporter', tga_exporter)
|
||||
tga_export_task.set_editor_property('automated', True)
|
||||
tga_export_task.set_editor_property('object', texture)
|
||||
tga_export_task.set_editor_property(
|
||||
'filename', f"{stagingdir}/{tga_filename}")
|
||||
tga_export_task.set_editor_property('prompt', False)
|
||||
tga_export_task.set_editor_property('selected', False)
|
||||
|
||||
unreal.Exporter.run_asset_export_task(tga_export_task)
|
||||
|
||||
json_element['tga_filename'] = tga_filename
|
||||
|
||||
transfers.append((
|
||||
f"{stagingdir}/{tga_filename}",
|
||||
f"{resources_dir}/{tga_filename}"))
|
||||
|
||||
fbx_filename = f"{instance.name}_{name}.fbx"
|
||||
|
||||
fbx_exporter = unreal.StaticMeshExporterFBX()
|
||||
fbx_exporter.set_editor_property('text', False)
|
||||
|
||||
options = unreal.FbxExportOption()
|
||||
options.set_editor_property('ascii', False)
|
||||
options.set_editor_property('collision', False)
|
||||
|
||||
task = unreal.AssetExportTask()
|
||||
task.set_editor_property('exporter', fbx_exporter)
|
||||
task.set_editor_property('options', options)
|
||||
task.set_editor_property('automated', True)
|
||||
task.set_editor_property('object', object)
|
||||
task.set_editor_property(
|
||||
'filename', f"{stagingdir}/{fbx_filename}")
|
||||
task.set_editor_property('prompt', False)
|
||||
task.set_editor_property('selected', False)
|
||||
|
||||
unreal.Exporter.run_asset_export_task(task)
|
||||
|
||||
json_element['fbx_filename'] = fbx_filename
|
||||
|
||||
transfers.append((
|
||||
f"{stagingdir}/{fbx_filename}",
|
||||
f"{resources_dir}/{fbx_filename}"))
|
||||
|
||||
json_data.append(json_element)
|
||||
|
||||
json_filename = f"{instance.name}.json"
|
||||
json_path = os.path.join(stagingdir, json_filename)
|
||||
|
||||
with open(json_path, "w+") as file:
|
||||
json.dump(json_data, fp=file, indent=2)
|
||||
|
||||
if "transfers" not in instance.data:
|
||||
instance.data["transfers"] = []
|
||||
if "representations" not in instance.data:
|
||||
instance.data["representations"] = []
|
||||
|
||||
json_representation = {
|
||||
'name': 'json',
|
||||
'ext': 'json',
|
||||
'files': json_filename,
|
||||
"stagingDir": stagingdir,
|
||||
}
|
||||
|
||||
instance.data["representations"].append(json_representation)
|
||||
instance.data["transfers"].extend(transfers)
|
||||
|
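ExtractLook writes one JSON entry per look member, alongside the exported texture and FBX files. A sketch of the resulting JSON payload; the subset and material names are illustrative only, the file names simply follow the f-strings above:

# Example contents of <instance.name>.json written to the staging dir:
# [
#     {
#         "material": "M_Body",
#         "tga_filename": "lookMain_M_Body_texture.tga",
#         "fbx_filename": "lookMain_M_Body.fbx"
#     }
# ]
# Each referenced file is also queued in instance.data["transfers"], so it
# gets copied into the published representation's resources directory.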
|
@ -81,7 +81,13 @@ from .avalon_context import (
|
|||
|
||||
get_creator_by_name,
|
||||
|
||||
change_timer_to_current_context
|
||||
get_custom_workfile_template,
|
||||
|
||||
change_timer_to_current_context,
|
||||
|
||||
get_custom_workfile_template_by_context,
|
||||
get_custom_workfile_template_by_string_context,
|
||||
get_custom_workfile_template
|
||||
)
|
||||
|
||||
from .local_settings import (
|
||||
|
|
@ -91,7 +97,8 @@ from .local_settings import (
|
|||
OpenPypeSettingsRegistry,
|
||||
get_local_site_id,
|
||||
change_openpype_mongo_url,
|
||||
get_openpype_username
|
||||
get_openpype_username,
|
||||
is_admin_password_required
|
||||
)
|
||||
|
||||
from .applications import (
|
||||
|
|
@ -192,6 +199,10 @@ __all__ = [
|
|||
|
||||
"change_timer_to_current_context",
|
||||
|
||||
"get_custom_workfile_template_by_context",
|
||||
"get_custom_workfile_template_by_string_context",
|
||||
"get_custom_workfile_template",
|
||||
|
||||
"IniSettingRegistry",
|
||||
"JSONSettingRegistry",
|
||||
"OpenPypeSecureRegistry",
|
||||
|
|
@ -199,6 +210,7 @@ __all__ = [
|
|||
"get_local_site_id",
|
||||
"change_openpype_mongo_url",
|
||||
"get_openpype_username",
|
||||
"is_admin_password_required",
|
||||
|
||||
"ApplicationLaunchFailed",
|
||||
"ApplictionExecutableNotFound",
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ import os
|
|||
import json
|
||||
import re
|
||||
import copy
|
||||
import platform
|
||||
import logging
|
||||
import collections
|
||||
import functools
|
||||
|
|
@ -755,18 +756,22 @@ class BuildWorkfile:
|
|||
"""
|
||||
host_name = avalon.api.registered_host().__name__.rsplit(".", 1)[-1]
|
||||
presets = get_project_settings(avalon.io.Session["AVALON_PROJECT"])
|
||||
|
||||
# Get presets for host
|
||||
build_presets = (
|
||||
presets.get(host_name, {})
|
||||
.get("workfile_build")
|
||||
.get("profiles")
|
||||
)
|
||||
if not build_presets:
|
||||
wb_settings = presets.get(host_name, {}).get("workfile_builder")
|
||||
|
||||
if not wb_settings:
|
||||
# backward compatibility
|
||||
wb_settings = presets.get(host_name, {}).get("workfile_build")
|
||||
|
||||
builder_presets = wb_settings.get("profiles")
|
||||
|
||||
if not builder_presets:
|
||||
return
|
||||
|
||||
task_name_low = task_name.lower()
|
||||
per_task_preset = None
|
||||
for preset in build_presets:
|
||||
for preset in builder_presets:
|
||||
preset_tasks = preset.get("tasks") or []
|
||||
preset_tasks_low = [task.lower() for task in preset_tasks]
|
||||
if task_name_low in preset_tasks_low:
|
||||
|
|
@ -1266,3 +1271,201 @@ def change_timer_to_current_context():
|
|||
}
|
||||
|
||||
requests.post(rest_api_url, json=data)
|
||||
|
||||
|
||||
def _get_task_context_data_for_anatomy(
|
||||
project_doc, asset_doc, task_name, anatomy=None
|
||||
):
|
||||
"""Prepare Task context for anatomy data.
|
||||
|
||||
WARNING: this data structure is currently used only in workfile templates.
|
||||
Key "task" is currently in rest of pipeline used as string with task
|
||||
name.
|
||||
|
||||
Args:
|
||||
project_doc (dict): Project document with available "name" and
|
||||
"data.code" keys.
|
||||
asset_doc (dict): Asset document from MongoDB.
|
||||
task_name (str): Name of context task.
|
||||
anatomy (Anatomy): Optionally Anatomy for passed project name can be
|
||||
passed as Anatomy creation may be slow.
|
||||
|
||||
Returns:
|
||||
dict: With Anatomy context data.
|
||||
"""
|
||||
|
||||
if anatomy is None:
|
||||
anatomy = Anatomy(project_doc["name"])
|
||||
|
||||
asset_name = asset_doc["name"]
|
||||
project_task_types = anatomy["tasks"]
|
||||
|
||||
# get relevant task type from asset doc
|
||||
assert task_name in asset_doc["data"]["tasks"], (
|
||||
"Task name \"{}\" not found on asset \"{}\"".format(
|
||||
task_name, asset_name
|
||||
)
|
||||
)
|
||||
|
||||
task_type = asset_doc["data"]["tasks"][task_name].get("type")
|
||||
|
||||
assert task_type, (
|
||||
"Task name \"{}\" on asset \"{}\" does not have specified task type."
|
||||
).format(asset_name, task_name)
|
||||
|
||||
# get short name for task type defined in default anatomy settings
|
||||
project_task_type_data = project_task_types.get(task_type)
|
||||
assert project_task_type_data, (
|
||||
"Something went wrong. Default anatomy tasks are not holding"
|
||||
"requested task type: `{}`".format(task_type)
|
||||
)
|
||||
|
||||
return {
|
||||
"project": {
|
||||
"name": project_doc["name"],
|
||||
"code": project_doc["data"].get("code")
|
||||
},
|
||||
"asset": asset_name,
|
||||
"task": {
|
||||
"name": task_name,
|
||||
"type": task_type,
|
||||
"short_name": project_task_type_data["short_name"]
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def get_custom_workfile_template_by_context(
|
||||
template_profiles, project_doc, asset_doc, task_name, anatomy=None
|
||||
):
|
||||
"""Filter and fill workfile template profiles by passed context.
|
||||
|
||||
It is expected that passed argument are already queried documents of
|
||||
project and asset as parents of processing task name.
|
||||
|
||||
Existence of formatted path is not validated.
|
||||
|
||||
Args:
|
||||
template_profiles(list): Template profiles from settings.
|
||||
project_doc(dict): Project document from MongoDB.
|
||||
asset_doc(dict): Asset document from MongoDB.
|
||||
task_name(str): Name of task for which templates are filtered.
|
||||
anatomy(Anatomy): Optionally passed anatomy object for passed project
|
||||
name.
|
||||
|
||||
Returns:
|
||||
str: Path to template or None if none of profiles match current
|
||||
context. (Existence of formatted path is not validated.)
|
||||
"""
|
||||
|
||||
from openpype.lib import filter_profiles
|
||||
|
||||
if anatomy is None:
|
||||
anatomy = Anatomy(project_doc["name"])
|
||||
|
||||
# get project, asset, task anatomy context data
|
||||
anatomy_context_data = _get_task_context_data_for_anatomy(
|
||||
project_doc, asset_doc, task_name, anatomy
|
||||
)
|
||||
# add root dict
|
||||
anatomy_context_data["root"] = anatomy.roots
|
||||
|
||||
# get task type for the task in context
|
||||
current_task_type = anatomy_context_data["task"]["type"]
|
||||
|
||||
# get path from matching profile
|
||||
matching_item = filter_profiles(
|
||||
template_profiles,
|
||||
{"task_type": current_task_type}
|
||||
)
|
||||
# when path is available try to format it in case
|
||||
# there are some anatomy template strings
|
||||
if matching_item:
|
||||
template = matching_item["path"][platform.system().lower()]
|
||||
return template.format(**anatomy_context_data)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def get_custom_workfile_template_by_string_context(
|
||||
template_profiles, project_name, asset_name, task_name,
|
||||
dbcon=None, anatomy=None
|
||||
):
|
||||
"""Filter and fill workfile template profiles by passed context.
|
||||
|
||||
Passed context are string representations of project, asset and task.
|
||||
Function will query documents of project and asset to be able use
|
||||
`get_custom_workfile_template_by_context` for rest of logic.
|
||||
|
||||
Args:
|
||||
template_profiles(list): Loaded workfile template profiles.
|
||||
project_name(str): Project name.
|
||||
asset_name(str): Asset name.
|
||||
task_name(str): Task name.
|
||||
dbcon(AvalonMongoDB): Optional avalon implementation of mongo
|
||||
connection with context Session.
|
||||
anatomy(Anatomy): Optionally prepared anatomy object for passed
|
||||
project.
|
||||
|
||||
Returns:
|
||||
str: Path to template or None if none of profiles match current
|
||||
context. (Existence of formatted path is not validated.)
|
||||
"""
|
||||
|
||||
if dbcon is None:
|
||||
from avalon.api import AvalonMongoDB
|
||||
|
||||
dbcon = AvalonMongoDB()
|
||||
|
||||
dbcon.install()
|
||||
|
||||
if dbcon.Session["AVALON_PROJECT"] != project_name:
|
||||
dbcon.Session["AVALON_PROJECT"] = project_name
|
||||
|
||||
project_doc = dbcon.find_one(
|
||||
{"type": "project"},
|
||||
# All we need is "name" and "data.code" keys
|
||||
{
|
||||
"name": 1,
|
||||
"data.code": 1
|
||||
}
|
||||
)
|
||||
asset_doc = dbcon.find_one(
|
||||
{
|
||||
"type": "asset",
|
||||
"name": asset_name
|
||||
},
|
||||
# All we need is "name" and "data.tasks" keys
|
||||
{
|
||||
"name": 1,
|
||||
"data.tasks": 1
|
||||
}
|
||||
)
|
||||
|
||||
return get_custom_workfile_template_by_context(
|
||||
template_profiles, project_doc, asset_doc, task_name, anatomy
|
||||
)
|
||||
|
||||
|
||||
def get_custom_workfile_template(template_profiles):
|
||||
"""Filter and fill workfile template profiles by current context.
|
||||
|
||||
Current context is defined by `avalon.api.Session`. That's why this
|
||||
function should be used only inside host where context is set and stable.
|
||||
|
||||
Args:
|
||||
template_profiles(list): Template profiles from settings.
|
||||
|
||||
Returns:
|
||||
str: Path to template or None if none of profiles match current
|
||||
context. (Existence of formatted path is not validated.)
|
||||
"""
|
||||
# Use `avalon.io` as Mongo connection
|
||||
from avalon import io
|
||||
|
||||
return get_custom_workfile_template_by_string_context(
|
||||
template_profiles,
|
||||
io.Session["AVALON_PROJECT"],
|
||||
io.Session["AVALON_ASSET"],
|
||||
io.Session["AVALON_TASK"],
|
||||
io
|
||||
)
|
||||
|
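The new get_custom_workfile_template_* helpers above resolve a template path by matching the context task type against profiles from settings and then formatting the matched path with anatomy data. A hedged sketch of a call; the profile values below are illustrative and the exact profile schema comes from project settings:

# Illustrative profile list -- real profiles come from project settings.
template_profiles = [
    {
        "task_type": ["Compositing"],
        "path": {
            "windows": "{root[work]}/{project[name]}/templates/comp.nk",
            "linux": "{root[work]}/{project[name]}/templates/comp.nk",
            "darwin": "{root[work]}/{project[name]}/templates/comp.nk"
        }
    }
]

# Resolve by explicit string context (queries project and asset documents):
template_path = get_custom_workfile_template_by_string_context(
    template_profiles, "MyProject", "sh010", "Compositing"
)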
|
|
|||
|
|
@ -43,7 +43,7 @@ def sizeof_fmt(num, suffix='B'):
|
|||
return "%.1f%s%s" % (num, 'Yi', suffix)
|
||||
|
||||
|
||||
def path_from_represenation(representation, anatomy):
|
||||
def path_from_representation(representation, anatomy):
|
||||
from avalon import pipeline # safer importing
|
||||
|
||||
try:
|
||||
|
|
@ -126,18 +126,22 @@ def check_destination_path(repre_id,
|
|||
anatomy_filled = anatomy.format_all(anatomy_data)
|
||||
dest_path = anatomy_filled["delivery"][template_name]
|
||||
report_items = collections.defaultdict(list)
|
||||
sub_msg = None
|
||||
|
||||
if not dest_path.solved:
|
||||
msg = (
|
||||
"Missing keys in Representation's context"
|
||||
" for anatomy template \"{}\"."
|
||||
).format(template_name)
|
||||
|
||||
sub_msg = (
|
||||
"Representation: {}<br>"
|
||||
).format(repre_id)
|
||||
|
||||
if dest_path.missing_keys:
|
||||
keys = ", ".join(dest_path.missing_keys)
|
||||
sub_msg = (
|
||||
"Representation: {}<br>- Missing keys: \"{}\"<br>"
|
||||
).format(repre_id, keys)
|
||||
sub_msg += (
|
||||
"- Missing keys: \"{}\"<br>"
|
||||
).format(keys)
|
||||
|
||||
if dest_path.invalid_types:
|
||||
items = []
|
||||
|
|
@ -145,10 +149,9 @@ def check_destination_path(repre_id,
|
|||
items.append("\"{}\" {}".format(key, str(value)))
|
||||
|
||||
keys = ", ".join(items)
|
||||
sub_msg = (
|
||||
"Representation: {}<br>"
|
||||
sub_msg += (
|
||||
"- Invalid value DataType: \"{}\"<br>"
|
||||
).format(repre_id, keys)
|
||||
).format(keys)
|
||||
|
||||
report_items[msg].append(sub_msg)
|
||||
|
||||
|
|
|
|||
|
|
@ -4,8 +4,10 @@ import clique
|
|||
from .import_utils import discover_host_vendor_module
|
||||
|
||||
try:
|
||||
import opentimelineio as otio
|
||||
from opentimelineio import opentime as _ot
|
||||
except ImportError:
|
||||
otio = discover_host_vendor_module("opentimelineio")
|
||||
_ot = discover_host_vendor_module("opentimelineio.opentime")
|
||||
|
||||
|
||||
|
|
@ -166,3 +168,119 @@ def make_sequence_collection(path, otio_range, metadata):
|
|||
head=head, tail=tail, padding=metadata["padding"])
|
||||
collection.indexes.update([i for i in range(first, (last + 1))])
|
||||
return dir_path, collection
|
||||
|
||||
|
||||
def _sequence_resize(source, length):
|
||||
step = float(len(source) - 1) / (length - 1)
|
||||
for i in range(length):
|
||||
low, ratio = divmod(i * step, 1)
|
||||
high = low + 1 if ratio > 0 else low
|
||||
yield (1 - ratio) * source[int(low)] + ratio * source[int(high)]
|
||||
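_sequence_resize linearly resamples a list of values to a new length, blending neighbouring samples for fractional positions. A small worked example of what the generator yields (the input values are made up for illustration):

values = list(_sequence_resize([0, 10, 20], 5))
# step = (3 - 1) / (5 - 1) = 0.5, so output index i samples position i * 0.5
assert values == [0.0, 5.0, 10.0, 15.0, 20.0]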
|
||||
|
||||
def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
|
||||
source_range = otio_clip.source_range
|
||||
available_range = otio_clip.available_range()
|
||||
media_in = available_range.start_time.value
|
||||
media_out = available_range.end_time_inclusive().value
|
||||
|
||||
# modifiers
|
||||
time_scalar = 1.
|
||||
offset_in = 0
|
||||
offset_out = 0
|
||||
time_warp_nodes = []
|
||||
|
||||
# Check for speed effects and adjust playback speed accordingly
|
||||
for effect in otio_clip.effects:
|
||||
if isinstance(effect, otio.schema.LinearTimeWarp):
|
||||
time_scalar = effect.time_scalar
|
||||
|
||||
elif isinstance(effect, otio.schema.FreezeFrame):
|
||||
# For freeze frame, playback speed must be set after range
|
||||
time_scalar = 0.
|
||||
|
||||
elif isinstance(effect, otio.schema.TimeEffect):
|
||||
# For freeze frame, playback speed must be set after range
|
||||
name = effect.name
|
||||
effect_name = effect.effect_name
|
||||
if "TimeWarp" not in effect_name:
|
||||
continue
|
||||
metadata = effect.metadata
|
||||
lookup = metadata.get("lookup")
|
||||
if not lookup:
|
||||
continue
|
||||
|
||||
# time warp node
|
||||
tw_node = {
|
||||
"Class": "TimeWarp",
|
||||
"name": name
|
||||
}
|
||||
tw_node.update(metadata)
|
||||
|
||||
# get first and last frame offsets
|
||||
offset_in += lookup[0]
|
||||
offset_out += lookup[-1]
|
||||
|
||||
# add to timewarp nodes
|
||||
time_warp_nodes.append(tw_node)
|
||||
|
||||
# multiply by time scalar
|
||||
offset_in *= time_scalar
|
||||
offset_out *= time_scalar
|
||||
|
||||
# flip offsets if speed is reversed
|
||||
if time_scalar < 0:
|
||||
_offset_in = offset_out
|
||||
_offset_out = offset_in
|
||||
offset_in = _offset_in
|
||||
offset_out = _offset_out
|
||||
|
||||
# scale handles
|
||||
handle_start *= abs(time_scalar)
|
||||
handle_end *= abs(time_scalar)
|
||||
|
||||
# flip handles if speed is reversed
|
||||
if time_scalar < 0:
|
||||
_handle_start = handle_end
|
||||
_handle_end = handle_start
|
||||
handle_start = _handle_start
|
||||
handle_end = _handle_end
|
||||
|
||||
source_in = source_range.start_time.value
|
||||
|
||||
media_in_trimmed = (
|
||||
media_in + source_in + offset_in)
|
||||
media_out_trimmed = (
|
||||
media_in + source_in + (
|
||||
((source_range.duration.value - 1) * abs(
|
||||
time_scalar)) + offset_out))
|
||||
|
||||
# calculate available handles
|
||||
if (media_in_trimmed - media_in) < handle_start:
|
||||
handle_start = (media_in_trimmed - media_in)
|
||||
if (media_out - media_out_trimmed) < handle_end:
|
||||
handle_end = (media_out - media_out_trimmed)
|
||||
|
||||
# create version data
|
||||
version_data = {
|
||||
"versionData": {
|
||||
"retime": True,
|
||||
"speed": time_scalar,
|
||||
"timewarps": time_warp_nodes,
|
||||
"handleStart": handle_start,
|
||||
"handleEnd": handle_end
|
||||
}
|
||||
}
|
||||
|
||||
returning_dict = {
|
||||
"mediaIn": media_in_trimmed,
|
||||
"mediaOut": media_out_trimmed,
|
||||
"handleStart": handle_start,
|
||||
"handleEnd": handle_end
|
||||
}
|
||||
|
||||
# add version data only if retime
|
||||
if time_warp_nodes or time_scalar != 1.:
|
||||
returning_dict.update(version_data)
|
||||
|
||||
return returning_dict
|
||||
|
|
|
|||
|
|
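For orientation, a minimal usage sketch of the retime helper above. It is not part of this diff: the clip values are made up, and it assumes OpenTimelineIO is installed and that the module is importable as openpype.lib.editorial (the collector further down in this changeset calls it exactly this way).

    import opentimelineio as otio
    from openpype.lib import editorial

    # Hypothetical clip: 100 frames of 25 fps media, cut to 20 frames starting
    # at frame 10, played back at double speed through a LinearTimeWarp.
    media_range = otio.opentime.TimeRange(
        start_time=otio.opentime.RationalTime(0, 25),
        duration=otio.opentime.RationalTime(100, 25)
    )
    clip = otio.schema.Clip(
        name="plate",
        media_reference=otio.schema.ExternalReference(
            target_url="/tmp/plate.%04d.exr",
            available_range=media_range
        ),
        source_range=otio.opentime.TimeRange(
            start_time=otio.opentime.RationalTime(10, 25),
            duration=otio.opentime.RationalTime(20, 25)
        )
    )
    clip.effects.append(otio.schema.LinearTimeWarp(time_scalar=2.0))

    attrs = editorial.get_media_range_with_retimes(clip, handle_start=5, handle_end=5)
    # mediaIn stays at the cut-in (10), mediaOut covers twice the cut duration (48),
    # handles are scaled by the speed, and "versionData" carries the retime metadata.
    print(attrs["mediaIn"], attrs["mediaOut"], attrs.get("versionData", {}).get("speed"))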
@ -29,7 +29,10 @@ except ImportError:
|
|||
import six
|
||||
import appdirs
|
||||
|
||||
from openpype.settings import get_local_settings
|
||||
from openpype.settings import (
|
||||
get_local_settings,
|
||||
get_system_settings
|
||||
)
|
||||
|
||||
from .import validate_mongo_connection
|
||||
|
||||
|
|
@ -562,3 +565,16 @@ def get_openpype_username():
|
|||
if not username:
|
||||
username = getpass.getuser()
|
||||
return username
|
||||
|
||||
|
||||
def is_admin_password_required():
|
||||
system_settings = get_system_settings()
|
||||
password = system_settings["general"].get("admin_password")
|
||||
if not password:
|
||||
return False
|
||||
|
||||
local_settings = get_local_settings()
|
||||
is_admin = local_settings.get("general", {}).get("is_admin", False)
|
||||
if is_admin:
|
||||
return False
|
||||
return True
|
||||
|
|
|
|||
|
|
@ -36,6 +36,7 @@ from .clockify import ClockifyModule
|
|||
from .log_viewer import LogViewModule
|
||||
from .muster import MusterModule
|
||||
from .deadline import DeadlineModule
|
||||
from .project_manager_action import ProjectManagerAction
|
||||
from .standalonepublish_action import StandAlonePublishAction
|
||||
from .sync_server import SyncServerModule
|
||||
|
||||
|
|
@ -73,6 +74,7 @@ __all__ = (
|
|||
"LogViewModule",
|
||||
"MusterModule",
|
||||
"DeadlineModule",
|
||||
"ProjectManagerAction",
|
||||
"StandAlonePublishAction",
|
||||
|
||||
"SyncServerModule"
|
||||
|
|
|
|||
|
|
@ -86,7 +86,7 @@ class AvalonModule(PypeModule, ITrayModule, IWebServerRoutes):
|
|||
from Qt import QtWidgets
|
||||
# Actions
|
||||
action_library_loader = QtWidgets.QAction(
|
||||
"Library loader", tray_menu
|
||||
"Loader", tray_menu
|
||||
)
|
||||
|
||||
action_library_loader.triggered.connect(self.show_library_loader)
|
||||
|
|
|
|||
|
|
@ -139,6 +139,25 @@ class ITrayModule:
|
|||
"""
|
||||
pass
|
||||
|
||||
def execute_in_main_thread(self, callback):
|
||||
""" Pushes callback to the queue or process 'callback' on a main thread
|
||||
|
||||
Some callbacks need to be processed on main thread (menu actions
|
||||
must be added on main thread or they won't get triggered etc.)
|
||||
"""
|
||||
# called without initialized tray, still main thread needed
|
||||
if not self.tray_initialized:
|
||||
try:
|
||||
callback = self._main_thread_callbacks.popleft()
|
||||
callback()
|
||||
except:
|
||||
self.log.warning(
|
||||
"Failed to execute {} in main thread".format(callback),
|
||||
exc_info=True)
|
||||
|
||||
return
|
||||
self.manager.tray_manager.execute_in_main_thread(callback)
|
||||
|
||||
def show_tray_message(self, title, message, icon=None, msecs=None):
|
||||
"""Show tray message.
|
||||
|
||||
|
|
@ -153,6 +172,10 @@ class ITrayModule:
|
|||
if self._tray_manager:
|
||||
self._tray_manager.show_tray_message(title, message, icon, msecs)
|
||||
|
||||
def add_doubleclick_callback(self, callback):
|
||||
if hasattr(self.manager, "add_doubleclick_callback"):
|
||||
self.manager.add_doubleclick_callback(self, callback)
|
||||
|
||||
|
||||
class ITrayAction(ITrayModule):
|
||||
"""Implementation of Tray action.
|
||||
|
|
@ -165,6 +188,9 @@ class ITrayAction(ITrayModule):
|
|||
necessary.
|
||||
"""
|
||||
|
||||
admin_action = False
|
||||
_admin_submenu = None
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def label(self):
|
||||
|
|
@ -178,9 +204,19 @@ class ITrayAction(ITrayModule):
|
|||
|
||||
def tray_menu(self, tray_menu):
|
||||
from Qt import QtWidgets
|
||||
action = QtWidgets.QAction(self.label, tray_menu)
|
||||
|
||||
if self.admin_action:
|
||||
menu = self.admin_submenu(tray_menu)
|
||||
action = QtWidgets.QAction(self.label, menu)
|
||||
menu.addAction(action)
|
||||
if not menu.menuAction().isVisible():
|
||||
menu.menuAction().setVisible(True)
|
||||
|
||||
else:
|
||||
action = QtWidgets.QAction(self.label, tray_menu)
|
||||
tray_menu.addAction(action)
|
||||
|
||||
action.triggered.connect(self.on_action_trigger)
|
||||
tray_menu.addAction(action)
|
||||
|
||||
def tray_start(self):
|
||||
return
|
||||
|
|
@ -188,6 +224,16 @@ class ITrayAction(ITrayModule):
|
|||
def tray_exit(self):
|
||||
return
|
||||
|
||||
@staticmethod
|
||||
def admin_submenu(tray_menu):
|
||||
if ITrayAction._admin_submenu is None:
|
||||
from Qt import QtWidgets
|
||||
|
||||
admin_submenu = QtWidgets.QMenu("Admin", tray_menu)
|
||||
admin_submenu.menuAction().setVisible(False)
|
||||
ITrayAction._admin_submenu = admin_submenu
|
||||
return ITrayAction._admin_submenu
|
||||
|
||||
|
||||
class ITrayService(ITrayModule):
|
||||
# Module's property
|
||||
|
|
@ -214,6 +260,7 @@ class ITrayService(ITrayModule):
|
|||
def services_submenu(tray_menu):
|
||||
if ITrayService._services_submenu is None:
|
||||
from Qt import QtWidgets
|
||||
|
||||
services_submenu = QtWidgets.QMenu("Services", tray_menu)
|
||||
services_submenu.menuAction().setVisible(False)
|
||||
ITrayService._services_submenu = services_submenu
|
||||
|
|
@ -658,7 +705,7 @@ class TrayModulesManager(ModulesManager):
|
|||
)
|
||||
|
||||
def __init__(self):
|
||||
self.log = PypeLogger().get_logger(self.__class__.__name__)
|
||||
self.log = PypeLogger.get_logger(self.__class__.__name__)
|
||||
|
||||
self.modules = []
|
||||
self.modules_by_id = {}
|
||||
|
|
@ -666,6 +713,28 @@ class TrayModulesManager(ModulesManager):
|
|||
self._report = {}
|
||||
self.tray_manager = None
|
||||
|
||||
self.doubleclick_callbacks = {}
|
||||
self.doubleclick_callback = None
|
||||
|
||||
def add_doubleclick_callback(self, module, callback):
|
||||
"""Register doubleclick callbacks on tray icon.
|
||||
|
||||
Currently there is no way how to determine which is launched. Name of
|
||||
callback can be defined with `doubleclick_callback` attribute.
|
||||
|
||||
Missing feature how to define default callback.
|
||||
"""
|
||||
callback_name = "_".join([module.name, callback.__name__])
|
||||
if callback_name not in self.doubleclick_callbacks:
|
||||
self.doubleclick_callbacks[callback_name] = callback
|
||||
if self.doubleclick_callback is None:
|
||||
self.doubleclick_callback = callback_name
|
||||
return
|
||||
|
||||
self.log.warning((
|
||||
"Callback with name \"{}\" is already registered."
|
||||
).format(callback_name))
|
||||
|
||||
def initialize(self, tray_manager, tray_menu):
|
||||
self.tray_manager = tray_manager
|
||||
self.initialize_modules()
|
||||
|
|
|
|||
|
|
@ -105,7 +105,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
|
|||
families = ["render.farm", "prerender.farm",
|
||||
"renderlayer", "imagesequence", "vrayscene"]
|
||||
|
||||
aov_filter = {"maya": [r".+(?:\.|_)([Bb]eauty)(?:\.|_).*"],
|
||||
aov_filter = {"maya": [r".*(?:\.|_)*([Bb]eauty)(?:\.|_)*.*"],
|
||||
"aftereffects": [r".*"], # for everything from AE
|
||||
"harmony": [r".*"], # for everything from AE
|
||||
"celaction": [r".*"]}
|
||||
|
|
@ -435,9 +435,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
|
|||
preview = False
|
||||
if app in self.aov_filter.keys():
|
||||
for aov_pattern in self.aov_filter[app]:
|
||||
if re.match(aov_pattern,
|
||||
aov
|
||||
):
|
||||
if re.match(aov_pattern, aov):
|
||||
preview = True
|
||||
break
|
||||
|
||||
|
|
|
|||
|
|
@ -41,12 +41,9 @@ class PushHierValuesToNonHier(ServerAction):
|
|||
label = "OpenPype Admin"
|
||||
variant = "- Push Hierarchical values To Non-Hierarchical"
|
||||
|
||||
hierarchy_entities_query = (
|
||||
"select id, parent_id from TypedContext where project_id is \"{}\""
|
||||
)
|
||||
entities_query = (
|
||||
"select id, name, parent_id, link from TypedContext"
|
||||
" where project_id is \"{}\" and object_type_id in ({})"
|
||||
entities_query_by_project = (
|
||||
"select id, parent_id, object_type_id from TypedContext"
|
||||
" where project_id is \"{}\""
|
||||
)
|
||||
cust_attrs_query = (
|
||||
"select id, key, object_type_id, is_hierarchical, default"
|
||||
|
|
@ -187,18 +184,18 @@ class PushHierValuesToNonHier(ServerAction):
|
|||
"message": "Nothing has changed."
|
||||
}
|
||||
|
||||
entities = session.query(self.entities_query.format(
|
||||
project_entity["id"],
|
||||
self.join_query_keys(destination_object_type_ids)
|
||||
)).all()
|
||||
(
|
||||
parent_id_by_entity_id,
|
||||
filtered_entities
|
||||
) = self.all_hierarchy_entities(
|
||||
session,
|
||||
selected_ids,
|
||||
project_entity,
|
||||
destination_object_type_ids
|
||||
)
|
||||
|
||||
self.log.debug("Preparing whole project hierarchy by ids.")
|
||||
parent_id_by_entity_id = self.all_hierarchy_ids(
|
||||
session, project_entity
|
||||
)
|
||||
filtered_entities = self.filter_entities_by_selection(
|
||||
entities, selected_ids, parent_id_by_entity_id
|
||||
)
|
||||
|
||||
entities_by_obj_id = {
|
||||
obj_id: []
|
||||
for obj_id in destination_object_type_ids
|
||||
|
|
@ -252,39 +249,77 @@ class PushHierValuesToNonHier(ServerAction):
|
|||
|
||||
return True
|
||||
|
||||
def all_hierarchy_ids(self, session, project_entity):
|
||||
parent_id_by_entity_id = {}
|
||||
|
||||
hierarchy_entities = session.query(
|
||||
self.hierarchy_entities_query.format(project_entity["id"])
|
||||
)
|
||||
for hierarchy_entity in hierarchy_entities:
|
||||
entity_id = hierarchy_entity["id"]
|
||||
parent_id = hierarchy_entity["parent_id"]
|
||||
parent_id_by_entity_id[entity_id] = parent_id
|
||||
return parent_id_by_entity_id
|
||||
|
||||
def filter_entities_by_selection(
|
||||
self, entities, selected_ids, parent_id_by_entity_id
|
||||
def all_hierarchy_entities(
|
||||
self,
|
||||
session,
|
||||
selected_ids,
|
||||
project_entity,
|
||||
destination_object_type_ids
|
||||
):
|
||||
selected_ids = set(selected_ids)
|
||||
|
||||
filtered_entities = []
|
||||
for entity in entities:
|
||||
entity_id = entity["id"]
|
||||
if entity_id in selected_ids:
|
||||
filtered_entities.append(entity)
|
||||
continue
|
||||
parent_id_by_entity_id = {}
|
||||
# Query is simple if project is in selection
|
||||
if project_entity["id"] in selected_ids:
|
||||
entities = session.query(
|
||||
self.entities_query_by_project.format(project_entity["id"])
|
||||
).all()
|
||||
|
||||
parent_id = entity["parent_id"]
|
||||
while True:
|
||||
if parent_id in selected_ids:
|
||||
for entity in entities:
|
||||
if entity["object_type_id"] in destination_object_type_ids:
|
||||
filtered_entities.append(entity)
|
||||
break
|
||||
entity_id = entity["id"]
|
||||
parent_id_by_entity_id[entity_id] = entity["parent_id"]
|
||||
return parent_id_by_entity_id, filtered_entities
|
||||
|
||||
parent_id = parent_id_by_entity_id.get(parent_id)
|
||||
if parent_id is None:
|
||||
break
|
||||
# Query selection and get it's link to be able calculate parentings
|
||||
entities_with_link = session.query((
|
||||
"select id, parent_id, link, object_type_id"
|
||||
" from TypedContext where id in ({})"
|
||||
).format(self.join_query_keys(selected_ids))).all()
|
||||
|
||||
return filtered_entities
|
||||
# Process and store queried entities and store all lower entities to
|
||||
# `bottom_ids`
|
||||
# - bottom_ids should not contain 2 ids where one is parent of second
|
||||
bottom_ids = set(selected_ids)
|
||||
for entity in entities_with_link:
|
||||
if entity["object_type_id"] in destination_object_type_ids:
|
||||
filtered_entities.append(entity)
|
||||
children_id = None
|
||||
for idx, item in enumerate(reversed(entity["link"])):
|
||||
item_id = item["id"]
|
||||
if idx > 0 and item_id in bottom_ids:
|
||||
bottom_ids.remove(item_id)
|
||||
|
||||
if children_id is not None:
|
||||
parent_id_by_entity_id[children_id] = item_id
|
||||
|
||||
children_id = item_id
|
||||
|
||||
# Query all children of selection per one hierarchy level and process
|
||||
# their data the same way as selection but parents are already known
|
||||
chunk_size = 100
|
||||
while bottom_ids:
|
||||
child_entities = []
|
||||
# Query entities in chunks
|
||||
entity_ids = list(bottom_ids)
|
||||
for idx in range(0, len(entity_ids), chunk_size):
|
||||
_entity_ids = entity_ids[idx:idx + chunk_size]
|
||||
child_entities.extend(session.query((
|
||||
"select id, parent_id, object_type_id from"
|
||||
" TypedContext where parent_id in ({})"
|
||||
).format(self.join_query_keys(_entity_ids))).all())
|
||||
|
||||
bottom_ids = set()
|
||||
for entity in child_entities:
|
||||
entity_id = entity["id"]
|
||||
parent_id_by_entity_id[entity_id] = entity["parent_id"]
|
||||
bottom_ids.add(entity_id)
|
||||
if entity["object_type_id"] in destination_object_type_ids:
|
||||
filtered_entities.append(entity)
|
||||
|
||||
return parent_id_by_entity_id, filtered_entities
|
||||
|
||||
def get_hier_values(
|
||||
self,
|
||||
|
|
@ -387,10 +422,10 @@ class PushHierValuesToNonHier(ServerAction):
|
|||
for key, value in parent_values.items():
|
||||
hier_values_by_entity_id[task_id][key] = value
|
||||
configuration_id = hier_attr_id_by_key[key]
|
||||
_entity_key = collections.OrderedDict({
|
||||
"configuration_id": configuration_id,
|
||||
"entity_id": task_id
|
||||
})
|
||||
_entity_key = collections.OrderedDict([
|
||||
("configuration_id", configuration_id),
|
||||
("entity_id", task_id)
|
||||
])
|
||||
|
||||
session.recorded_operations.push(
|
||||
ftrack_api.operation.UpdateEntityOperation(
|
||||
|
|
@ -401,6 +436,9 @@ class PushHierValuesToNonHier(ServerAction):
|
|||
value
|
||||
)
|
||||
)
|
||||
if len(session.recorded_operations) > 100:
|
||||
session.commit()
|
||||
|
||||
session.commit()
|
||||
|
||||
def push_values_to_entities(
|
||||
|
|
@ -425,10 +463,10 @@ class PushHierValuesToNonHier(ServerAction):
|
|||
if value is None:
|
||||
continue
|
||||
|
||||
_entity_key = collections.OrderedDict({
|
||||
"configuration_id": attr["id"],
|
||||
"entity_id": entity_id
|
||||
})
|
||||
_entity_key = collections.OrderedDict([
|
||||
("configuration_id", attr["id"]),
|
||||
("entity_id", entity_id)
|
||||
])
|
||||
|
||||
session.recorded_operations.push(
|
||||
ftrack_api.operation.UpdateEntityOperation(
|
||||
|
|
@ -439,6 +477,9 @@ class PushHierValuesToNonHier(ServerAction):
|
|||
value
|
||||
)
|
||||
)
|
||||
if len(session.recorded_operations) > 100:
|
||||
session.commit()
|
||||
|
||||
session.commit()
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ from openpype.api import Anatomy, config
|
|||
from openpype.modules.ftrack.lib import BaseAction, statics_icon
|
||||
from openpype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
|
||||
from openpype.lib.delivery import (
|
||||
path_from_represenation,
|
||||
path_from_representation,
|
||||
get_format_dict,
|
||||
check_destination_path,
|
||||
process_single_file,
|
||||
|
|
@ -74,7 +74,7 @@ class Delivery(BaseAction):
|
|||
"value": project_name
|
||||
})
|
||||
|
||||
# Prpeare anatomy data
|
||||
# Prepare anatomy data
|
||||
anatomy = Anatomy(project_name)
|
||||
new_anatomies = []
|
||||
first = None
|
||||
|
|
@ -368,12 +368,18 @@ class Delivery(BaseAction):
|
|||
|
||||
def launch(self, session, entities, event):
|
||||
if "values" not in event["data"]:
|
||||
return
|
||||
return {
|
||||
"success": True,
|
||||
"message": "Nothing to do"
|
||||
}
|
||||
|
||||
values = event["data"]["values"]
|
||||
skipped = values.pop("__skipped__")
|
||||
if skipped:
|
||||
return None
|
||||
return {
|
||||
"success": False,
|
||||
"message": "Action skipped"
|
||||
}
|
||||
|
||||
user_id = event["source"]["user"]["id"]
|
||||
user_entity = session.query(
|
||||
|
|
@ -391,27 +397,45 @@ class Delivery(BaseAction):
|
|||
|
||||
try:
|
||||
self.db_con.install()
|
||||
self.real_launch(session, entities, event)
|
||||
job["status"] = "done"
|
||||
report = self.real_launch(session, entities, event)
|
||||
|
||||
except Exception:
|
||||
except Exception as exc:
|
||||
report = {
|
||||
"success": False,
|
||||
"title": "Delivery failed",
|
||||
"items": [{
|
||||
"type": "label",
|
||||
"value": (
|
||||
"Error during delivery action process:<br>{}"
|
||||
"<br><br>Check logs for more information."
|
||||
).format(str(exc))
|
||||
}]
|
||||
}
|
||||
self.log.warning(
|
||||
"Failed during processing delivery action.",
|
||||
exc_info=True
|
||||
)
|
||||
|
||||
finally:
|
||||
if job["status"] != "done":
|
||||
if report["success"]:
|
||||
job["status"] = "done"
|
||||
else:
|
||||
job["status"] = "failed"
|
||||
session.commit()
|
||||
self.db_con.uninstall()
|
||||
|
||||
if job["status"] == "failed":
|
||||
if not report["success"]:
|
||||
self.show_interface(
|
||||
items=report["items"],
|
||||
title=report["title"],
|
||||
event=event
|
||||
)
|
||||
return {
|
||||
"success": False,
|
||||
"message": "Delivery failed. Check logs for more information."
|
||||
"message": "Errors during delivery process. See report."
|
||||
}
|
||||
return True
|
||||
|
||||
return report
|
||||
|
||||
def real_launch(self, session, entities, event):
|
||||
self.log.info("Delivery action just started.")
|
||||
|
|
@ -431,7 +455,7 @@ class Delivery(BaseAction):
|
|||
if not repre_names:
|
||||
return {
|
||||
"success": True,
|
||||
"message": "Not selected components to deliver."
|
||||
"message": "No selected components to deliver."
|
||||
}
|
||||
|
||||
location_path = location_path.strip()
|
||||
|
|
@ -479,7 +503,7 @@ class Delivery(BaseAction):
|
|||
if frame:
|
||||
repre["context"]["frame"] = len(str(frame)) * "#"
|
||||
|
||||
repre_path = path_from_represenation(repre, anatomy)
|
||||
repre_path = path_from_representation(repre, anatomy)
|
||||
# TODO add backup solution where root of path from component
|
||||
# is replaced with root
|
||||
args = (
|
||||
|
|
@ -502,7 +526,7 @@ class Delivery(BaseAction):
|
|||
def report(self, report_items):
|
||||
"""Returns dict with final status of delivery (succes, fail etc.)."""
|
||||
items = []
|
||||
title = "Delivery report"
|
||||
|
||||
for msg, _items in report_items.items():
|
||||
if not _items:
|
||||
continue
|
||||
|
|
@ -533,9 +557,8 @@ class Delivery(BaseAction):
|
|||
|
||||
return {
|
||||
"items": items,
|
||||
"title": title,
|
||||
"success": False,
|
||||
"message": "Delivery Finished"
|
||||
"title": "Delivery report",
|
||||
"success": False
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
|
|||
# NOTE Import python module here to know if import was successful
|
||||
import ftrack_api
|
||||
|
||||
session = ftrack_api.Session(auto_connect_event_hub=True)
|
||||
session = ftrack_api.Session(auto_connect_event_hub=False)
|
||||
self.log.debug("Ftrack user: \"{0}\"".format(session.api_user))
|
||||
|
||||
# Collect task
|
||||
|
|
|
|||
|
|
@ -15,6 +15,8 @@ class LauncherAction(PypeModule, ITrayAction):
|
|||
def tray_init(self):
|
||||
self.create_window()
|
||||
|
||||
self.add_doubleclick_callback(self.show_launcher)
|
||||
|
||||
def tray_start(self):
|
||||
return
|
||||
|
||||
|
|
|
|||
59
openpype/modules/project_manager_action.py
Normal file
59
openpype/modules/project_manager_action.py
Normal file
|
|
@@ -0,0 +1,59 @@
from . import PypeModule, ITrayAction


class ProjectManagerAction(PypeModule, ITrayAction):
    label = "Project Manager (beta)"
    name = "project_manager"
    admin_action = True

    def initialize(self, modules_settings):
        enabled = False
        module_settings = modules_settings.get(self.name)
        if module_settings:
            enabled = module_settings.get("enabled", enabled)
        self.enabled = enabled

        # Tray attributes
        self.project_manager_window = None

    def connect_with_modules(self, *_a, **_kw):
        return

    def tray_init(self):
        """Initialization in tray implementation of ITrayAction."""
        self.create_project_manager_window()

    def on_action_trigger(self):
        """Implementation for action trigger of ITrayAction."""
        self.show_project_manager_window()

    def create_project_manager_window(self):
        """Initializa Settings Qt window."""
        if self.project_manager_window:
            return
        from openpype.tools.project_manager import ProjectManagerWindow

        self.project_manager_window = ProjectManagerWindow()

    def show_project_manager_window(self):
        """Show project manager tool window.

        Raises:
            AssertionError: Window must be already created. Call
                `create_project_manager_window` before calling this method.
        """
        if not self.project_manager_window:
            raise AssertionError("Window is not initialized.")

        # Store if was visible
        was_minimized = self.project_manager_window.isMinimized()

        # Show settings gui
        self.project_manager_window.show()

        if was_minimized:
            self.project_manager_window.showNormal()

        # Pull window to the front.
        self.project_manager_window.raise_()
        self.project_manager_window.activateWindow()

@ -37,7 +37,8 @@ class ISettingsChangeListener:
|
|||
class SettingsAction(PypeModule, ITrayAction):
|
||||
"""Action to show Setttings tool."""
|
||||
name = "settings"
|
||||
label = "Settings"
|
||||
label = "Studio Settings"
|
||||
admin_action = True
|
||||
|
||||
def initialize(self, _modules_settings):
|
||||
# This action is always enabled
|
||||
|
|
@ -45,7 +46,7 @@ class SettingsAction(PypeModule, ITrayAction):
|
|||
|
||||
# User role
|
||||
# TODO should be changeable
|
||||
self.user_role = "developer"
|
||||
self.user_role = "manager"
|
||||
|
||||
# Tray attributes
|
||||
self.settings_window = None
|
||||
|
|
@ -66,7 +67,8 @@ class SettingsAction(PypeModule, ITrayAction):
|
|||
if self.settings_window:
|
||||
return
|
||||
from openpype.tools.settings import MainWidget
|
||||
self.settings_window = MainWidget(self.user_role)
|
||||
|
||||
self.settings_window = MainWidget(self.user_role, reset_on_show=False)
|
||||
self.settings_window.trigger_restart.connect(self._on_trigger_restart)
|
||||
|
||||
def _on_trigger_restart(self):
|
||||
|
|
@ -77,7 +79,7 @@ class SettingsAction(PypeModule, ITrayAction):
|
|||
|
||||
Raises:
|
||||
AssertionError: Window must be already created. Call
|
||||
`create_settings_window` before callint this method.
|
||||
`create_settings_window` before calling this method.
|
||||
"""
|
||||
if not self.settings_window:
|
||||
raise AssertionError("Window is not initialized.")
|
||||
|
|
@ -104,7 +106,7 @@ class SettingsAction(PypeModule, ITrayAction):
|
|||
class LocalSettingsAction(PypeModule, ITrayAction):
|
||||
"""Action to show Setttings tool."""
|
||||
name = "local_settings"
|
||||
label = "Local Settings"
|
||||
label = "Settings"
|
||||
|
||||
def initialize(self, _modules_settings):
|
||||
# This action is always enabled
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
import os
|
||||
import sys
|
||||
import platform
|
||||
import subprocess
|
||||
from openpype.lib import get_pype_execute_args
|
||||
from . import PypeModule, ITrayAction
|
||||
|
|
@ -35,4 +35,14 @@ class StandAlonePublishAction(PypeModule, ITrayAction):
|
|||
|
||||
def run_standalone_publisher(self):
|
||||
args = get_pype_execute_args("standalonepublisher")
|
||||
subprocess.Popen(args, creationflags=subprocess.DETACHED_PROCESS)
|
||||
kwargs = {}
|
||||
if platform.system().lower() == "darwin":
|
||||
new_args = ["open", "-a", args.pop(0), "--args"]
|
||||
new_args.extend(args)
|
||||
args = new_args
|
||||
|
||||
detached_process = getattr(subprocess, "DETACHED_PROCESS", None)
|
||||
if detached_process is not None:
|
||||
kwargs["creationflags"] = detached_process
|
||||
|
||||
subprocess.Popen(args, **kwargs)
|
||||
|
|
|
|||
|
|
@ -45,7 +45,6 @@ class SyncServerWindow(QtWidgets.QDialog):
|
|||
self.pause_btn = QtWidgets.QPushButton("Pause server")
|
||||
|
||||
left_column_layout.addWidget(self.pause_btn)
|
||||
left_column.setLayout(left_column_layout)
|
||||
|
||||
repres = SyncRepresentationSummaryWidget(
|
||||
sync_server,
|
||||
|
|
@ -60,8 +59,6 @@ class SyncServerWindow(QtWidgets.QDialog):
|
|||
split.setSizes([180, 950, 200])
|
||||
container_layout.addWidget(split)
|
||||
|
||||
container.setLayout(container_layout)
|
||||
|
||||
body_layout = QtWidgets.QHBoxLayout(body)
|
||||
body_layout.addWidget(container)
|
||||
body_layout.setContentsMargins(0, 0, 0, 0)
|
||||
|
|
@ -77,7 +74,6 @@ class SyncServerWindow(QtWidgets.QDialog):
|
|||
layout.addWidget(body)
|
||||
layout.addWidget(footer)
|
||||
|
||||
self.setLayout(body_layout)
|
||||
self.setWindowTitle("Sync Queue")
|
||||
|
||||
self.projects.project_changed.connect(
|
||||
|
|
|
|||
|
|
@ -602,7 +602,6 @@ class SyncServerDetailWindow(QtWidgets.QDialog):
|
|||
layout.addWidget(body)
|
||||
layout.addWidget(footer)
|
||||
|
||||
self.setLayout(body_layout)
|
||||
self.setWindowTitle("Sync Representation Detail")
|
||||
|
||||
|
||||
|
|
|
|||
152
openpype/modules/webserver/host_console_listener.py
Normal file
152
openpype/modules/webserver/host_console_listener.py
Normal file
|
|
@ -0,0 +1,152 @@
|
|||
import aiohttp
|
||||
from aiohttp import web
|
||||
import json
|
||||
import logging
|
||||
from concurrent.futures import CancelledError
|
||||
from Qt import QtWidgets
|
||||
|
||||
from openpype.modules import ITrayService
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class IconType:
|
||||
IDLE = "idle"
|
||||
RUNNING = "running"
|
||||
FAILED = "failed"
|
||||
|
||||
|
||||
class MsgAction:
|
||||
CONNECTING = "connecting"
|
||||
INITIALIZED = "initialized"
|
||||
ADD = "add"
|
||||
CLOSE = "close"
|
||||
|
||||
|
||||
class HostListener:
|
||||
def __init__(self, webserver, module):
|
||||
self._window_per_id = {}
|
||||
self.module = module
|
||||
self.webserver = webserver
|
||||
self._window_per_id = {} # dialogs per host name
|
||||
self._action_per_id = {} # QAction per host name
|
||||
|
||||
webserver.add_route('*', "/ws/host_listener", self.websocket_handler)
|
||||
|
||||
def _host_is_connecting(self, host_name, label):
|
||||
from openpype.tools.tray_app.app import ConsoleDialog
|
||||
|
||||
""" Initialize dialog, adds to submenu. """
|
||||
services_submenu = self.module._services_submenu
|
||||
action = QtWidgets.QAction(label, services_submenu)
|
||||
action.triggered.connect(lambda: self.show_widget(host_name))
|
||||
|
||||
services_submenu.addAction(action)
|
||||
self._action_per_id[host_name] = action
|
||||
self._set_host_icon(host_name, IconType.IDLE)
|
||||
widget = ConsoleDialog("")
|
||||
self._window_per_id[host_name] = widget
|
||||
|
||||
def _set_host_icon(self, host_name, icon_type):
|
||||
"""Assigns icon to action for 'host_name' with 'icon_type'.
|
||||
|
||||
Action must exist in self._action_per_id
|
||||
|
||||
Args:
|
||||
host_name (str)
|
||||
icon_type (IconType)
|
||||
"""
|
||||
action = self._action_per_id.get(host_name)
|
||||
if not action:
|
||||
raise ValueError("Unknown host {}".format(host_name))
|
||||
|
||||
icon = None
|
||||
if icon_type == IconType.IDLE:
|
||||
icon = ITrayService.get_icon_idle()
|
||||
elif icon_type == IconType.RUNNING:
|
||||
icon = ITrayService.get_icon_running()
|
||||
elif icon_type == IconType.FAILED:
|
||||
icon = ITrayService.get_icon_failed()
|
||||
else:
|
||||
log.info("Unknown icon type {} for {}".format(icon_type,
|
||||
host_name))
|
||||
action.setIcon(icon)
|
||||
|
||||
def show_widget(self, host_name):
|
||||
"""Shows prepared widget for 'host_name'.
|
||||
|
||||
Dialog get initialized when 'host_name' is connecting.
|
||||
"""
|
||||
self.module.execute_in_main_thread(
|
||||
lambda: self._show_widget(host_name))
|
||||
|
||||
def _show_widget(self, host_name):
|
||||
widget = self._window_per_id[host_name]
|
||||
widget.show()
|
||||
widget.raise_()
|
||||
widget.activateWindow()
|
||||
|
||||
async def websocket_handler(self, request):
|
||||
ws = web.WebSocketResponse()
|
||||
await ws.prepare(request)
|
||||
|
||||
widget = None
|
||||
try:
|
||||
async for msg in ws:
|
||||
if msg.type == aiohttp.WSMsgType.TEXT:
|
||||
host_name, action, text = self._parse_message(msg)
|
||||
|
||||
if action == MsgAction.CONNECTING:
|
||||
self._action_per_id[host_name] = None
|
||||
# must be sent to main thread, or action wont trigger
|
||||
self.module.execute_in_main_thread(
|
||||
lambda: self._host_is_connecting(host_name, text))
|
||||
elif action == MsgAction.CLOSE:
|
||||
# clean close
|
||||
self._close(host_name)
|
||||
await ws.close()
|
||||
elif action == MsgAction.INITIALIZED:
|
||||
self.module.execute_in_main_thread(
|
||||
# must be queued as _host_is_connecting might not
|
||||
# be triggered/finished yet
|
||||
lambda: self._set_host_icon(host_name,
|
||||
IconType.RUNNING))
|
||||
elif action == MsgAction.ADD:
|
||||
self.module.execute_in_main_thread(
|
||||
lambda: self._add_text(host_name, text))
|
||||
elif msg.type == aiohttp.WSMsgType.ERROR:
|
||||
print('ws connection closed with exception %s' %
|
||||
ws.exception())
|
||||
host_name, _, _ = self._parse_message(msg)
|
||||
self._set_host_icon(host_name, IconType.FAILED)
|
||||
except CancelledError: # recoverable
|
||||
pass
|
||||
except Exception as exc:
|
||||
log.warning("Exception during communication", exc_info=True)
|
||||
if widget:
|
||||
error_msg = str(exc)
|
||||
widget.append_text(error_msg)
|
||||
|
||||
return ws
|
||||
|
||||
def _add_text(self, host_name, text):
|
||||
widget = self._window_per_id[host_name]
|
||||
widget.append_text(text)
|
||||
|
||||
def _close(self, host_name):
|
||||
""" Clean close - remove from menu, delete widget."""
|
||||
services_submenu = self.module._services_submenu
|
||||
action = self._action_per_id.pop(host_name)
|
||||
services_submenu.removeAction(action)
|
||||
widget = self._window_per_id.pop(host_name)
|
||||
if widget.isVisible():
|
||||
widget.hide()
|
||||
widget.deleteLater()
|
||||
|
||||
def _parse_message(self, msg):
|
||||
data = json.loads(msg.data)
|
||||
action = data.get("action")
|
||||
host_name = data["host"]
|
||||
value = data.get("text")
|
||||
|
||||
return host_name, action, value
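A client-side sketch of the message shape the listener above expects on its "/ws/host_listener" route. This is not part of the diff: the port and the host label are made up for illustration, only the JSON keys ("host", "action", "text") and the action names come from the code above.

    import asyncio
    import json

    import aiohttp


    async def send_console_lines():
        async with aiohttp.ClientSession() as session:
            # the webserver module picks a free port at runtime; 8079 is just a placeholder
            async with session.ws_connect("ws://localhost:8079/ws/host_listener") as ws:
                await ws.send_str(json.dumps(
                    {"host": "photoshop", "action": "connecting", "text": "Photoshop"}))
                await ws.send_str(json.dumps(
                    {"host": "photoshop", "action": "initialized", "text": ""}))
                await ws.send_str(json.dumps(
                    {"host": "photoshop", "action": "add", "text": "Publishing started\n"}))
                await ws.send_str(json.dumps(
                    {"host": "photoshop", "action": "close", "text": ""}))


    asyncio.run(send_console_lines())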
|
||||
|
|
@ -23,6 +23,7 @@ class WebServerModule(PypeModule, ITrayService):
|
|||
def initialize(self, _module_settings):
|
||||
self.enabled = True
|
||||
self.server_manager = None
|
||||
self._host_listener = None
|
||||
|
||||
self.port = self.find_free_port()
|
||||
|
||||
|
|
@ -37,6 +38,7 @@ class WebServerModule(PypeModule, ITrayService):
|
|||
def tray_init(self):
|
||||
self.create_server_manager()
|
||||
self._add_resources_statics()
|
||||
self._add_listeners()
|
||||
|
||||
def tray_start(self):
|
||||
self.start_server()
|
||||
|
|
@ -54,6 +56,13 @@ class WebServerModule(PypeModule, ITrayService):
|
|||
webserver_url, static_prefix
|
||||
)
|
||||
|
||||
def _add_listeners(self):
|
||||
from openpype.modules.webserver import host_console_listener
|
||||
|
||||
self._host_listener = host_console_listener.HostListener(
|
||||
self.server_manager, self
|
||||
)
|
||||
|
||||
def start_server(self):
|
||||
if self.server_manager:
|
||||
self.server_manager.start_server()
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ from openpype import resources
|
|||
|
||||
from openpype.lib.delivery import (
|
||||
sizeof_fmt,
|
||||
path_from_represenation,
|
||||
path_from_representation,
|
||||
get_format_dict,
|
||||
check_destination_path,
|
||||
process_single_file,
|
||||
|
|
@ -170,7 +170,7 @@ class DeliveryOptionsDialog(QtWidgets.QDialog):
|
|||
if repre["name"] not in selected_repres:
|
||||
continue
|
||||
|
||||
repre_path = path_from_represenation(repre, self.anatomy)
|
||||
repre_path = path_from_representation(repre, self.anatomy)
|
||||
|
||||
anatomy_data = copy.deepcopy(repre["context"])
|
||||
new_report_items = check_destination_path(str(repre["_id"]),
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ class CollectOcioReview(pyblish.api.InstancePlugin):
|
|||
otio_review_clips = []
|
||||
otio_timeline = instance.context.data["otioTimeline"]
|
||||
otio_clip = instance.data["otioClip"]
|
||||
|
||||
self.log.debug("__ otioClip: {}".format(otio_clip))
|
||||
# optionally get `reviewTrack`
|
||||
review_track_name = instance.data.get("reviewTrack")
|
||||
|
||||
|
|
@ -37,7 +37,7 @@ class CollectOcioReview(pyblish.api.InstancePlugin):
|
|||
otio_tl_range = otio_clip.range_in_parent()
|
||||
|
||||
# calculate real timeline end needed for the clip
|
||||
clip_end_frame = int(
|
||||
clip_frame_end = int(
|
||||
otio_tl_range.start_time.value + otio_tl_range.duration.value)
|
||||
|
||||
# skip if no review track available
|
||||
|
|
@ -57,13 +57,12 @@ class CollectOcioReview(pyblish.api.InstancePlugin):
|
|||
track_rip = track.range_in_parent()
|
||||
|
||||
# calculate real track end frame
|
||||
track_end_frame = int(
|
||||
track_rip.start_time.value + track_rip.duration.value)
|
||||
track_frame_end = int(track_rip.end_time_exclusive().value)
|
||||
|
||||
# check if the end of track is not lower then clip requirement
|
||||
if clip_end_frame > track_end_frame:
|
||||
if clip_frame_end > track_frame_end:
|
||||
# calculate diference duration
|
||||
gap_duration = clip_end_frame - track_end_frame
|
||||
gap_duration = clip_frame_end - track_frame_end
|
||||
# create rational time range for gap
|
||||
otio_gap_range = otio.opentime.TimeRange(
|
||||
start_time=otio.opentime.RationalTime(
|
||||
|
|
|
|||
|
|
@ -11,6 +11,7 @@ import clique
|
|||
import opentimelineio as otio
|
||||
import pyblish.api
|
||||
import openpype
|
||||
from openpype.lib import editorial
|
||||
|
||||
|
||||
class CollectOcioSubsetResources(pyblish.api.InstancePlugin):
|
||||
|
|
@ -27,59 +28,80 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin):
|
|||
return
|
||||
|
||||
if not instance.data.get("representations"):
|
||||
instance.data["representations"] = list()
|
||||
version_data = dict()
|
||||
instance.data["representations"] = []
|
||||
|
||||
if not instance.data.get("versionData"):
|
||||
instance.data["versionData"] = {}
|
||||
|
||||
handle_start = instance.data["handleStart"]
|
||||
handle_end = instance.data["handleEnd"]
|
||||
|
||||
# get basic variables
|
||||
otio_clip = instance.data["otioClip"]
|
||||
frame_start = instance.data["frameStart"]
|
||||
frame_end = instance.data["frameEnd"]
|
||||
|
||||
# generate range in parent
|
||||
otio_src_range = otio_clip.source_range
|
||||
otio_avalable_range = otio_clip.available_range()
|
||||
trimmed_media_range = openpype.lib.trim_media_range(
|
||||
otio_avalable_range, otio_src_range)
|
||||
media_fps = otio_avalable_range.start_time.rate
|
||||
|
||||
# calculate wth handles
|
||||
otio_src_range_handles = openpype.lib.otio_range_with_handles(
|
||||
otio_src_range, instance)
|
||||
trimmed_media_range_h = openpype.lib.trim_media_range(
|
||||
otio_avalable_range, otio_src_range_handles)
|
||||
# get available range trimmed with processed retimes
|
||||
retimed_attributes = editorial.get_media_range_with_retimes(
|
||||
otio_clip, handle_start, handle_end)
|
||||
self.log.debug(
|
||||
">> retimed_attributes: {}".format(retimed_attributes))
|
||||
|
||||
# frame start and end from media
|
||||
s_frame_start, s_frame_end = openpype.lib.otio_range_to_frame_range(
|
||||
trimmed_media_range)
|
||||
a_frame_start, a_frame_end = openpype.lib.otio_range_to_frame_range(
|
||||
otio_avalable_range)
|
||||
a_frame_start_h, a_frame_end_h = openpype.lib.\
|
||||
otio_range_to_frame_range(trimmed_media_range_h)
|
||||
# break down into variables
|
||||
media_in = int(retimed_attributes["mediaIn"])
|
||||
media_out = int(retimed_attributes["mediaOut"])
|
||||
handle_start = int(retimed_attributes["handleStart"])
|
||||
handle_end = int(retimed_attributes["handleEnd"])
|
||||
|
||||
# fix frame_start and frame_end frame to be in range of media
|
||||
if a_frame_start_h < a_frame_start:
|
||||
a_frame_start_h = a_frame_start
|
||||
# set versiondata if any retime
|
||||
version_data = retimed_attributes.get("versionData")
|
||||
|
||||
if a_frame_end_h > a_frame_end:
|
||||
a_frame_end_h = a_frame_end
|
||||
if version_data:
|
||||
instance.data["versionData"].update(version_data)
|
||||
|
||||
# count the difference for frame_start and frame_end
|
||||
diff_start = s_frame_start - a_frame_start_h
|
||||
diff_end = a_frame_end_h - s_frame_end
|
||||
# convert to available frame range with handles
|
||||
a_frame_start_h = media_in - handle_start
|
||||
a_frame_end_h = media_out + handle_end
|
||||
|
||||
# create trimmed ocio time range
|
||||
trimmed_media_range_h = editorial.range_from_frames(
|
||||
a_frame_start_h, (a_frame_end_h - a_frame_start_h + 1),
|
||||
media_fps
|
||||
)
|
||||
self.log.debug("trimmed_media_range_h: {}".format(
|
||||
trimmed_media_range_h))
|
||||
self.log.debug("a_frame_start_h: {}".format(
|
||||
a_frame_start_h))
|
||||
self.log.debug("a_frame_end_h: {}".format(
|
||||
a_frame_end_h))
|
||||
|
||||
# create frame start and end
|
||||
frame_start = instance.data["frameStart"]
|
||||
frame_end = frame_start + (media_out - media_in)
|
||||
|
||||
# add to version data start and end range data
|
||||
# for loader plugins to be correctly displayed and loaded
|
||||
version_data.update({
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": frame_end,
|
||||
"handleStart": diff_start,
|
||||
"handleEnd": diff_end,
|
||||
"fps": otio_avalable_range.start_time.rate
|
||||
instance.data["versionData"].update({
|
||||
"fps": media_fps
|
||||
})
|
||||
|
||||
if not instance.data["versionData"].get("retime"):
|
||||
instance.data["versionData"].update({
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": frame_end,
|
||||
"handleStart": handle_start,
|
||||
"handleEnd": handle_end,
|
||||
})
|
||||
else:
|
||||
instance.data["versionData"].update({
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": frame_end
|
||||
})
|
||||
|
||||
# change frame_start and frame_end values
|
||||
# for representation to be correctly renumbered in integrate_new
|
||||
frame_start -= diff_start
|
||||
frame_end += diff_end
|
||||
frame_start -= handle_start
|
||||
frame_end += handle_end
|
||||
|
||||
media_ref = otio_clip.media_reference
|
||||
metadata = media_ref.metadata
|
||||
|
|
@ -136,12 +158,13 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin):
|
|||
frame_start, frame_end, file=filename)
|
||||
|
||||
if repre:
|
||||
instance.data["versionData"] = version_data
|
||||
self.log.debug(">>>>>>>> version data {}".format(version_data))
|
||||
# add representation to instance data
|
||||
instance.data["representations"].append(repre)
|
||||
self.log.debug(">>>>>>>> {}".format(repre))
|
||||
|
||||
import pprint
|
||||
self.log.debug(pprint.pformat(instance.data))
|
||||
|
||||
def _create_representation(self, start, end, **kwargs):
|
||||
"""
|
||||
Creating representation data.
|
||||
|
|
|
|||
|
|
@ -51,7 +51,6 @@ class ExtractOTIOReview(openpype.api.Extractor):
|
|||
|
||||
def process(self, instance):
|
||||
# TODO: convert resulting image sequence to mp4
|
||||
# TODO: add oudio ouput to the mp4 if audio in review is on.
|
||||
|
||||
# get otio clip and other time info from instance clip
|
||||
# TODO: what if handles are different in `versionData`?
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ from openpype.lib import (
|
|||
get_decompress_dir,
|
||||
decompress
|
||||
)
|
||||
import speedcopy
|
||||
|
||||
|
||||
class ExtractReview(pyblish.api.InstancePlugin):
|
||||
|
|
@ -190,7 +191,16 @@ class ExtractReview(pyblish.api.InstancePlugin):
|
|||
"New representation tags: `{}`".format(new_repre["tags"])
|
||||
)
|
||||
|
||||
temp_data = self.prepare_temp_data(instance, repre, output_def)
|
||||
temp_data = self.prepare_temp_data(
|
||||
instance, repre, output_def)
|
||||
files_to_clean = []
|
||||
if temp_data["input_is_sequence"]:
|
||||
self.log.info("Filling gaps in sequence.")
|
||||
files_to_clean = self.fill_sequence_gaps(
|
||||
temp_data["origin_repre"]["files"],
|
||||
new_repre["stagingDir"],
|
||||
temp_data["frame_start"],
|
||||
temp_data["frame_end"])
|
||||
|
||||
try: # temporary until oiiotool is supported cross platform
|
||||
ffmpeg_args = self._ffmpeg_arguments(
|
||||
|
|
@ -201,7 +211,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
|
|||
self.log.debug("Unsupported compression on input " +
|
||||
"files. Skipping!!!")
|
||||
return
|
||||
raise
|
||||
raise NotImplementedError
|
||||
|
||||
subprcs_cmd = " ".join(ffmpeg_args)
|
||||
|
||||
|
|
@ -212,6 +222,11 @@ class ExtractReview(pyblish.api.InstancePlugin):
|
|||
subprcs_cmd, shell=True, logger=self.log
|
||||
)
|
||||
|
||||
# delete files added to fill gaps
|
||||
if files_to_clean:
|
||||
for f in files_to_clean:
|
||||
os.unlink(f)
|
||||
|
||||
output_name = output_def["filename_suffix"]
|
||||
if temp_data["without_handles"]:
|
||||
output_name += "_noHandles"
|
||||
|
|
@ -604,6 +619,89 @@ class ExtractReview(pyblish.api.InstancePlugin):
|
|||
|
||||
return all_args
|
||||
|
||||
def fill_sequence_gaps(self, files, staging_dir, start_frame, end_frame):
|
||||
# type: (list, str, int, int) -> list
|
||||
"""Fill missing files in sequence by duplicating existing ones.
|
||||
|
||||
This will take nearest frame file and copy it with so as to fill
|
||||
gaps in sequence. Last existing file there is is used to for the
|
||||
hole ahead.
|
||||
|
||||
Args:
|
||||
files (list): List of representation files.
|
||||
staging_dir (str): Path to staging directory.
|
||||
start_frame (int): Sequence start (no matter what files are there)
|
||||
end_frame (int): Sequence end (no matter what files are there)
|
||||
|
||||
Returns:
|
||||
list of added files. Those should be cleaned after work
|
||||
is done.
|
||||
|
||||
Raises:
|
||||
AssertionError: if more then one collection is obtained.
|
||||
|
||||
"""
|
||||
collections = clique.assemble(files)[0]
|
||||
assert len(collections) == 1, "Multiple collections found."
|
||||
col = collections[0]
|
||||
# do nothing if sequence is complete
|
||||
if list(col.indexes)[0] == start_frame and \
|
||||
list(col.indexes)[-1] == end_frame and \
|
||||
col.is_contiguous():
|
||||
return []
|
||||
|
||||
holes = col.holes()
|
||||
|
||||
# generate ideal sequence
|
||||
complete_col = clique.assemble(
|
||||
[("{}{:0" + str(col.padding) + "d}{}").format(
|
||||
col.head, f, col.tail
|
||||
) for f in range(start_frame, end_frame)]
|
||||
)[0][0] # type: clique.Collection
|
||||
|
||||
new_files = {}
|
||||
last_existing_file = None
|
||||
|
||||
for idx in holes.indexes:
|
||||
# get previous existing file
|
||||
test_file = os.path.normpath(os.path.join(
|
||||
staging_dir,
|
||||
("{}{:0" + str(complete_col.padding) + "d}{}").format(
|
||||
complete_col.head, idx - 1, complete_col.tail)))
|
||||
if os.path.isfile(test_file):
|
||||
new_files[idx] = test_file
|
||||
last_existing_file = test_file
|
||||
else:
|
||||
if not last_existing_file:
|
||||
# previous file is not found (sequence has a hole
|
||||
# at the beginning. Use first available frame
|
||||
# there is.
|
||||
try:
|
||||
last_existing_file = list(col)[0]
|
||||
except IndexError:
|
||||
# empty collection?
|
||||
raise AssertionError(
|
||||
"Invalid sequence collected")
|
||||
new_files[idx] = os.path.normpath(
|
||||
os.path.join(staging_dir, last_existing_file))
|
||||
|
||||
files_to_clean = []
|
||||
if new_files:
|
||||
# so now new files are dict with missing frame as a key and
|
||||
# existing file as a value.
|
||||
for frame, file in new_files.items():
|
||||
self.log.info(
|
||||
"Filling gap {} with {}".format(frame, file))
|
||||
|
||||
hole = os.path.join(
|
||||
staging_dir,
|
||||
("{}{:0" + str(col.padding) + "d}{}").format(
|
||||
col.head, frame, col.tail))
|
||||
speedcopy.copyfile(file, hole)
|
||||
files_to_clean.append(hole)
|
||||
|
||||
return files_to_clean
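To make the gap detection above more concrete, a small standalone sketch of the clique calls the method relies on; the file names are made up and this is not part of the diff.

    import clique

    files = ["shot010.1001.exr", "shot010.1002.exr", "shot010.1005.exr"]
    collections, _remainder = clique.assemble(files)
    col = collections[0]

    print(col.is_contiguous())          # False, frames 1003 and 1004 are missing
    print(sorted(col.holes().indexes))  # [1003, 1004]
    # the name the plugin would copy the nearest existing frame to
    print(("{}{:0" + str(col.padding) + "d}{}").format(col.head, 1003, col.tail))
    # shot010.1003.exr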
|
||||
|
||||
def input_output_paths(self, new_repre, output_def, temp_data):
|
||||
"""Deduce input nad output file paths based on entered data.
|
||||
|
||||
|
|
@ -622,7 +720,6 @@ class ExtractReview(pyblish.api.InstancePlugin):
|
|||
|
||||
if temp_data["input_is_sequence"]:
|
||||
collections = clique.assemble(repre["files"])[0]
|
||||
|
||||
full_input_path = os.path.join(
|
||||
staging_dir,
|
||||
collections[0].format("{head}{padding}{tail}")
|
||||
|
|
@ -1686,7 +1783,7 @@ class OverscanCrop:
|
|||
def _convert_string_to_values(self, orig_string_value):
|
||||
string_value = orig_string_value.strip().lower()
|
||||
if not string_value:
|
||||
return
|
||||
return [PixValueRelative(0), PixValueRelative(0)]
|
||||
|
||||
# Replace "px" (and spaces before) with single space
|
||||
string_value = re.sub(r"([ ]+)?px", " ", string_value)
|
||||
|
|
|
|||
|
|
@ -417,21 +417,22 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
|
|||
|
||||
dst_padding_exp = src_padding_exp
|
||||
dst_start_frame = None
|
||||
collection_start = list(src_collection.indexes)[0]
|
||||
for i in src_collection.indexes:
|
||||
# TODO 1.) do not count padding in each index iteration
|
||||
# 2.) do not count dst_padding from src_padding before
|
||||
# index_frame_start check
|
||||
frame_number = i - collection_start
|
||||
src_padding = src_padding_exp % i
|
||||
|
||||
src_file_name = "{0}{1}{2}".format(
|
||||
src_head, src_padding, src_tail)
|
||||
|
||||
dst_padding = src_padding_exp % i
|
||||
dst_padding = src_padding_exp % frame_number
|
||||
|
||||
if index_frame_start is not None:
|
||||
dst_padding_exp = "%0{}d".format(frame_start_padding)
|
||||
dst_padding = dst_padding_exp % index_frame_start
|
||||
index_frame_start += 1
|
||||
dst_padding = dst_padding_exp % (index_frame_start + frame_number) # noqa: E501
|
||||
|
||||
dst = "{0}{1}{2}".format(
|
||||
dst_head,
|
||||
|
|
|
|||
|
|
@ -27,7 +27,10 @@ class PypeCommands:
|
|||
from openpype.tools import settings
|
||||
|
||||
# TODO change argument options to allow enum of user roles
|
||||
user_role = "developer"
|
||||
if dev:
|
||||
user_role = "developer"
|
||||
else:
|
||||
user_role = "manager"
|
||||
settings.main(user_role)
|
||||
|
||||
@staticmethod
|
||||
|
|
|
|||
|
|
|
@ -81,11 +81,11 @@ def main(argv):
|
|||
|
||||
host_name = os.environ["AVALON_APP"].lower()
|
||||
if host_name == "photoshop":
|
||||
from avalon.photoshop.lib import launch
|
||||
from avalon.photoshop.lib import main
|
||||
elif host_name == "aftereffects":
|
||||
from avalon.aftereffects.lib import launch
|
||||
from avalon.aftereffects.lib import main
|
||||
elif host_name == "harmony":
|
||||
from avalon.harmony.lib import launch
|
||||
from avalon.harmony.lib import main
|
||||
else:
|
||||
title = "Unknown host name"
|
||||
message = (
|
||||
|
|
@ -97,7 +97,7 @@ def main(argv):
|
|||
|
||||
if launch_args:
|
||||
# Launch host implementation
|
||||
launch(*launch_args)
|
||||
main(*launch_args)
|
||||
else:
|
||||
# Show message box
|
||||
on_invalid_args(after_script_idx is None)
|
||||
|
|
|
|||
|
|
@ -4,8 +4,12 @@
|
|||
"enabled": true,
|
||||
"optional": true,
|
||||
"active": true,
|
||||
"skip_resolution_check": [".*"],
|
||||
"skip_timelines_check": [".*"]
|
||||
"skip_resolution_check": [
|
||||
".*"
|
||||
],
|
||||
"skip_timelines_check": [
|
||||
".*"
|
||||
]
|
||||
},
|
||||
"AfterEffectsSubmitDeadline": {
|
||||
"use_published": true,
|
||||
|
|
@ -14,5 +18,9 @@
|
|||
"secondary_pool": "",
|
||||
"chunk_size": 1000000
|
||||
}
|
||||
},
|
||||
"workfile_builder": {
|
||||
"create_first_version": false,
|
||||
"custom_templates": []
|
||||
}
|
||||
}
|
||||
6
openpype/settings/defaults/project_settings/blender.json
Normal file
6
openpype/settings/defaults/project_settings/blender.json
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
{
|
||||
"workfile_builder": {
|
||||
"create_first_version": false,
|
||||
"custom_templates": []
|
||||
}
|
||||
}
|
||||
|
|
@ -273,8 +273,7 @@
|
|||
"active_site": "studio",
|
||||
"remote_site": "studio"
|
||||
},
|
||||
"sites": {
|
||||
}
|
||||
"sites": {}
|
||||
},
|
||||
"project_plugins": {
|
||||
"windows": [],
|
||||
|
|
|
|||
|
|
@ -293,19 +293,22 @@
|
|||
},
|
||||
"Display Options": {
|
||||
"background": [
|
||||
0.7,
|
||||
0.7,
|
||||
0.7
|
||||
125,
|
||||
125,
|
||||
125,
|
||||
255
|
||||
],
|
||||
"backgroundBottom": [
|
||||
0.7,
|
||||
0.7,
|
||||
0.7
|
||||
125,
|
||||
125,
|
||||
125,
|
||||
255
|
||||
],
|
||||
"backgroundTop": [
|
||||
0.7,
|
||||
0.7,
|
||||
0.7
|
||||
125,
|
||||
125,
|
||||
125,
|
||||
255
|
||||
],
|
||||
"override_display": true
|
||||
},
|
||||
|
|
@ -393,74 +396,88 @@
|
|||
"load": {
|
||||
"colors": {
|
||||
"model": [
|
||||
0.821,
|
||||
0.518,
|
||||
0.117
|
||||
209,
|
||||
132,
|
||||
30,
|
||||
255
|
||||
],
|
||||
"rig": [
|
||||
0.144,
|
||||
0.443,
|
||||
0.463
|
||||
59,
|
||||
226,
|
||||
235,
|
||||
255
|
||||
],
|
||||
"pointcache": [
|
||||
0.368,
|
||||
0.821,
|
||||
0.117
|
||||
94,
|
||||
209,
|
||||
30,
|
||||
255
|
||||
],
|
||||
"animation": [
|
||||
0.368,
|
||||
0.821,
|
||||
0.117
|
||||
94,
|
||||
209,
|
||||
30,
|
||||
255
|
||||
],
|
||||
"ass": [
|
||||
1.0,
|
||||
0.332,
|
||||
0.312
|
||||
249,
|
||||
135,
|
||||
53,
|
||||
255
|
||||
],
|
||||
"camera": [
|
||||
0.447,
|
||||
0.312,
|
||||
1.0
|
||||
136,
|
||||
114,
|
||||
244,
|
||||
255
|
||||
],
|
||||
"fbx": [
|
||||
1.0,
|
||||
0.931,
|
||||
0.312
|
||||
215,
|
||||
166,
|
||||
255,
|
||||
255
|
||||
],
|
||||
"mayaAscii": [
|
||||
0.312,
|
||||
1.0,
|
||||
0.747
|
||||
67,
|
||||
174,
|
||||
255,
|
||||
255
|
||||
],
|
||||
"setdress": [
|
||||
0.312,
|
||||
1.0,
|
||||
0.747
|
||||
255,
|
||||
250,
|
||||
90,
|
||||
255
|
||||
],
|
||||
"layout": [
|
||||
0.312,
|
||||
1.0,
|
||||
0.747
|
||||
255,
|
||||
250,
|
||||
90,
|
||||
255
|
||||
],
|
||||
"vdbcache": [
|
||||
0.312,
|
||||
1.0,
|
||||
0.428
|
||||
249,
|
||||
54,
|
||||
0,
|
||||
255
|
||||
],
|
||||
"vrayproxy": [
|
||||
0.258,
|
||||
0.95,
|
||||
0.541
|
||||
255,
|
||||
150,
|
||||
12,
|
||||
255
|
||||
],
|
||||
"yeticache": [
|
||||
0.2,
|
||||
0.8,
|
||||
0.3
|
||||
99,
|
||||
206,
|
||||
220,
|
||||
255
|
||||
],
|
||||
"yetiRig": [
|
||||
0.0,
|
||||
0.8,
|
||||
0.5
|
||||
0,
|
||||
205,
|
||||
125,
|
||||
255
|
||||
]
|
||||
}
|
||||
},
|
||||
|
|
|
|||
|
|
@ -6,9 +6,7 @@
|
|||
"load": "ctrl+alt+l",
|
||||
"manage": "ctrl+alt+m",
|
||||
"build_workfile": "ctrl+alt+b"
|
||||
},
|
||||
"open_workfile_at_start": false,
|
||||
"create_initial_workfile": true
|
||||
}
|
||||
},
|
||||
"create": {
|
||||
"CreateWriteRender": {
|
||||
|
|
@ -147,12 +145,13 @@
|
|||
"node_name_template": "{class_name}_{ext}"
|
||||
}
|
||||
},
|
||||
"workfile_build": {
|
||||
"workfile_builder": {
|
||||
"create_first_version": false,
|
||||
"custom_templates": [],
|
||||
"builder_on_start": false,
|
||||
"profiles": [
|
||||
{
|
||||
"tasks": [
|
||||
"compositing"
|
||||
],
|
||||
"tasks": [],
|
||||
"current_context": [
|
||||
{
|
||||
"subset_name_filters": [],
|
||||
|
|
@ -162,10 +161,12 @@
|
|||
],
|
||||
"repre_names": [
|
||||
"exr",
|
||||
"dpx"
|
||||
"dpx",
|
||||
"mov"
|
||||
],
|
||||
"loaders": [
|
||||
"LoadSequence"
|
||||
"LoadSequence",
|
||||
"LoadMov"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
|
|
|||
|
|
@ -13,5 +13,9 @@
|
|||
"jpg"
|
||||
]
|
||||
}
|
||||
},
|
||||
"workfile_builder": {
|
||||
"create_first_version": false,
|
||||
"custom_templates": []
|
||||
}
|
||||
}
|
||||
|
|
@ -32,5 +32,9 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"workfile_builder": {
|
||||
"create_first_version": false,
|
||||
"custom_templates": []
|
||||
},
|
||||
"filters": {}
|
||||
}
|
||||
|
|
@ -164,5 +164,8 @@
|
|||
},
|
||||
"standalonepublish_tool": {
|
||||
"enabled": true
|
||||
},
|
||||
"project_manager": {
|
||||
"enabled": true
|
||||
}
|
||||
}
|
||||
|
|
@ -103,6 +103,7 @@ from .enum_entity import (
|
|||
EnumEntity,
|
||||
AppsEnumEntity,
|
||||
ToolsEnumEntity,
|
||||
TaskTypeEnumEntity,
|
||||
ProvidersEnum
|
||||
)
|
||||
|
||||
|
|
@ -154,6 +155,7 @@ __all__ = (
|
|||
"EnumEntity",
|
||||
"AppsEnumEntity",
|
||||
"ToolsEnumEntity",
|
||||
"TaskTypeEnumEntity",
|
||||
"ProvidersEnum",
|
||||
|
||||
"ListEntity",
|
||||
|
|
|
|||
|
|
@ -219,6 +219,41 @@ class ToolsEnumEntity(BaseEnumEntity):
|
|||
self._current_value = new_value
|
||||
|
||||
|
||||
class TaskTypeEnumEntity(BaseEnumEntity):
|
||||
schema_types = ["task-types-enum"]
|
||||
|
||||
def _item_initalization(self):
|
||||
self.multiselection = True
|
||||
self.value_on_not_set = []
|
||||
self.enum_items = []
|
||||
self.valid_keys = set()
|
||||
self.valid_value_types = (list, )
|
||||
self.placeholder = None
|
||||
|
||||
def _get_enum_values(self):
|
||||
anatomy_entity = self.get_entity_from_path(
|
||||
"project_settings/project_anatomy"
|
||||
)
|
||||
|
||||
valid_keys = set()
|
||||
enum_items = []
|
||||
for task_type in anatomy_entity["tasks"].keys():
|
||||
enum_items.append({task_type: task_type})
|
||||
valid_keys.add(task_type)
|
||||
|
||||
return enum_items, valid_keys
|
||||
|
||||
def set_override_state(self, *args, **kwargs):
|
||||
super(TaskTypeEnumEntity, self).set_override_state(*args, **kwargs)
|
||||
|
||||
self.enum_items, self.valid_keys = self._get_enum_values()
|
||||
new_value = []
|
||||
for key in self._current_value:
|
||||
if key in self.valid_keys:
|
||||
new_value.append(key)
|
||||
self._current_value = new_value
|
||||
|
||||
|
||||
class ProvidersEnum(BaseEnumEntity):
|
||||
schema_types = ["providers-enum"]
|
||||
|
||||
|
|
|
|||
|
|
@ -17,26 +17,60 @@ WRAPPER_TYPES = ["form", "collapsible-wrap"]
|
|||
NOT_SET = type("NOT_SET", (), {"__bool__": lambda obj: False})()
|
||||
OVERRIDE_VERSION = 1
|
||||
|
||||
DEFAULT_VALUES_KEY = "__default_values__"
|
||||
TEMPLATE_METADATA_KEYS = (
|
||||
DEFAULT_VALUES_KEY,
|
||||
)
|
||||
|
||||
template_key_pattern = re.compile(r"(\{.*?[^{0]*\})")
|
||||
|
||||
|
||||
def _pop_metadata_item(template):
|
||||
found_idx = None
|
||||
for idx, item in enumerate(template):
|
||||
if not isinstance(item, dict):
|
||||
continue
|
||||
|
||||
for key in TEMPLATE_METADATA_KEYS:
|
||||
if key in item:
|
||||
found_idx = idx
|
||||
break
|
||||
|
||||
if found_idx is not None:
|
||||
break
|
||||
|
||||
metadata_item = {}
|
||||
if found_idx is not None:
|
||||
metadata_item = template.pop(found_idx)
|
||||
return metadata_item
|
||||
|
||||
|
||||
def _fill_schema_template_data(
|
||||
template, template_data, required_keys=None, missing_keys=None
|
||||
template, template_data, skip_paths, required_keys=None, missing_keys=None
|
||||
):
|
||||
first = False
|
||||
if required_keys is None:
|
||||
first = True
|
||||
|
||||
if "skip_paths" in template_data:
|
||||
skip_paths = template_data["skip_paths"]
|
||||
if not isinstance(skip_paths, list):
|
||||
skip_paths = [skip_paths]
|
||||
|
||||
# Cleanup skip paths (skip empty values)
|
||||
skip_paths = [path for path in skip_paths if path]
|
||||
|
||||
required_keys = set()
|
||||
missing_keys = set()
|
||||
|
||||
_template = []
|
||||
default_values = {}
|
||||
for item in template:
|
||||
if isinstance(item, dict) and "__default_values__" in item:
|
||||
default_values = item["__default_values__"]
|
||||
else:
|
||||
_template.append(item)
|
||||
template = _template
|
||||
# Copy template data as content may change
|
||||
template = copy.deepcopy(template)
|
||||
|
||||
# Get metadata item from template
|
||||
metadata_item = _pop_metadata_item(template)
|
||||
|
||||
# Check for default values for template data
|
||||
default_values = metadata_item.get(DEFAULT_VALUES_KEY) or {}
|
||||
|
||||
for key, value in default_values.items():
|
||||
if key not in template_data:
|
||||
|
|
@ -46,21 +80,55 @@ def _fill_schema_template_data(
|
|||
output = template
|
||||
|
||||
elif isinstance(template, list):
|
||||
# Store paths by first part if path
|
||||
# - None value says that whole key should be skipped
|
||||
skip_paths_by_first_key = {}
|
||||
for path in skip_paths:
|
||||
parts = path.split("/")
|
||||
key = parts.pop(0)
|
||||
if key not in skip_paths_by_first_key:
|
||||
skip_paths_by_first_key[key] = []
|
||||
|
||||
value = "/".join(parts)
|
||||
skip_paths_by_first_key[key].append(value or None)
|
||||
|
||||
output = []
|
||||
for item in template:
|
||||
output.append(_fill_schema_template_data(
|
||||
item, template_data, required_keys, missing_keys
|
||||
))
|
||||
# Get skip paths for children item
|
||||
_skip_paths = []
|
||||
if not isinstance(item, dict):
|
||||
pass
|
||||
|
||||
elif item.get("type") in WRAPPER_TYPES:
|
||||
_skip_paths = copy.deepcopy(skip_paths)
|
||||
|
||||
elif skip_paths_by_first_key:
|
||||
# Check if this item should be skipped
|
||||
key = item.get("key")
|
||||
if key and key in skip_paths_by_first_key:
|
||||
_skip_paths = skip_paths_by_first_key[key]
|
||||
# Skip whole item if None is in skip paths value
|
||||
if None in _skip_paths:
|
||||
continue
|
||||
|
||||
output_item = _fill_schema_template_data(
|
||||
item, template_data, _skip_paths, required_keys, missing_keys
|
||||
)
|
||||
if output_item:
|
||||
output.append(output_item)
|
||||
|
||||
elif isinstance(template, dict):
|
||||
output = {}
|
||||
for key, value in template.items():
|
||||
output[key] = _fill_schema_template_data(
|
||||
value, template_data, required_keys, missing_keys
|
||||
value, template_data, skip_paths, required_keys, missing_keys
|
||||
)
|
||||
if output.get("type") in WRAPPER_TYPES and not output.get("children"):
|
||||
return {}
|
||||
|
||||
elif isinstance(template, STRING_TYPE):
|
||||
# TODO find much better way how to handle filling template data
|
||||
template = template.replace("{{", "__dbcb__").replace("}}", "__decb__")
|
||||
for replacement_string in template_key_pattern.findall(template):
|
||||
key = str(replacement_string[1:-1])
|
||||
required_keys.add(key)
|
||||
|
|
@ -76,7 +144,8 @@ def _fill_schema_template_data(
|
|||
else:
|
||||
# Only replace the key in string
|
||||
template = template.replace(replacement_string, value)
|
||||
output = template
|
||||
|
||||
output = template.replace("__dbcb__", "{").replace("__decb__", "}")
|
||||
|
||||
else:
|
||||
output = template
|
||||
|
|
@ -105,11 +174,15 @@ def _fill_schema_template(child_data, schema_collection, schema_templates):
|
|||
if isinstance(template_data, dict):
|
||||
template_data = [template_data]
|
||||
|
||||
skip_paths = child_data.get("skip_paths") or []
|
||||
if isinstance(skip_paths, STRING_TYPE):
|
||||
skip_paths = [skip_paths]
|
||||
|
||||
output = []
|
||||
for single_template_data in template_data:
|
||||
try:
|
||||
filled_child = _fill_schema_template_data(
|
||||
template, single_template_data
|
||||
template, single_template_data, skip_paths
|
||||
)
|
||||
|
||||
except SchemaTemplateMissingKeys as exc:
|
||||
|
|
@ -166,7 +239,7 @@ def _fill_inner_schemas(schema_data, schema_collection, schema_templates):
|
|||
schema_templates
|
||||
)
|
||||
|
||||
elif child_type == "schema_template":
|
||||
elif child_type in ("template", "schema_template"):
|
||||
for filled_child in _fill_schema_template(
|
||||
child, schema_collection, schema_templates
|
||||
):
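To make the new "skip_paths" handling above more concrete, a hypothetical schema entry written as a Python dict. Only the "type" value and the "skip_paths" key are taken directly from the code in this diff; the "name" and "template_data" keys, and the path itself, are assumptions for illustration.

    # Hypothetical reference to a schema template with part of it skipped.
    child_data = {
        "type": "template",
        "name": "template_workfile_options",
        "template_data": [{"host_label": "Nuke"}],
        # drops the "builder_on_start" item from the filled template output
        "skip_paths": ["workfile_builder/builder_on_start"]
    }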
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff.